linux_dsm_epyc7002/drivers/i2c/busses/i2c-designware-master.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Synopsys DesignWare I2C adapter driver (master only).
*
* Based on the TI DAVINCI I2C adapter driver.
*
* Copyright (C) 2006 Texas Instruments.
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
*/
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include "i2c-designware-core.h"
static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
{
/* Configure Tx/Rx FIFO threshold levels */
dw_writel(dev, dev->tx_fifo_depth / 2, DW_IC_TX_TL);
dw_writel(dev, 0, DW_IC_RX_TL);
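/*
* Note: a TX threshold of tx_fifo_depth / 2 makes TX_EMPTY fire once the
* FIFO is at least half empty, while an RX threshold of 0 makes RX_FULL
* fire as soon as a single byte has been received.
*/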
/* Configure the I2C master */
dw_writel(dev, dev->master_cfg, DW_IC_CON);
}
static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
const char *mode_str, *fp_str = "";
u32 comp_param1;
u32 sda_falling_time, scl_falling_time;
struct i2c_timings *t = &dev->timings;
u32 ic_clk;
int ret;
ret = i2c_dw_acquire_lock(dev);
if (ret)
return ret;
comp_param1 = dw_readl(dev, DW_IC_COMP_PARAM_1);
i2c_dw_release_lock(dev);
/* Set standard and fast speed dividers for high/low periods */
sda_falling_time = t->sda_fall_ns ?: 300; /* ns */
scl_falling_time = t->scl_fall_ns ?: 300; /* ns */
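/*
* Rough sketch of what the i2c_dw_scl_hcnt()/i2c_dw_scl_lcnt() helpers
* below compute (the exact implementation, including the small IP-specific
* offsets, lives in i2c-designware-common.c): with ic_clk in kHz and the
* timings in ns, the counts are approximately
*
*   HCNT ~= ic_clk * (tHIGH + tf_sda) / 1000000 - offset
*   LCNT ~= ic_clk * (tLOW  + tf_scl) / 1000000 - offset
*
* i.e. the number of IC_CLK cycles needed to cover the desired SCL phase
* plus the signal fall time.
*/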
/* Calculate SCL timing parameters for standard mode if not set */
if (!dev->ss_hcnt || !dev->ss_lcnt) {
ic_clk = i2c_dw_clk_rate(dev);
dev->ss_hcnt =
i2c_dw_scl_hcnt(ic_clk,
4000, /* tHD;STA = tHIGH = 4.0 us */
sda_falling_time,
0, /* 0: DW default, 1: Ideal */
0); /* No offset */
dev->ss_lcnt =
i2c_dw_scl_lcnt(ic_clk,
4700, /* tLOW = 4.7 us */
scl_falling_time,
0); /* No offset */
}
dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n",
dev->ss_hcnt, dev->ss_lcnt);
/*
* Set SCL timing parameters for fast mode or fast mode plus. Only
* difference is the timing parameter values since the registers are
* the same.
*/
if (t->bus_freq_hz == 1000000) {
/*
* Check whether fast mode plus parameters are available and fall
* back to plain fast mode if not.
*/
if (dev->fp_hcnt && dev->fp_lcnt) {
dev->fs_hcnt = dev->fp_hcnt;
dev->fs_lcnt = dev->fp_lcnt;
fp_str = " Plus";
}
}
/*
* Calculate SCL timing parameters for fast mode if not set. They are
* also needed in high speed mode.
*/
if (!dev->fs_hcnt || !dev->fs_lcnt) {
ic_clk = i2c_dw_clk_rate(dev);
dev->fs_hcnt =
i2c_dw_scl_hcnt(ic_clk,
600, /* tHD;STA = tHIGH = 0.6 us */
sda_falling_time,
0, /* 0: DW default, 1: Ideal */
0); /* No offset */
dev->fs_lcnt =
i2c_dw_scl_lcnt(ic_clk,
1300, /* tLOW = 1.3 us */
scl_falling_time,
0); /* No offset */
}
dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n",
fp_str, dev->fs_hcnt, dev->fs_lcnt);
/* Check whether high speed mode is possible and fall back to fast mode if not */
if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
DW_IC_CON_SPEED_HIGH) {
if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
!= DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
dev_err(dev->dev, "High Speed not supported!\n");
dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
dev->master_cfg |= DW_IC_CON_SPEED_FAST;
dev->hs_hcnt = 0;
dev->hs_lcnt = 0;
} else if (dev->hs_hcnt && dev->hs_lcnt) {
dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n",
dev->hs_hcnt, dev->hs_lcnt);
}
}
ret = i2c_dw_set_sda_hold(dev);
if (ret)
goto out;
switch (dev->master_cfg & DW_IC_CON_SPEED_MASK) {
case DW_IC_CON_SPEED_STD:
mode_str = "Standard Mode";
break;
case DW_IC_CON_SPEED_HIGH:
mode_str = "High Speed Mode";
break;
default:
mode_str = "Fast Mode";
}
dev_dbg(dev->dev, "Bus speed: %s%s\n", mode_str, fp_str);
out:
return ret;
}
/**
* i2c_dw_init_master() - Initialize the DesignWare I2C master hardware
* @dev: device private data
*
* This function configures and enables the I2C master. It is called during
* I2C initialization and again at run time when a transfer times out.
*/
static int i2c_dw_init_master(struct dw_i2c_dev *dev)
{
int ret;
ret = i2c_dw_acquire_lock(dev);
if (ret)
return ret;
/* Disable the adapter */
__i2c_dw_disable(dev);
/* Write standard speed timing parameters */
dw_writel(dev, dev->ss_hcnt, DW_IC_SS_SCL_HCNT);
dw_writel(dev, dev->ss_lcnt, DW_IC_SS_SCL_LCNT);
/* Write fast mode/fast mode plus timing parameters */
dw_writel(dev, dev->fs_hcnt, DW_IC_FS_SCL_HCNT);
dw_writel(dev, dev->fs_lcnt, DW_IC_FS_SCL_LCNT);
/* Write high speed timing parameters if supported */
if (dev->hs_hcnt && dev->hs_lcnt) {
dw_writel(dev, dev->hs_hcnt, DW_IC_HS_SCL_HCNT);
dw_writel(dev, dev->hs_lcnt, DW_IC_HS_SCL_LCNT);
}
/* Write SDA hold time if supported */
if (dev->sda_hold_time)
dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
i2c_dw_configure_fifo_master(dev);
i2c_dw_release_lock(dev);
return 0;
}
static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
struct i2c_msg *msgs = dev->msgs;
u32 ic_con, ic_tar = 0;
/* Disable the adapter */
__i2c_dw_disable(dev);
/* If the slave address is ten bit address, enable 10BITADDR */
ic_con = dw_readl(dev, DW_IC_CON);
if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
ic_con |= DW_IC_CON_10BITADDR_MASTER;
/*
* If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
* mode has to be enabled via bit 12 of IC_TAR register.
* We set it always as I2C_DYNAMIC_TAR_UPDATE can't be
* detected from registers.
*/
ic_tar = DW_IC_TAR_10BITADDR_MASTER;
} else {
ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
}
dw_writel(dev, ic_con, DW_IC_CON);
/*
* Set the slave (target) address and enable 10-bit addressing mode
* if applicable.
*/
dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR);
/* Enforce disabled interrupts (due to HW issues) */
i2c_dw_disable_int(dev);
/* Enable the adapter */
__i2c_dw_enable(dev);
/* Dummy read to avoid the register getting stuck on Bay Trail */
dw_readl(dev, DW_IC_ENABLE_STATUS);
/* Clear and enable interrupts */
dw_readl(dev, DW_IC_CLR_INTR);
dw_writel(dev, DW_IC_INTR_MASTER_MASK, DW_IC_INTR_MASK);
}
/*
* Initiate (and continue) low level master read/write transactions.
* This function is only called from i2c_dw_isr(); it pumps i2c_msg
* messages into the TX FIFO and handles messages whose data is longer
* than the TX FIFO depth.
*/
static void
i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
{
struct i2c_msg *msgs = dev->msgs;
u32 intr_mask;
int tx_limit, rx_limit;
u32 addr = msgs[dev->msg_write_idx].addr;
u32 buf_len = dev->tx_buf_len;
u8 *buf = dev->tx_buf;
bool need_restart = false;
intr_mask = DW_IC_INTR_MASTER_MASK;
for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
u32 flags = msgs[dev->msg_write_idx].flags;
/*
* All messages in a transfer must use the same target address:
* the hardware target address is programmed only once in
* i2c_dw_xfer_init(), so bail out if it changes mid-transfer.
*/
if (msgs[dev->msg_write_idx].addr != addr) {
dev_err(dev->dev,
"%s: invalid target address\n", __func__);
dev->msg_err = -EINVAL;
break;
}
if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
/* new i2c_msg */
buf = msgs[dev->msg_write_idx].buf;
buf_len = msgs[dev->msg_write_idx].len;
/* If both IC_EMPTYFIFO_HOLD_MASTER_EN and
* IC_RESTART_EN are set, we must manually
* set restart bit between messages.
*/
if ((dev->master_cfg & DW_IC_CON_RESTART_EN) &&
(dev->msg_write_idx > 0))
need_restart = true;
}
tx_limit = dev->tx_fifo_depth - dw_readl(dev, DW_IC_TXFLR);
rx_limit = dev->rx_fifo_depth - dw_readl(dev, DW_IC_RXFLR);
while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
u32 cmd = 0;
/*
* If IC_EMPTYFIFO_HOLD_MASTER_EN is set we must
* manually set the stop bit. However, it cannot be
* detected from the registers so we set it always
* when writing/reading the last byte.
*/
/*
* i2c-core always sets the buffer length of
* I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will
* be adjusted when receiving the first byte.
* Thus we can't stop the transaction here.
*/
if (dev->msg_write_idx == dev->msgs_num - 1 &&
buf_len == 1 && !(flags & I2C_M_RECV_LEN))
cmd |= BIT(9);
if (need_restart) {
cmd |= BIT(10);
need_restart = false;
}
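/*
* In DW_IC_DATA_CMD bit 8 selects a read cycle, bit 9 requests a
* STOP after this byte and bit 10 a RESTART before it, hence the
* cmd | 0x100 used to queue a read below.
*/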
if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
/* Avoid rx buffer overrun */
if (dev->rx_outstanding >= dev->rx_fifo_depth)
break;
dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD);
rx_limit--;
dev->rx_outstanding++;
} else
dw_writel(dev, cmd | *buf++, DW_IC_DATA_CMD);
tx_limit--; buf_len--;
}
dev->tx_buf = buf;
dev->tx_buf_len = buf_len;
/*
* Because we don't know the buffer length in the
* I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop
* the transaction here.
*/
if (buf_len > 0 || flags & I2C_M_RECV_LEN) {
/* more bytes to be written */
dev->status |= STATUS_WRITE_IN_PROGRESS;
break;
} else
dev->status &= ~STATUS_WRITE_IN_PROGRESS;
}
/*
* Once all messages have been queued we no longer need the TX_EMPTY
* interrupt.
*/
if (dev->msg_write_idx == dev->msgs_num)
intr_mask &= ~DW_IC_INTR_TX_EMPTY;
if (dev->msg_err)
intr_mask = 0;
dw_writel(dev, intr_mask, DW_IC_INTR_MASK);
}
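/*
* Handle the first (length) byte of an SMBus block read: grow the message
* to the announced length (plus the PEC byte if requested) so the remaining
* read commands get queued and the extra bytes received.
*/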
static u8
i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
{
struct i2c_msg *msgs = dev->msgs;
u32 flags = msgs[dev->msg_read_idx].flags;
/*
* Adjust the buffer length and mask the flag
* after receiving the first byte.
*/
len += (flags & I2C_CLIENT_PEC) ? 2 : 1;
dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding);
msgs[dev->msg_read_idx].len = len;
msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;
return len;
}
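/*
* Drain the RX FIFO into the buffers of the pending read messages; called
* from the interrupt handler on RX_FULL.
*/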
static void
i2c_dw_read(struct dw_i2c_dev *dev)
{
struct i2c_msg *msgs = dev->msgs;
int rx_valid;
for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) {
u32 len;
u8 *buf;
if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
continue;
if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
len = msgs[dev->msg_read_idx].len;
buf = msgs[dev->msg_read_idx].buf;
} else {
len = dev->rx_buf_len;
buf = dev->rx_buf;
}
rx_valid = dw_readl(dev, DW_IC_RXFLR);
for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
u32 flags = msgs[dev->msg_read_idx].flags;
*buf = dw_readl(dev, DW_IC_DATA_CMD);
/* Ensure length byte is a valid value */
if (flags & I2C_M_RECV_LEN &&
*buf <= I2C_SMBUS_BLOCK_MAX && *buf > 0) {
len = i2c_dw_recv_len(dev, *buf);
}
buf++;
dev->rx_outstanding--;
}
if (len > 0) {
dev->status |= STATUS_READ_IN_PROGRESS;
dev->rx_buf_len = len;
dev->rx_buf = buf;
return;
} else
dev->status &= ~STATUS_READ_IN_PROGRESS;
}
}
/*
* Prepare controller for a transaction and call i2c_dw_xfer_msg.
*/
static int
i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
int ret;
dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);
pm_runtime_get_sync(dev->dev);
if (dev_WARN_ONCE(dev->dev, dev->suspended, "Transfer while suspended\n")) {
ret = -ESHUTDOWN;
goto done_nolock;
}
reinit_completion(&dev->cmd_complete);
dev->msgs = msgs;
dev->msgs_num = num;
dev->cmd_err = 0;
dev->msg_write_idx = 0;
dev->msg_read_idx = 0;
dev->msg_err = 0;
dev->status = STATUS_IDLE;
dev->abort_source = 0;
dev->rx_outstanding = 0;
ret = i2c_dw_acquire_lock(dev);
if (ret)
goto done_nolock;
ret = i2c_dw_wait_bus_not_busy(dev);
if (ret < 0)
goto done;
/* Start the transfers */
i2c_dw_xfer_init(dev);
/* Wait for tx to complete */
if (!wait_for_completion_timeout(&dev->cmd_complete, adap->timeout)) {
dev_err(dev->dev, "controller timed out\n");
/* i2c_dw_init_master() implicitly disables the adapter */
i2c_recover_bus(&dev->adapter);
i2c_dw_init_master(dev);
ret = -ETIMEDOUT;
goto done;
}
/*
* We must disable the adapter before returning and signaling the end
* of the current transfer. Otherwise the hardware might continue
* generating interrupts which in turn causes a race condition with
* the following transfer. Needs some more investigation if the
* additional interrupts are a hardware bug or this driver doesn't
* handle them correctly yet.
*/
__i2c_dw_disable_nowait(dev);
if (dev->msg_err) {
ret = dev->msg_err;
goto done;
}
/* No error */
if (likely(!dev->cmd_err && !dev->status)) {
ret = num;
goto done;
}
/* We have an error */
if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
ret = i2c_dw_handle_tx_abort(dev);
goto done;
}
if (dev->status)
dev_err(dev->dev,
"transfer terminated early - interrupt latency too high?\n");
ret = -EIO;
done:
i2c_dw_release_lock(dev);
done_nolock:
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
static const struct i2c_algorithm i2c_dw_algo = {
.master_xfer = i2c_dw_xfer,
.functionality = i2c_dw_func,
};
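/*
* The controller cannot generate zero-length transfers (e.g. SMBus Quick),
* so have the I2C core reject them up front.
*/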
static const struct i2c_adapter_quirks i2c_dw_quirks = {
.flags = I2C_AQ_NO_ZERO_LEN,
};
static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
{
u32 stat;
/*
* The IC_INTR_STAT register just indicates "enabled" interrupts.
* The unmasked raw version of the interrupt status bits is available
* in the IC_RAW_INTR_STAT register.
*
* That is,
* stat = dw_readl(IC_INTR_STAT);
* equals to,
* stat = dw_readl(IC_RAW_INTR_STAT) & dw_readl(IC_INTR_MASK);
*
* The raw version might be useful for debugging purposes.
*/
stat = dw_readl(dev, DW_IC_INTR_STAT);
/*
* Do not use the IC_CLR_INTR register to clear interrupts, or
* you'll miss some interrupts, triggered during the period from
* dw_readl(IC_INTR_STAT) to dw_readl(IC_CLR_INTR).
*
* Instead, use the separately-prepared IC_CLR_* registers.
*/
if (stat & DW_IC_INTR_RX_UNDER)
dw_readl(dev, DW_IC_CLR_RX_UNDER);
if (stat & DW_IC_INTR_RX_OVER)
dw_readl(dev, DW_IC_CLR_RX_OVER);
if (stat & DW_IC_INTR_TX_OVER)
dw_readl(dev, DW_IC_CLR_TX_OVER);
if (stat & DW_IC_INTR_RD_REQ)
dw_readl(dev, DW_IC_CLR_RD_REQ);
if (stat & DW_IC_INTR_TX_ABRT) {
/*
* The IC_TX_ABRT_SOURCE register is cleared whenever
* the IC_CLR_TX_ABRT is read. Preserve it beforehand.
*/
dev->abort_source = dw_readl(dev, DW_IC_TX_ABRT_SOURCE);
dw_readl(dev, DW_IC_CLR_TX_ABRT);
}
if (stat & DW_IC_INTR_RX_DONE)
dw_readl(dev, DW_IC_CLR_RX_DONE);
if (stat & DW_IC_INTR_ACTIVITY)
dw_readl(dev, DW_IC_CLR_ACTIVITY);
if (stat & DW_IC_INTR_STOP_DET)
dw_readl(dev, DW_IC_CLR_STOP_DET);
if (stat & DW_IC_INTR_START_DET)
dw_readl(dev, DW_IC_CLR_START_DET);
if (stat & DW_IC_INTR_GEN_CALL)
dw_readl(dev, DW_IC_CLR_GEN_CALL);
return stat;
}
/*
* Interrupt service routine. This gets called whenever an I2C master interrupt
* occurs.
*/
static int i2c_dw_irq_handler_master(struct dw_i2c_dev *dev)
{
u32 stat;
stat = i2c_dw_read_clear_intrbits(dev);
if (stat & DW_IC_INTR_TX_ABRT) {
dev->cmd_err |= DW_IC_ERR_TX_ABRT;
dev->status = STATUS_IDLE;
/*
* Anytime TX_ABRT is set, the contents of the tx/rx
* buffers are flushed. Make sure to skip them.
*/
dw_writel(dev, 0, DW_IC_INTR_MASK);
goto tx_aborted;
}
if (stat & DW_IC_INTR_RX_FULL)
i2c_dw_read(dev);
if (stat & DW_IC_INTR_TX_EMPTY)
i2c_dw_xfer_msg(dev);
/*
* No need to modify or disable the interrupt mask here.
* i2c_dw_xfer_msg() will take care of it according to
* the current transmit status.
*/
tx_aborted:
if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
complete(&dev->cmd_complete);
else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
/* Workaround to trigger pending interrupt */
stat = dw_readl(dev, DW_IC_INTR_MASK);
i2c_dw_disable_int(dev);
dw_writel(dev, stat, DW_IC_INTR_MASK);
}
return 0;
}
static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
{
struct dw_i2c_dev *dev = dev_id;
u32 stat, enabled;
enabled = dw_readl(dev, DW_IC_ENABLE);
stat = dw_readl(dev, DW_IC_RAW_INTR_STAT);
dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat);
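/*
* The IRQ line may be shared: if the controller is disabled or none of
* the interrupts we care about (everything except ACTIVITY) is pending,
* the interrupt is not ours to handle.
*/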
if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
return IRQ_NONE;
i2c_dw_irq_handler_master(dev);
return IRQ_HANDLED;
}
static void i2c_dw_prepare_recovery(struct i2c_adapter *adap)
{
struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
i2c_dw_disable(dev);
reset_control_assert(dev->rst);
i2c_dw_prepare_clk(dev, false);
}
static void i2c_dw_unprepare_recovery(struct i2c_adapter *adap)
{
struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
i2c_dw_prepare_clk(dev, true);
reset_control_deassert(dev->rst);
i2c_dw_init_master(dev);
}
static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
{
struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
struct i2c_adapter *adap = &dev->adapter;
struct gpio_desc *gpio;
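/*
* The recovery GPIOs are optional: when no "scl" GPIO is described by
* the firmware, devm_gpiod_get_optional() returns NULL and GPIO based
* bus recovery is simply left unconfigured.
*/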
gpio = devm_gpiod_get_optional(dev->dev, "scl", GPIOD_OUT_HIGH);
if (IS_ERR_OR_NULL(gpio))
return PTR_ERR_OR_ZERO(gpio);
rinfo->scl_gpiod = gpio;
gpio = devm_gpiod_get_optional(dev->dev, "sda", GPIOD_IN);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
rinfo->sda_gpiod = gpio;
rinfo->recover_bus = i2c_generic_scl_recovery;
rinfo->prepare_recovery = i2c_dw_prepare_recovery;
rinfo->unprepare_recovery = i2c_dw_unprepare_recovery;
adap->bus_recovery_info = rinfo;
dev_info(dev->dev, "running with gpio recovery mode! scl%s",
rinfo->sda_gpiod ? ",sda" : "");
return 0;
}
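/*
* Register the adapter with the I2C core. The bus glue (e.g. the platform
* or PCI driver) is expected to have set up the register mapping, IRQ and
* bus timings in struct dw_i2c_dev before calling this; the exact set of
* required fields is defined by those glue drivers rather than here.
*/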
int i2c_dw_probe(struct dw_i2c_dev *dev)
{
struct i2c_adapter *adap = &dev->adapter;
unsigned long irq_flags;
int ret;
init_completion(&dev->cmd_complete);
dev->init = i2c_dw_init_master;
dev->disable = i2c_dw_disable;
dev->disable_int = i2c_dw_disable_int;
ret = i2c_dw_set_reg_access(dev);
if (ret)
return ret;
ret = i2c_dw_set_timings_master(dev);
if (ret)
return ret;
ret = dev->init(dev);
if (ret)
return ret;
snprintf(adap->name, sizeof(adap->name),
"Synopsys DesignWare I2C adapter");
adap->retries = 3;
adap->algo = &i2c_dw_algo;
adap->quirks = &i2c_dw_quirks;
adap->dev.parent = dev->dev;
i2c_set_adapdata(adap, dev);
if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
irq_flags = IRQF_NO_SUSPEND;
} else {
irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND;
}
i2c_dw_disable_int(dev);
ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr, irq_flags,
dev_name(dev->dev), dev);
if (ret) {
dev_err(dev->dev, "failure requesting irq %i: %d\n",
dev->irq, ret);
return ret;
}
ret = i2c_dw_init_recovery_info(dev);
if (ret)
return ret;
/*
* Increment PM usage count during adapter registration in order to
* avoid possible spurious runtime suspend when adapter device is
* registered to the device core and immediate resume in case bus has
* registered I2C slaves that do I2C transfers in their probe.
*/
pm_runtime_get_noresume(dev->dev);
ret = i2c_add_numbered_adapter(adap);
if (ret)
dev_err(dev->dev, "failure adding adapter: %d\n", ret);
pm_runtime_put_noidle(dev->dev);
return ret;
}
EXPORT_SYMBOL_GPL(i2c_dw_probe);
MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter");
MODULE_LICENSE("GPL");