spi: Fixes for v3.15

A few core fixes around outlying cases here, nothing that should affect
 most users but useful fixes.  The diffstat is rather larger than one
 might hope due to some simple code motion in the fix for !CONFIG_DMA;
 the actual meaningful change is much smaller.
 
  - Fix handling of unsupported dual and quad mode support on slave
    registration so that drivers that can degrade gracefully do so,
    preventing regressions for the drivers this support is being added to.
  - Fix the build in !CONFIG_DMA cases following the addition of generic
    DMA mapping support.
  - Fix error handling for queue creation, a path that wider kernel
    changes have made easier to trigger.
  - A couple of driver-specific fixes.
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABAgAGBQJTc5CkAAoJELSic+t+oim9p+cP/2qp1ud28ZPSRk4bxcjoNZng
 0bR7JF5Ji0t2Md7KkvJx4/1k1A7OvAk/i3WM2TUHkjQDcUaIhfoYEEViie2xzX4H
 UN9PvCvE+NcEDewQfos54mT7lQgWt2D31NsfQLAK/wtQBauj7yEDpqToGzvMRdz6
 4ni+LG9/TDu7PN76pNzV0yth5ckrjAjSC9TZH5B0N7S80aZurGG2EkJU6UoyOnK1
 zFxHtUq+lBJw+DsOF8J6fIRCexoLyv1vw5cMO5AqXW6OPzCD7WEt0kpm2L4y3Ois
 NcOf3rnEJLHd8+4BeICgT6e9LStyeHhtRbGJB6b+YvBr0dR309+vAxSrIQvlFi7T
 U+f4CdbxlBWW6u07R39iUM+OPMYic4BE3gE9Z7NxlwdLWKJVUNvQ6ICazLnUITbL
 Qoqmk7cERMKeOjTtqm/fFI27tkvEq+rqhdRBzOldWxweuoeaSFnBFseWd0nAj3M8
 KrZtVuBcoldDpzx05cF/nwrYgbdScAJYp9XAepZRLBidb7Epyh78t6uWJWnyOz/N
 VZvgw+DkX2v6Gyhrb5/+c6jt6T1NGdbBILX7vTG1ZqWn0VanNP6oFiCTJvT2lOsm
 wQQ7HrTKUQJsJSV015l9rGnYZeg3h1gDRnsnE8SPc5wWTxJ+60U0Pvd51iE6l4uR
 Kb+pVOx+xVh9kiEgJwhV
 =WbM5
 -----END PGP SIGNATURE-----

Merge tag 'spi-v3.15-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi

Pull spi fixes from Mark Brown:
 "A few core fixes around outlying cases here, nothing that should
  affect most users but useful fixes.  The diffstat is rather larger
  than one might hope due some simple code motion in the fix for
  !CONFIG_DMA, the actual meaningful change is much smaller.

   - Fix handling of unsupported dual and quad mode support on slave
     registration so that drivers that can degrade gracefully do so,
     preventing regressions for drivers this is added.
   - Fix build in !CONFIG_DMA cases following addition of generic DMA
     mapping support.
   - Fix error handling for queue creation which due to wider kernel
     changes can be triggered more easily.
   - A couple of driver specific fixes"

* tag 'spi-v3.15-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi:
  spi/pxa2xx: Prevent DMA from transferring too many bytes
  spi: core: Don't destroy master queue if we fail to create it
  spi: qup: Fix return value checking for pm_runtime_get_sync()
  spi: core: Protect DMA code by #ifdef CONFIG_HAS_DMA
  spi: core: Ignore unsupported Dual/Quad Transfer Mode bits
Linus Torvalds 2014-05-21 18:53:55 +09:00
commit b2e3432af1
3 changed files with 78 additions and 64 deletions

--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -29,18 +29,6 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
 	struct sg_table *sgt;
 	void *buf, *pbuf;
 
-	/*
-	 * Some DMA controllers have problems transferring buffers that are
-	 * not multiple of 4 bytes. So we truncate the transfer so that it
-	 * is suitable for such controllers, and handle the trailing bytes
-	 * manually after the DMA completes.
-	 *
-	 * REVISIT: It would be better if this information could be
-	 * retrieved directly from the DMA device in a similar way than
-	 * ->copy_align etc. is done.
-	 */
-	len = ALIGN(drv_data->len, 4);
-
 	if (dir == DMA_TO_DEVICE) {
 		dmadev = drv_data->tx_chan->device->dev;
 		sgt = &drv_data->tx_sgt;
@@ -144,12 +132,8 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
 	if (!error) {
 		pxa2xx_spi_unmap_dma_buffers(drv_data);
 
-		/* Handle the last bytes of unaligned transfer */
 		drv_data->tx += drv_data->tx_map_len;
-		drv_data->write(drv_data);
-
 		drv_data->rx += drv_data->rx_map_len;
-		drv_data->read(drv_data);
 
 		msg->actual_length += drv_data->len;
 		msg->state = pxa2xx_spi_next_transfer(drv_data);
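The removed comment says the transfer is truncated, but the kernel's ALIGN() rounds up to the next multiple of four, so the mapped length could exceed the requested transfer; that is the "too many bytes" the commit subject refers to. A small standalone sketch of the arithmetic, where ALIGN_UP mirrors the kernel macro and ALIGN_DOWN is what actual truncation would look like (both are local stand-ins, not kernel code):

#include <stdio.h>

/* Same arithmetic as the kernel's ALIGN(): round x up to a multiple of a
 * (a must be a power of two).  ALIGN_DOWN is the truncating variant. */
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	unsigned int len = 7;	/* e.g. a 7-byte SPI transfer */

	/* Prints 8: one byte more than was asked for, hence the DMA overrun. */
	printf("ALIGN_UP(%u, 4)   = %u\n", len, ALIGN_UP(len, 4));
	/* Prints 4: this would truncate as the old comment describes. */
	printf("ALIGN_DOWN(%u, 4) = %u\n", len, ALIGN_DOWN(len, 4));
	return 0;
}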

--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -734,7 +734,7 @@ static int spi_qup_remove(struct platform_device *pdev)
 	int ret;
 
 	ret = pm_runtime_get_sync(&pdev->dev);
-	if (ret)
+	if (ret < 0)
 		return ret;
 
 	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
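For context on the one-character change above: pm_runtime_get_sync() returns a negative errno on failure but may also return 1 when the device was already runtime-active, so if (ret) treated a successful call as an error. A hedged sketch of the usual calling convention in a hypothetical driver's remove path (example_remove() and its teardown are made up; only the return-value handling mirrors the fix):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Hypothetical remove path; only the pm_runtime_get_sync() handling matters. */
static int example_remove(struct platform_device *pdev)
{
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)		/* negative errno: resume failed */
		return ret;
	/* 0 or 1 (already active) both mean the device is now powered. */

	/* ... device-specific teardown would go here ... */

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}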

--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -580,6 +580,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
 		spi->master->set_cs(spi, !enable);
 }
 
+#ifdef CONFIG_HAS_DMA
 static int spi_map_buf(struct spi_master *master, struct device *dev,
 		       struct sg_table *sgt, void *buf, size_t len,
 		       enum dma_data_direction dir)
@@ -637,55 +638,12 @@ static void spi_unmap_buf(struct spi_master *master, struct device *dev,
 	}
 }
 
-static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
 {
 	struct device *tx_dev, *rx_dev;
 	struct spi_transfer *xfer;
-	void *tmp;
-	unsigned int max_tx, max_rx;
 	int ret;
 
-	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
-		max_tx = 0;
-		max_rx = 0;
-
-		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-			if ((master->flags & SPI_MASTER_MUST_TX) &&
-			    !xfer->tx_buf)
-				max_tx = max(xfer->len, max_tx);
-			if ((master->flags & SPI_MASTER_MUST_RX) &&
-			    !xfer->rx_buf)
-				max_rx = max(xfer->len, max_rx);
-		}
-
-		if (max_tx) {
-			tmp = krealloc(master->dummy_tx, max_tx,
-				       GFP_KERNEL | GFP_DMA);
-			if (!tmp)
-				return -ENOMEM;
-			master->dummy_tx = tmp;
-			memset(tmp, 0, max_tx);
-		}
-
-		if (max_rx) {
-			tmp = krealloc(master->dummy_rx, max_rx,
-				       GFP_KERNEL | GFP_DMA);
-			if (!tmp)
-				return -ENOMEM;
-			master->dummy_rx = tmp;
-		}
-
-		if (max_tx || max_rx) {
-			list_for_each_entry(xfer, &msg->transfers,
-					    transfer_list) {
-				if (!xfer->tx_buf)
-					xfer->tx_buf = master->dummy_tx;
-				if (!xfer->rx_buf)
-					xfer->rx_buf = master->dummy_rx;
-			}
-		}
-	}
-
 	if (!master->can_dma)
 		return 0;
 
@@ -742,6 +700,69 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
 	return 0;
 }
 
+#else /* !CONFIG_HAS_DMA */
+static inline int __spi_map_msg(struct spi_master *master,
+				struct spi_message *msg)
+{
+	return 0;
+}
+
+static inline int spi_unmap_msg(struct spi_master *master,
+				struct spi_message *msg)
+{
+	return 0;
+}
+#endif /* !CONFIG_HAS_DMA */
+
+static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+	struct spi_transfer *xfer;
+	void *tmp;
+	unsigned int max_tx, max_rx;
+
+	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
+		max_tx = 0;
+		max_rx = 0;
+
+		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+			if ((master->flags & SPI_MASTER_MUST_TX) &&
+			    !xfer->tx_buf)
+				max_tx = max(xfer->len, max_tx);
+			if ((master->flags & SPI_MASTER_MUST_RX) &&
+			    !xfer->rx_buf)
+				max_rx = max(xfer->len, max_rx);
+		}
+
+		if (max_tx) {
+			tmp = krealloc(master->dummy_tx, max_tx,
+				       GFP_KERNEL | GFP_DMA);
+			if (!tmp)
+				return -ENOMEM;
+			master->dummy_tx = tmp;
+			memset(tmp, 0, max_tx);
+		}
+
+		if (max_rx) {
+			tmp = krealloc(master->dummy_rx, max_rx,
+				       GFP_KERNEL | GFP_DMA);
+			if (!tmp)
+				return -ENOMEM;
+			master->dummy_rx = tmp;
+		}
+
+		if (max_tx || max_rx) {
+			list_for_each_entry(xfer, &msg->transfers,
+					    transfer_list) {
+				if (!xfer->tx_buf)
+					xfer->tx_buf = master->dummy_tx;
+				if (!xfer->rx_buf)
+					xfer->rx_buf = master->dummy_rx;
+			}
+		}
+	}
+
+	return __spi_map_msg(master, msg);
+}
 
 /*
  * spi_transfer_one_message - Default implementation of transfer_one_message()
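The bulk of the diffstat is this code motion: the dummy-buffer handling, which needs no DMA API, moves into a spi_map_msg() that always runs, while the mapping proper lives in __spi_map_msg()/spi_unmap_msg(), compiled only under CONFIG_HAS_DMA and replaced by static inline no-op stubs otherwise. The same pattern in miniature, with made-up names (foo_dev, foo_map() and foo_unmap() are not kernel API):

/* Illustration only: foo_dev, foo_map() and foo_unmap() are hypothetical. */
struct foo_dev;

#ifdef CONFIG_HAS_DMA
/* Real implementations, defined elsewhere, may use the DMA mapping API. */
int foo_map(struct foo_dev *dev);
void foo_unmap(struct foo_dev *dev);
#else /* !CONFIG_HAS_DMA */
/* Stubs keep callers free of #ifdefs and let the file build without DMA. */
static inline int foo_map(struct foo_dev *dev)
{
	return 0;
}
static inline void foo_unmap(struct foo_dev *dev)
{
}
#endif /* !CONFIG_HAS_DMA */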
@@ -1151,7 +1172,6 @@ static int spi_master_initialize_queue(struct spi_master *master)
 {
 	int ret;
 
-	master->queued = true;
 	master->transfer = spi_queued_transfer;
 	if (!master->transfer_one_message)
 		master->transfer_one_message = spi_transfer_one_message;
@@ -1162,6 +1182,7 @@ static int spi_master_initialize_queue(struct spi_master *master)
 		dev_err(&master->dev, "problem initializing queue\n");
 		goto err_init_queue;
 	}
+	master->queued = true;
 	ret = spi_start_queue(master);
 	if (ret) {
 		dev_err(&master->dev, "problem starting queue\n");
@@ -1171,8 +1192,8 @@ static int spi_master_initialize_queue(struct spi_master *master)
 	return 0;
 
 err_start_queue:
-err_init_queue:
 	spi_destroy_queue(master);
+err_init_queue:
 	return ret;
 }
 
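The two changes above enforce the usual unwind ordering: record state (master->queued) only once the queue actually exists, and place the error labels so that spi_destroy_queue() is skipped when creation itself failed. A standalone sketch of that pattern with invented names (create_queue(), start_queue() and destroy_queue() merely stand in for the spi core helpers):

#include <stdbool.h>

struct example { bool queued; };

/* Invented helpers standing in for spi_init_queue()/spi_start_queue()/
 * spi_destroy_queue(); they trivially succeed here. */
static int create_queue(struct example *ex)   { (void)ex; return 0; }
static int start_queue(struct example *ex)    { (void)ex; return 0; }
static void destroy_queue(struct example *ex) { (void)ex; }

static int example_initialize_queue(struct example *ex)
{
	int ret;

	ret = create_queue(ex);
	if (ret)
		goto err_create;	/* nothing was created: nothing to destroy */

	ex->queued = true;		/* set only after creation succeeded */

	ret = start_queue(ex);
	if (ret)
		goto err_start;
	return 0;

err_start:
	destroy_queue(ex);		/* undo only what actually exists */
err_create:
	return ret;
}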
@@ -1756,7 +1777,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
  */
 int spi_setup(struct spi_device *spi)
 {
-	unsigned	bad_bits;
+	unsigned	bad_bits, ugly_bits;
 	int		status = 0;
 
 	/* check mode to prevent that DUAL and QUAD set at the same time
@@ -1776,6 +1797,15 @@ int spi_setup(struct spi_device *spi)
 	 * that aren't supported with their current master
 	 */
 	bad_bits = spi->mode & ~spi->master->mode_bits;
+	ugly_bits = bad_bits &
+		    (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
+	if (ugly_bits) {
+		dev_warn(&spi->dev,
+			 "setup: ignoring unsupported mode bits %x\n",
+			 ugly_bits);
+		spi->mode &= ~ugly_bits;
+		bad_bits &= ~ugly_bits;
+	}
 	if (bad_bits) {
 		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
 			bad_bits);
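With the ugly_bits handling above, spi_setup() now warns about and clears unsupported SPI_TX_DUAL/QUAD and SPI_RX_DUAL/QUAD requests instead of failing, so a slave driver can ask for the wider mode and fall back if it was stripped. A hedged sketch of how a probe path might take advantage of that (the driver and its fallback policy are hypothetical, not part of this series):

#include <linux/spi/spi.h>

/* Hypothetical flash-style slave: the fallback check is the point. */
static int example_flash_probe(struct spi_device *spi)
{
	int ret;

	spi->mode |= SPI_TX_QUAD | SPI_RX_QUAD;	/* prefer quad-wide transfers */

	ret = spi_setup(spi);
	if (ret)
		return ret;

	/* After this fix an unsupported request is dropped with a warning
	 * rather than rejected, so check what actually survived. */
	if (!(spi->mode & SPI_RX_QUAD))
		dev_info(&spi->dev, "quad mode unavailable, using single-wire transfers\n");

	return 0;
}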