/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2010 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>

#include "net_driver.h"
#include "spi.h"
#include "efx.h"
#include "nic.h"
#include "mcdi.h"
#include "mcdi_pcol.h"

#define EFX_SPI_VERIFY_BUF_LEN 16

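/* One struct efx_mtd describes a flash or NVRAM store and carries a
 * flexible array of partitions.  A Falcon partition records an offset
 * into the SPI flash/EEPROM; a Siena partition records the MCDI NVRAM
 * type and update state, hence the union below.
 */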
struct efx_mtd_partition {
	struct mtd_info mtd;
	union {
		struct {
			bool updating;
			u8 nvram_type;
			u16 fw_subtype;
		} mcdi;
		size_t offset;
	};
	const char *type_name;
	char name[IFNAMSIZ + 20];
};

struct efx_mtd_ops {
	int (*read)(struct mtd_info *mtd, loff_t start, size_t len,
		    size_t *retlen, u8 *buffer);
	int (*erase)(struct mtd_info *mtd, loff_t start, size_t len);
	int (*write)(struct mtd_info *mtd, loff_t start, size_t len,
		     size_t *retlen, const u8 *buffer);
	int (*sync)(struct mtd_info *mtd);
};

struct efx_mtd {
	struct list_head node;
	struct efx_nic *efx;
	const struct efx_spi_device *spi;
	const char *name;
	const struct efx_mtd_ops *ops;
	size_t n_parts;
	struct efx_mtd_partition part[0];
};

#define efx_for_each_partition(part, efx_mtd)			\
	for ((part) = &(efx_mtd)->part[0];			\
	     (part) != &(efx_mtd)->part[(efx_mtd)->n_parts];	\
	     (part)++)

#define to_efx_mtd_partition(mtd)				\
	container_of(mtd, struct efx_mtd_partition, mtd)

static int falcon_mtd_probe(struct efx_nic *efx);
static int siena_mtd_probe(struct efx_nic *efx);

/* SPI utilities */

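/* Poll the SPI status register roughly every 100ms, for up to 4 seconds,
 * until the flash/EEPROM reports that it has finished its current
 * operation.  The wait is interruptible unless the caller asks otherwise.
 */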
static int
efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
{
	struct efx_mtd *efx_mtd = part->mtd.priv;
	const struct efx_spi_device *spi = efx_mtd->spi;
	struct efx_nic *efx = efx_mtd->efx;
	u8 status;
	int rc, i;

	/* Wait up to 4s for flash/EEPROM to finish a slow operation. */
	for (i = 0; i < 40; i++) {
		__set_current_state(uninterruptible ?
				    TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
		rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
				    &status, sizeof(status));
		if (rc)
			return rc;
		if (!(status & SPI_STATUS_NRDY))
			return 0;
		if (signal_pending(current))
			return -EINTR;
	}
	pr_err("%s: timed out waiting for %s\n", part->name, efx_mtd->name);
	return -ETIMEDOUT;
}

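/* Clear the block-protection bits (BP0-BP2) in the SPI status register so
 * the device can be erased and rewritten.  Returns immediately if the
 * device is already unlocked.
 */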
static int
efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
{
	const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
				SPI_STATUS_BP0);
	u8 status;
	int rc;

	rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
			    &status, sizeof(status));
	if (rc)
		return rc;

	if (!(status & unlock_mask))
		return 0; /* already unlocked */

	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
	if (rc)
		return rc;

	status &= ~unlock_mask;
	rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
			    NULL, sizeof(status));
	if (rc)
		return rc;
	rc = falcon_spi_wait_write(efx, spi);
	if (rc)
		return rc;

	return 0;
}

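/* Erase one whole erase block (len must equal spi->erase_size) and then
 * read it back in EFX_SPI_VERIFY_BUF_LEN-sized chunks to verify that
 * every byte is now 0xff.
 */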
static int
efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
{
	struct efx_mtd *efx_mtd = part->mtd.priv;
	const struct efx_spi_device *spi = efx_mtd->spi;
	struct efx_nic *efx = efx_mtd->efx;
	unsigned pos, block_len;
	u8 empty[EFX_SPI_VERIFY_BUF_LEN];
	u8 buffer[EFX_SPI_VERIFY_BUF_LEN];
	int rc;

	if (len != spi->erase_size)
		return -EINVAL;

	if (spi->erase_command == 0)
		return -EOPNOTSUPP;

	rc = efx_spi_unlock(efx, spi);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
	if (rc)
		return rc;
	rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
			    NULL, 0);
	if (rc)
		return rc;
	rc = efx_spi_slow_wait(part, false);

	/* Verify the entire region has been wiped */
	memset(empty, 0xff, sizeof(empty));
	for (pos = 0; pos < len; pos += block_len) {
		block_len = min(len - pos, sizeof(buffer));
		rc = falcon_spi_read(efx, spi, start + pos, block_len,
				     NULL, buffer);
		if (rc)
			return rc;
		if (memcmp(empty, buffer, block_len))
			return -EIO;

		/* Avoid locking up the system */
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	return rc;
}

/* MTD interface */

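/* Called by the MTD core via mtd->_erase: delegate to the per-NIC erase
 * operation, record the outcome in the erase_info and run the MTD
 * completion callback.
 */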
static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	struct efx_mtd *efx_mtd = mtd->priv;
	int rc;

	rc = efx_mtd->ops->erase(mtd, erase->addr, erase->len);
	if (rc == 0) {
		erase->state = MTD_ERASE_DONE;
	} else {
		erase->state = MTD_ERASE_FAILED;
		erase->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	}
	mtd_erase_callback(erase);
	return rc;
}

static void efx_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	int rc;

	rc = efx_mtd->ops->sync(mtd);
	if (rc)
		pr_err("%s: %s sync failed (%d)\n",
		       part->name, efx_mtd->name, rc);
}

static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
{
	int rc;

	for (;;) {
		rc = mtd_device_unregister(&part->mtd);
		if (rc != -EBUSY)
			break;
		ssleep(1);
	}
	WARN_ON(rc);
}

static void efx_mtd_remove_device(struct efx_mtd *efx_mtd)
{
	struct efx_mtd_partition *part;

	efx_for_each_partition(part, efx_mtd)
		efx_mtd_remove_partition(part);
	list_del(&efx_mtd->node);
	kfree(efx_mtd);
}

static void efx_mtd_rename_device(struct efx_mtd *efx_mtd)
{
	struct efx_mtd_partition *part;

	efx_for_each_partition(part, efx_mtd)
		if (efx_nic_rev(efx_mtd->efx) >= EFX_REV_SIENA_A0)
			snprintf(part->name, sizeof(part->name),
				 "%s %s:%02x", efx_mtd->efx->name,
				 part->type_name, part->mcdi.fw_subtype);
		else
			snprintf(part->name, sizeof(part->name),
				 "%s %s", efx_mtd->efx->name,
				 part->type_name);
}

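/* Fill in the common mtd_info fields and register one MTD device per
 * partition, then add the device to the NIC's mtd_list.  If any
 * registration fails, the partitions registered so far are unwound and
 * -ENOMEM is returned as the most likely cause.
 */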
static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
{
	struct efx_mtd_partition *part;

	efx_mtd->efx = efx;

	efx_mtd_rename_device(efx_mtd);

	efx_for_each_partition(part, efx_mtd) {
		part->mtd.writesize = 1;

		part->mtd.owner = THIS_MODULE;
		part->mtd.priv = efx_mtd;
		part->mtd.name = part->name;
		part->mtd._erase = efx_mtd_erase;
		part->mtd._read = efx_mtd->ops->read;
		part->mtd._write = efx_mtd->ops->write;
		part->mtd._sync = efx_mtd_sync;

		if (mtd_device_register(&part->mtd, NULL, 0))
			goto fail;
	}

	list_add(&efx_mtd->node, &efx->mtd_list);
	return 0;

fail:
	while (part != &efx_mtd->part[0]) {
		--part;
		efx_mtd_remove_partition(part);
	}
	/* Failure is unlikely here, but probably means we're out of memory */
	return -ENOMEM;
}

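/* Unregister and free every MTD device belonging to this NIC.  The
 * WARN_ON documents the expectation that the net device has already been
 * unregistered by this point.
 */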
void efx_mtd_remove(struct efx_nic *efx)
{
	struct efx_mtd *efx_mtd, *next;

	WARN_ON(efx_dev_registered(efx));

	list_for_each_entry_safe(efx_mtd, next, &efx->mtd_list, node)
		efx_mtd_remove_device(efx_mtd);
}

void efx_mtd_rename(struct efx_nic *efx)
{
	struct efx_mtd *efx_mtd;

	ASSERT_RTNL();

	list_for_each_entry(efx_mtd, &efx->mtd_list, node)
		efx_mtd_rename_device(efx_mtd);
}

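/* Entry point for the rest of the driver: choose the SPI-based Falcon
 * implementation or the MCDI-based Siena implementation according to the
 * NIC revision.
 */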
int efx_mtd_probe(struct efx_nic *efx)
{
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		return siena_mtd_probe(efx);
	else
		return falcon_mtd_probe(efx);
}

/* Implementation of MTD operations for Falcon */

static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
			   size_t len, size_t *retlen, u8 *buffer)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	const struct efx_spi_device *spi = efx_mtd->spi;
	struct efx_nic *efx = efx_mtd->efx;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = mutex_lock_interruptible(&nic_data->spi_lock);
	if (rc)
		return rc;
	rc = falcon_spi_read(efx, spi, part->offset + start, len,
			     retlen, buffer);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}

static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	struct efx_nic *efx = efx_mtd->efx;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = mutex_lock_interruptible(&nic_data->spi_lock);
	if (rc)
		return rc;
	rc = efx_spi_erase(part, part->offset + start, len);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}

static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
			    size_t len, size_t *retlen, const u8 *buffer)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	const struct efx_spi_device *spi = efx_mtd->spi;
	struct efx_nic *efx = efx_mtd->efx;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = mutex_lock_interruptible(&nic_data->spi_lock);
	if (rc)
		return rc;
	rc = falcon_spi_write(efx, spi, part->offset + start, len,
			      retlen, buffer);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}

static int falcon_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	struct efx_nic *efx = efx_mtd->efx;
	struct falcon_nic_data *nic_data = efx->nic_data;
	int rc;

	mutex_lock(&nic_data->spi_lock);
	rc = efx_spi_slow_wait(part, true);
	mutex_unlock(&nic_data->spi_lock);
	return rc;
}

static const struct efx_mtd_ops falcon_mtd_ops = {
	.read = falcon_mtd_read,
	.erase = falcon_mtd_erase,
	.write = falcon_mtd_write,
	.sync = falcon_mtd_sync,
};

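/* Falcon exposes up to two partitions: the boot ROM region of the SPI
 * flash and the boot-config region of the SPI EEPROM.  Returns -ENODEV
 * if neither store is present or large enough to contain its region.
 */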
static int falcon_mtd_probe(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	struct efx_spi_device *spi;
	struct efx_mtd *efx_mtd;
	int rc = -ENODEV;

	ASSERT_RTNL();

	spi = &nic_data->spi_flash;
	if (efx_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
		efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
				  GFP_KERNEL);
		if (!efx_mtd)
			return -ENOMEM;

		efx_mtd->spi = spi;
		efx_mtd->name = "flash";
		efx_mtd->ops = &falcon_mtd_ops;

		efx_mtd->n_parts = 1;
		efx_mtd->part[0].mtd.type = MTD_NORFLASH;
		efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
		efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
		efx_mtd->part[0].mtd.erasesize = spi->erase_size;
		efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
		efx_mtd->part[0].type_name = "sfc_flash_bootrom";

		rc = efx_mtd_probe_device(efx, efx_mtd);
		if (rc) {
			kfree(efx_mtd);
			return rc;
		}
	}

	spi = &nic_data->spi_eeprom;
	if (efx_spi_present(spi) && spi->size > EFX_EEPROM_BOOTCONFIG_START) {
		efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
				  GFP_KERNEL);
		if (!efx_mtd)
			return -ENOMEM;

		efx_mtd->spi = spi;
		efx_mtd->name = "EEPROM";
		efx_mtd->ops = &falcon_mtd_ops;

		efx_mtd->n_parts = 1;
		efx_mtd->part[0].mtd.type = MTD_RAM;
		efx_mtd->part[0].mtd.flags = MTD_CAP_RAM;
		efx_mtd->part[0].mtd.size =
			min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
			EFX_EEPROM_BOOTCONFIG_START;
		efx_mtd->part[0].mtd.erasesize = spi->erase_size;
		efx_mtd->part[0].offset = EFX_EEPROM_BOOTCONFIG_START;
		efx_mtd->part[0].type_name = "sfc_bootconfig";

		rc = efx_mtd_probe_device(efx, efx_mtd);
		if (rc) {
			kfree(efx_mtd);
			return rc;
		}
	}

	return rc;
}

/* Implementation of MTD operations for Siena */

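/* On Siena all NVRAM access goes through the management controller, so
 * reads and writes are issued as MCDI requests of at most
 * EFX_MCDI_NVRAM_LEN_MAX bytes each.
 */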
static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
			  size_t len, size_t *retlen, u8 *buffer)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	struct efx_nic *efx = efx_mtd->efx;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
					 buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}

static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	struct efx_nic *efx = efx_mtd->efx;
	loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk = part->mtd.erasesize;
	int rc = 0;

	if (!part->mcdi.updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
		if (rc)
			goto out;
		part->mcdi.updating = true;
	}

	/* The MCDI interface can in fact do multiple erase blocks at once;
	 * but erasing may be slow, so we make multiple calls here to avoid
	 * tripping the MCDI RPC timeout. */
	while (offset < end) {
		rc = efx_mcdi_nvram_erase(efx, part->mcdi.nvram_type, offset,
					  chunk);
		if (rc)
			goto out;
		offset += chunk;
	}
out:
	return rc;
}

static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
			   size_t len, size_t *retlen, const u8 *buffer)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	struct efx_nic *efx = efx_mtd->efx;
	loff_t offset = start;
	loff_t end = min_t(loff_t, start + len, mtd->size);
	size_t chunk;
	int rc = 0;

	if (!part->mcdi.updating) {
		rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
		if (rc)
			goto out;
		part->mcdi.updating = true;
	}

	while (offset < end) {
		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
		rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
					  buffer, chunk);
		if (rc)
			goto out;
		offset += chunk;
		buffer += chunk;
	}
out:
	*retlen = offset - start;
	return rc;
}

static int siena_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_mtd *efx_mtd = mtd->priv;
	struct efx_nic *efx = efx_mtd->efx;
	int rc = 0;

	if (part->mcdi.updating) {
		part->mcdi.updating = false;
		rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
	}

	return rc;
}

static const struct efx_mtd_ops siena_mtd_ops = {
	.read = siena_mtd_read,
	.erase = siena_mtd_erase,
	.write = siena_mtd_write,
	.sync = siena_mtd_sync,
};

struct siena_nvram_type_info {
	int port;
	const char *name;
};

static const struct siena_nvram_type_info siena_nvram_types[] = {
	[MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" },
	[MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
	[MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
	[MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" },
	[MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" },
	[MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" },
	[MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" },
	[MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" },
	[MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" },
	[MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
	[MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
	[MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
};

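/* Fill in one partition from the MCDI NVRAM info for the given type.
 * Types that are unknown, belong to the other port, or are marked
 * protected by the firmware are hidden by returning -ENODEV.
 */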
static int siena_mtd_probe_partition(struct efx_nic *efx,
				     struct efx_mtd *efx_mtd,
				     unsigned int part_id,
				     unsigned int type)
{
	struct efx_mtd_partition *part = &efx_mtd->part[part_id];
	const struct siena_nvram_type_info *info;
	size_t size, erase_size;
	bool protected;
	int rc;

	if (type >= ARRAY_SIZE(siena_nvram_types))
		return -ENODEV;

	info = &siena_nvram_types[type];

	if (info->port != efx_port_num(efx))
		return -ENODEV;

	rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
	if (rc)
		return rc;
	if (protected)
		return -ENODEV; /* hide it */

	part->mcdi.nvram_type = type;
	part->type_name = info->name;

	part->mtd.type = MTD_NORFLASH;
	part->mtd.flags = MTD_CAP_NORFLASH;
	part->mtd.size = size;
	part->mtd.erasesize = erase_size;

	return 0;
}

static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
				     struct efx_mtd *efx_mtd)
{
	struct efx_mtd_partition *part;
	uint16_t fw_subtype_list[MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM];
	int rc;

	rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
	if (rc)
		return rc;

	efx_for_each_partition(part, efx_mtd)
		part->mcdi.fw_subtype = fw_subtype_list[part->mcdi.nvram_type];

	return 0;
}

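/* Query the firmware for the bitmask of available NVRAM types, create a
 * partition for each type visible on this port, fetch the firmware
 * subtypes used in the partition names, and register the whole set.
 */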
static int siena_mtd_probe(struct efx_nic *efx)
{
	struct efx_mtd *efx_mtd;
	int rc = -ENODEV;
	u32 nvram_types;
	unsigned int type;

	ASSERT_RTNL();

	rc = efx_mcdi_nvram_types(efx, &nvram_types);
	if (rc)
		return rc;

	efx_mtd = kzalloc(sizeof(*efx_mtd) +
			  hweight32(nvram_types) * sizeof(efx_mtd->part[0]),
			  GFP_KERNEL);
	if (!efx_mtd)
		return -ENOMEM;

	efx_mtd->name = "Siena NVRAM manager";

	efx_mtd->ops = &siena_mtd_ops;

	type = 0;
	efx_mtd->n_parts = 0;

	while (nvram_types != 0) {
		if (nvram_types & 1) {
			rc = siena_mtd_probe_partition(efx, efx_mtd,
						       efx_mtd->n_parts, type);
			if (rc == 0)
				efx_mtd->n_parts++;
			else if (rc != -ENODEV)
				goto fail;
		}
		type++;
		nvram_types >>= 1;
	}

	rc = siena_mtd_get_fw_subtypes(efx, efx_mtd);
	if (rc)
		goto fail;

	rc = efx_mtd_probe_device(efx, efx_mtd);
fail:
	if (rc)
		kfree(efx_mtd);
	return rc;
}