Merge branch 'upstream'
Conflicts: drivers/scsi/sata_mv.c
commit 4f0e7c51ae
@@ -120,14 +120,27 @@ void (*dev_config) (struct ata_port *, struct ata_device *);
 	<programlisting>
 void (*set_piomode) (struct ata_port *, struct ata_device *);
 void (*set_dmamode) (struct ata_port *, struct ata_device *);
-void (*post_set_mode) (struct ata_port *ap);
+void (*post_set_mode) (struct ata_port *);
+unsigned int (*mode_filter) (struct ata_port *, struct ata_device *, unsigned int);
 	</programlisting>
 
 	<para>
 	Hooks called prior to the issue of SET FEATURES - XFER MODE
-	command. dev->pio_mode is guaranteed to be valid when
-	->set_piomode() is called, and dev->dma_mode is guaranteed to be
-	valid when ->set_dmamode() is called. ->post_set_mode() is
+	command. The optional ->mode_filter() hook is called when libata
+	has built a mask of the possible modes. This is passed to the
+	->mode_filter() function which should return a mask of valid modes
+	after filtering those unsuitable due to hardware limits. It is not
+	valid to use this interface to add modes.
+	</para>
+	<para>
+	dev->pio_mode and dev->dma_mode are guaranteed to be valid when
+	->set_piomode() and when ->set_dmamode() is called. The timings for
+	any other drive sharing the cable will also be valid at this point.
+	That is the library records the decisions for the modes of each
+	drive on a channel before it attempts to set any of them.
+	</para>
+	<para>
+	->post_set_mode() is
 	called unconditionally, after the SET FEATURES - XFER MODE
 	command completes successfully.
 	</para>
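
As a concrete illustration of the ->mode_filter() contract documented above (the hook may only remove modes from the mask, never add them), here is a minimal hypothetical sketch following the prototype added to struct ata_port_operations later in this diff; the quirk it implements, and the foo_ name, are invented for the example:

static unsigned long foo_mode_filter(const struct ata_port *ap,
				     struct ata_device *adev,
				     unsigned long xfer_mask)
{
	/* Invented quirk: no UDMA for the second device on a port.
	 * Only clear bits here; adding modes is not valid. */
	if (adev->devno == 1)
		xfer_mask &= ~ATA_MASK_UDMA;
	return xfer_mask;
}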
@@ -230,6 +243,32 @@ void (*dev_select)(struct ata_port *ap, unsigned int device);
 
 	</sect2>
 
+	<sect2><title>Private tuning method</title>
+	<programlisting>
+void (*set_mode) (struct ata_port *ap);
+	</programlisting>
+
+	<para>
+	By default libata performs drive and controller tuning in
+	accordance with the ATA timing rules and also applies blacklists
+	and cable limits. Some controllers need special handling and have
+	custom tuning rules, typically raid controllers that use ATA
+	commands but do not actually do drive timing.
+	</para>
+
+	<warning>
+	<para>
+	This hook should not be used to replace the standard controller
+	tuning logic when a controller has quirks. Replacing the default
+	tuning logic in that case would bypass handling for drive and
+	bridge quirks that may be important to data reliability. If a
+	controller needs to filter the mode selection it should use the
+	mode_filter hook instead.
+	</para>
+	</warning>
+
+	</sect2>
+
 	<sect2><title>Reset ATA bus</title>
 	<programlisting>
 void (*phy_reset) (struct ata_port *ap);
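
The "Private tuning method" section added above is intentionally brief; as a hedged sketch, a controller of the kind it describes (one that issues ATA commands but does no real drive timing) typically implements ->set_mode() by simply recording a fixed mode for every attached device. The driver name and the choice of forcing PIO 0 are assumptions for illustration only:

static void foo_set_mode(struct ata_port *ap)
{
	int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (!ata_dev_present(dev))
			continue;

		/* No real timing registers to program; just record a mode
		 * so the rest of libata sees a consistent configuration. */
		dev->pio_mode = XFER_PIO_0;
		dev->xfer_mode = XFER_PIO_0;
		dev->xfer_shift = ATA_SHIFT_PIO;
	}
}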
@@ -703,6 +703,7 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int
 	struct ata_probe_ent *probe_ent =
 		ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
 	int p = 0;
+	unsigned long bmdma;
 
 	if (!probe_ent)
 		return NULL;
@@ -716,7 +717,12 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int
 		probe_ent->port[p].altstatus_addr =
 		probe_ent->port[p].ctl_addr =
 			pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
-		probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
+		bmdma = pci_resource_start(pdev, 4);
+		if (bmdma) {
+			if (inb(bmdma + 2) & 0x80)
+				probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
+			probe_ent->port[p].bmdma_addr = bmdma;
+		}
 		ata_std_ports(&probe_ent->port[p]);
 		p++;
 	}
@@ -726,7 +732,13 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int
 		probe_ent->port[p].altstatus_addr =
 		probe_ent->port[p].ctl_addr =
 			pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
-		probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
+		bmdma = pci_resource_start(pdev, 4);
+		if (bmdma) {
+			bmdma += 8;
+			if(inb(bmdma + 2) & 0x80)
+				probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
+			probe_ent->port[p].bmdma_addr = bmdma;
+		}
 		ata_std_ports(&probe_ent->port[p]);
 		p++;
 	}
@@ -740,6 +752,7 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
 				struct ata_port_info *port, int port_num)
 {
 	struct ata_probe_ent *probe_ent;
+	unsigned long bmdma;
 
 	probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
 	if (!probe_ent)
@@ -766,8 +779,13 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
 		break;
 	}
 
-	probe_ent->port[0].bmdma_addr =
-		pci_resource_start(pdev, 4) + 8 * port_num;
+	bmdma = pci_resource_start(pdev, 4);
+	if (bmdma != 0) {
+		bmdma += 8 * port_num;
+		probe_ent->port[0].bmdma_addr = bmdma;
+		if (inb(bmdma + 2) & 0x80)
+			probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
+	}
 	ata_std_ports(&probe_ent->port[0]);
 
 	return probe_ent;
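
For reference, the check added in the hunks above relies on the SFF-8038i bus master status register: bit 7 (0x80) at offset 2 from the BMDMA base reports a "simplex" controller whose two channels share a single DMA engine, which is what ATA_HOST_SIMPLEX records. Pulled out as a stand-alone helper (the name is hypothetical), the idiom is:

static int foo_bmdma_is_simplex(unsigned long bmdma_base)
{
	/* BMDMA status register, bit 7: both channels share one DMA engine */
	return (inb(bmdma_base + 2) & 0x80) != 0;
}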
@@ -62,7 +62,9 @@
 #include "libata.h"
 
 static unsigned int ata_dev_init_params(struct ata_port *ap,
-					struct ata_device *dev);
+					struct ata_device *dev,
+					u16 heads,
+					u16 sectors);
 static void ata_set_mode(struct ata_port *ap);
 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
 					 struct ata_device *dev);
@@ -1140,7 +1142,7 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
 	swap_buf_le16(id, ATA_ID_WORDS);
 
 	/* sanity check */
-	if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
+	if ((class == ATA_DEV_ATA) != (ata_id_is_ata(id) | ata_id_is_cfa(id))) {
 		rc = -EINVAL;
 		reason = "device reports illegal type";
 		goto err_out;
@@ -1156,7 +1158,7 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
 	 * Some drives were very specific about that exact sequence.
 	 */
 	if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
-		err_mask = ata_dev_init_params(ap, dev);
+		err_mask = ata_dev_init_params(ap, dev, id[3], id[6]);
 		if (err_mask) {
 			rc = -EIO;
 			reason = "INIT_DEV_PARAMS failed";
@@ -1418,7 +1420,11 @@ static int ata_bus_probe(struct ata_port *ap)
 	if (!found)
 		goto err_out_disable;
 
-	ata_set_mode(ap);
+	if (ap->ops->set_mode)
+		ap->ops->set_mode(ap);
+	else
+		ata_set_mode(ap);
+
 	if (ap->flags & ATA_FLAG_PORT_DISABLED)
 		goto err_out_disable;
 
@@ -1823,7 +1829,7 @@ static void ata_host_set_dma(struct ata_port *ap)
  */
 static void ata_set_mode(struct ata_port *ap)
 {
-	int i, rc;
+	int i, rc, used_dma = 0;
 
 	/* step 1: calculate xfer_mask */
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
@@ -1841,6 +1847,9 @@ static void ata_set_mode(struct ata_port *ap)
 		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
 		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
 		dev->dma_mode = ata_xfer_mask2mode(dma_mask);
+
+		if (dev->dma_mode)
+			used_dma = 1;
 	}
 
 	/* step 2: always set host PIO timings */
@@ -1862,6 +1871,17 @@ static void ata_set_mode(struct ata_port *ap)
 			goto err_out;
 	}
 
+	/*
+	 * Record simplex status. If we selected DMA then the other
+	 * host channels are not permitted to do so.
+	 */
+
+	if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX))
+		ap->host_set->simplex_claimed = 1;
+
+	/*
+	 * Chip specific finalisation
+	 */
 	if (ap->ops->post_set_mode)
 		ap->ops->post_set_mode(ap);
 
@@ -2151,9 +2171,9 @@ static int sata_phy_resume(struct ata_port *ap)
 * so makes reset sequence different from the original
 * ->phy_reset implementation and Jeff nervous. :-P
 */
-extern void ata_std_probeinit(struct ata_port *ap)
+void ata_std_probeinit(struct ata_port *ap)
 {
-	if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
+	if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) {
 		sata_phy_resume(ap);
 		if (sata_dev_present(ap))
 			ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
@@ -2651,13 +2671,14 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
 */
static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
 {
+	struct ata_host_set *hs = ap->host_set;
 	unsigned long xfer_mask;
 	int i;
 
 	xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
 				      ap->udma_mask);
 
-	/* use port-wide xfermask for now */
+	/* FIXME: Use port-wide xfermask for now */
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
 		struct ata_device *d = &ap->device[i];
 		if (!ata_dev_present(d))
@@ -2667,12 +2688,23 @@ static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
 		xfer_mask &= ata_id_xfermask(d->id);
 		if (ata_dma_blacklisted(d))
 			xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+		/* Apply cable rule here. Don't apply it early because when
+		   we handle hot plug the cable type can itself change */
+		if (ap->cbl == ATA_CBL_PATA40)
+			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
 	}
 
 	if (ata_dma_blacklisted(dev))
 		printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
 		       "disabling DMA\n", ap->id, dev->devno);
 
+	if (hs->flags & ATA_HOST_SIMPLEX) {
+		if (hs->simplex_claimed)
+			xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+	}
+	if (ap->ops->mode_filter)
+		xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
+
 	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
 			    &dev->udma_mask);
 }
@@ -2727,16 +2759,16 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
 */
 
static unsigned int ata_dev_init_params(struct ata_port *ap,
-					struct ata_device *dev)
+					struct ata_device *dev,
+					u16 heads,
+					u16 sectors)
 {
 	struct ata_taskfile tf;
 	unsigned int err_mask;
-	u16 sectors = dev->id[6];
-	u16 heads = dev->id[3];
 
 	/* Number of sectors per track 1-255. Number of heads 1-16 */
 	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
-		return 0;
+		return AC_ERR_INVALID;
 
 	/* set up init dev params taskfile */
 	DPRINTK("init dev params \n");
@@ -4678,6 +4710,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
 	host_set->mmio_base = ent->mmio_base;
 	host_set->private_data = ent->private_data;
 	host_set->ops = ent->port_ops;
+	host_set->flags = ent->host_set_flags;
 
 	/* register each port bound to this device */
 	for (i = 0; i < ent->n_ports; i++) {
@@ -1010,7 +1010,7 @@ static void mv_fill_sg(struct ata_queued_cmd *qc)
 
 		pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
 		pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
-		pp->sg_tbl[i].flags_size = cpu_to_le32(len);
+		pp->sg_tbl[i].flags_size = cpu_to_le32(len & 0xffff);
 
 		sg_len -= len;
 		addr += len;
@@ -1350,7 +1350,6 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 {
 	void __iomem *mmio = host_set->mmio_base;
 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
-	struct ata_port *ap;
 	struct ata_queued_cmd *qc;
 	u32 hc_irq_cause;
 	int shift, port, port0, hard_port, handled;
@@ -1373,21 +1372,29 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 
 	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
 		u8 ata_status = 0;
-		ap = host_set->ports[port];
+		struct ata_port *ap = host_set->ports[port];
+		struct mv_port_priv *pp = ap->private_data;
+
 		hard_port = port & MV_PORT_MASK;	/* range 0-3 */
 		handled = 0;	/* ensure ata_status is set if handled++ */
 
-		if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
-			/* new CRPB on the queue; just one at a time until NCQ
-			 */
-			ata_status = mv_get_crpb_status(ap);
-			handled++;
-		} else if ((DEV_IRQ << hard_port) & hc_irq_cause) {
-			/* received ATA IRQ; read the status reg to clear INTRQ
-			 */
-			ata_status = readb((void __iomem *)
+		/* Note that DEV_IRQ might happen spuriously during EDMA,
+		 * and should be ignored in such cases. We could mask it,
+		 * but it's pretty rare and may not be worth the overhead.
+		 */
+		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
+			/* EDMA: check for response queue interrupt */
+			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
+				ata_status = mv_get_crpb_status(ap);
+				handled = 1;
+			}
+		} else {
+			/* PIO: check for device (drive) interrupt */
+			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
+				ata_status = readb((void __iomem *)
					   ap->ioaddr.status_addr);
-			handled++;
+				handled = 1;
+			}
 		}
 
 		if (ap && (ap->flags & ATA_FLAG_PORT_DISABLED))
@@ -1402,12 +1409,12 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 		if ((PORT0_ERR << shift) & relevant) {
 			mv_err_intr(ap);
 			err_mask |= AC_ERR_OTHER;
-			handled++;
+			handled = 1;
 		}
 
-		if (handled && ap) {
+		if (handled) {
 			qc = ata_qc_from_tag(ap, ap->active_tag);
-			if (NULL != qc) {
+			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
 				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port,ata_status);
 				/* mark qc status appropriately */
@@ -161,6 +161,9 @@ enum {
 	ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
 	ATA_QCFLAG_EH_SCHEDULED = (1 << 5), /* EH scheduled */
 
+	/* host set flags */
+	ATA_HOST_SIMPLEX = (1 << 0),	/* Host is simplex, one DMA channel per host_set only */
+
 	/* various lengths of time */
 	ATA_TMOUT_BOOT = 30 * HZ,	/* heuristic */
 	ATA_TMOUT_BOOT_QUICK = 7 * HZ,	/* heuristic */
@@ -275,6 +278,7 @@ struct ata_probe_ent {
 	unsigned long irq;
 	unsigned int irq_flags;
 	unsigned long host_flags;
+	unsigned long host_set_flags;
 	void __iomem *mmio_base;
 	void *private_data;
 };
@@ -287,6 +291,9 @@ struct ata_host_set {
 	unsigned int n_ports;
 	void *private_data;
 	const struct ata_port_operations *ops;
+	unsigned long flags;
+	int simplex_claimed;	/* Keep seperate in case we
+				   ever need to do this locked */
 	struct ata_port * ports[0];
 };
 
@@ -415,6 +422,7 @@ struct ata_port_operations {
 
 	void (*set_piomode) (struct ata_port *, struct ata_device *);
 	void (*set_dmamode) (struct ata_port *, struct ata_device *);
+	unsigned long (*mode_filter) (const struct ata_port *, struct ata_device *, unsigned long);
 
 	void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf);
 	void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);
@@ -425,6 +433,7 @@ struct ata_port_operations {
 	void (*dev_select)(struct ata_port *ap, unsigned int device);
 
 	void (*phy_reset) (struct ata_port *ap); /* obsolete */
+	void (*set_mode) (struct ata_port *ap);
 	int (*probe_reset) (struct ata_port *ap, unsigned int *classes);
 
 	void (*post_set_mode) (struct ata_port *ap);
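
Tying the header additions together, a driver opts into the new hooks by filling them in its operations table. A hypothetical fragment, reusing the foo_* sketches from earlier on this page and omitting the unrelated members:

static const struct ata_port_operations foo_port_ops = {
	/* ... taskfile, interrupt and reset hooks omitted ... */
	.mode_filter	= foo_mode_filter,	/* trim the xfer mask */
	/* or, to take over tuning entirely (see the warning above): */
	/* .set_mode	= foo_set_mode, */
};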