2017-11-03 17:28:30 +07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0+
|
2005-04-17 05:20:36 +07:00
|
|
|
/*
|
|
|
|
* Driver for the PLX NET2280 USB device controller.
|
|
|
|
* Specs and errata are available from <http://www.plxtech.com>.
|
|
|
|
*
|
2006-09-02 17:13:45 +07:00
|
|
|
* PLX Technology Inc. (formerly NetChip Technology) supported the
|
2005-04-17 05:20:36 +07:00
|
|
|
* development of this driver.
|
|
|
|
*
|
|
|
|
*
|
|
|
|
* CODE STATUS HIGHLIGHTS
|
|
|
|
*
|
|
|
|
* This driver should work well with most "gadget" drivers, including
|
2012-11-07 04:52:36 +07:00
|
|
|
* the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers
|
2005-04-17 05:20:36 +07:00
|
|
|
* as well as Gadget Zero and Gadgetfs.
|
|
|
|
*
|
2014-11-28 20:50:46 +07:00
|
|
|
* DMA is enabled by default.
|
2005-04-17 05:20:36 +07:00
|
|
|
*
|
2014-05-20 23:30:03 +07:00
|
|
|
* MSI is enabled by default. The legacy IRQ is used if MSI couldn't
|
|
|
|
* be enabled.
|
|
|
|
*
|
2005-04-17 05:20:36 +07:00
|
|
|
* Note that almost all the errata workarounds here are only needed for
|
|
|
|
* rev1 chips. Rev1a silicon (0110) fixes almost all of them.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Copyright (C) 2003 David Brownell
|
|
|
|
* Copyright (C) 2003-2005 PLX Technology, Inc.
|
2014-05-20 23:30:03 +07:00
|
|
|
* Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
|
2005-04-17 05:20:36 +07:00
|
|
|
*
|
2006-09-02 17:13:45 +07:00
|
|
|
* Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility
|
|
|
|
* with 2282 chip
|
2006-03-20 02:49:14 +07:00
|
|
|
*
|
2014-05-20 23:30:03 +07:00
|
|
|
* Modified Ricardo Ribalda Qtechnology AS to provide compatibility
|
|
|
|
* with usb 338x chip. Based on PLX driver
|
|
|
|
*
|
2005-04-17 05:20:36 +07:00
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License as published by
|
|
|
|
* the Free Software Foundation; either version 2 of the License, or
|
|
|
|
* (at your option) any later version.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/pci.h>
|
2006-01-19 14:55:08 +07:00
|
|
|
#include <linux/dma-mapping.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/ioport.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/timer.h>
|
|
|
|
#include <linux/list.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/moduleparam.h>
|
|
|
|
#include <linux/device.h>
|
2006-12-17 06:34:53 +07:00
|
|
|
#include <linux/usb/ch9.h>
|
2007-10-05 08:05:17 +07:00
|
|
|
#include <linux/usb/gadget.h>
|
2011-06-02 11:51:29 +07:00
|
|
|
#include <linux/prefetch.h>
|
2014-05-20 23:30:09 +07:00
|
|
|
#include <linux/io.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
#include <asm/byteorder.h>
|
|
|
|
#include <asm/irq.h>
|
|
|
|
#include <asm/unaligned.h>
|
|
|
|
|
2014-05-20 23:30:03 +07:00
|
|
|
#define DRIVER_DESC "PLX NET228x/USB338x USB Peripheral Controller"
|
|
|
|
#define DRIVER_VERSION "2005 Sept 27/v3.0"
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
#define EP_DONTUSE 13 /* nonzero */
|
|
|
|
|
|
|
|
#define USE_RDK_LEDS /* GPIO pins control three LEDs */
|
|
|
|
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
static const char driver_name[] = "net2280";
|
|
|
|
static const char driver_desc[] = DRIVER_DESC;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-05-20 23:30:03 +07:00
|
|
|
static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 };
|
2014-05-20 23:30:09 +07:00
|
|
|
static const char ep0name[] = "ep0";
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2015-07-31 21:00:36 +07:00
|
|
|
#define EP_INFO(_name, _caps) \
|
|
|
|
{ \
|
|
|
|
.name = _name, \
|
|
|
|
.caps = _caps, \
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct {
|
|
|
|
const char *name;
|
|
|
|
const struct usb_ep_caps caps;
|
|
|
|
} ep_info_dft[] = { /* Default endpoint configuration */
|
|
|
|
EP_INFO(ep0name,
|
|
|
|
USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
|
|
|
|
EP_INFO("ep-a",
|
|
|
|
USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
|
|
|
|
EP_INFO("ep-b",
|
|
|
|
USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
|
|
|
|
EP_INFO("ep-c",
|
|
|
|
USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
|
|
|
|
EP_INFO("ep-d",
|
|
|
|
USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
|
|
|
|
EP_INFO("ep-e",
|
|
|
|
USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
|
|
|
|
EP_INFO("ep-f",
|
|
|
|
USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
|
|
|
|
EP_INFO("ep-g",
|
|
|
|
USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
|
|
|
|
EP_INFO("ep-h",
|
|
|
|
USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_ALL)),
|
|
|
|
}, ep_info_adv[] = { /* Endpoints for usb3380 advance mode */
|
|
|
|
EP_INFO(ep0name,
|
|
|
|
USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
|
|
|
|
EP_INFO("ep1in",
|
|
|
|
USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
|
|
|
|
EP_INFO("ep2out",
|
|
|
|
USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
|
|
|
|
EP_INFO("ep3in",
|
|
|
|
USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
|
|
|
|
EP_INFO("ep4out",
|
|
|
|
USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
|
|
|
|
EP_INFO("ep1out",
|
|
|
|
USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
|
|
|
|
EP_INFO("ep2in",
|
|
|
|
USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
|
|
|
|
EP_INFO("ep3out",
|
|
|
|
USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_OUT)),
|
|
|
|
EP_INFO("ep4in",
|
|
|
|
USB_EP_CAPS(USB_EP_CAPS_TYPE_ALL, USB_EP_CAPS_DIR_IN)),
|
2015-02-02 16:55:23 +07:00
|
|
|
};
|
|
|
|
|
2015-07-31 21:00:36 +07:00
|
|
|
#undef EP_INFO
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/* mode 0 == ep-{a,b,c,d} 1K fifo each
|
|
|
|
* mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
|
|
|
|
* mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
|
|
|
|
*/
|
2014-05-20 23:30:09 +07:00
|
|
|
static ushort fifo_mode;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* "modprobe net2280 fifo_mode=1" etc */
|
2014-05-20 23:30:10 +07:00
|
|
|
module_param(fifo_mode, ushort, 0644);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* enable_suspend -- When enabled, the driver will respond to
|
|
|
|
* USB suspend requests by powering down the NET2280. Otherwise,
|
2011-03-31 08:57:33 +07:00
|
|
|
* USB suspend requests will be ignored. This is acceptable for
|
2006-03-20 02:49:14 +07:00
|
|
|
* self-powered devices
|
2005-04-17 05:20:36 +07:00
|
|
|
*/
|
2014-05-20 23:30:06 +07:00
|
|
|
static bool enable_suspend;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* "modprobe net2280 enable_suspend=1" etc */
|
2014-05-20 23:30:10 +07:00
|
|
|
module_param(enable_suspend, bool, 0444);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
/* Map an endpoint descriptor's bmAttributes transfer type to a short name. */
static char *type_string(u8 bmAttributes)
{
	u8 xfer = bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	switch (xfer) {
	case USB_ENDPOINT_XFER_BULK:
		return "bulk";
	case USB_ENDPOINT_XFER_ISOC:
		return "iso";
	case USB_ENDPOINT_XFER_INT:
		return "intr";
	default:
		/* USB_ENDPOINT_XFER_CONTROL (and anything unexpected) */
		return "control";
	}
}
|
|
|
|
|
|
|
|
#include "net2280.h"
|
|
|
|
|
2014-05-20 23:30:05 +07:00
|
|
|
#define valid_bit cpu_to_le32(BIT(VALID_BIT))
|
|
|
|
#define dma_done_ie cpu_to_le32(BIT(DMA_DONE_INTERRUPT_ENABLE))
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2015-05-17 03:33:31 +07:00
|
|
|
static void ep_clear_seqnum(struct net2280_ep *ep);
|
2015-05-17 03:33:39 +07:00
|
|
|
static void stop_activity(struct net2280 *dev,
|
|
|
|
struct usb_gadget_driver *driver);
|
|
|
|
static void ep0_start(struct net2280 *dev);
|
2015-05-17 03:33:31 +07:00
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/*-------------------------------------------------------------------------*/
|
2014-05-20 23:30:03 +07:00
|
|
|
static inline void enable_pciirqenb(struct net2280_ep *ep)
|
|
|
|
{
|
|
|
|
u32 tmp = readl(&ep->dev->regs->pciirqenb0);
|
|
|
|
|
2014-05-20 23:30:12 +07:00
|
|
|
if (ep->dev->quirks & PLX_LEGACY)
|
2014-05-20 23:30:05 +07:00
|
|
|
tmp |= BIT(ep->num);
|
2014-05-20 23:30:03 +07:00
|
|
|
else
|
2014-05-20 23:30:05 +07:00
|
|
|
tmp |= BIT(ep_bit[ep->num]);
|
2014-05-20 23:30:03 +07:00
|
|
|
writel(tmp, &ep->dev->regs->pciirqenb0);
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/*
 * net2280_enable - usb_ep_ops.enable: configure and activate an endpoint.
 * @_ep:  endpoint to enable (must not be ep0)
 * @desc: endpoint descriptor chosen by the gadget driver
 *
 * Validates the descriptor against chip-specific constraints (erratum 0119
 * reserves endpoint number EP_DONTUSE; 338x restricts usable addresses and,
 * in enhanced mode, IN/OUT hardware-endpoint pairing), then programs the
 * endpoint configuration, NAK policy, and interrupt enables under dev->lock.
 *
 * Returns 0 on success or a negative errno; every failure is logged via
 * the print_err path (except the very first argument-sanity check).
 */
static int
net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct net2280 *dev;
	struct net2280_ep *ep;
	u32 max;
	u32 tmp = 0;
	u32 type;
	unsigned long flags;
	/* ep_key[n] != 0 marks hardware endpoints that may not be used as
	 * IN endpoints in 338x enhanced mode (indexed by ep->num). */
	static const u32 ep_key[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 };
	int ret = 0;

	/* NOTE: ep is computed before the !_ep check; container_of on a
	 * NULL _ep yields a bogus pointer that is only compared, not
	 * dereferenced, before returning. */
	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || !desc || ep->desc || _ep->name == ep0name ||
			desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_err("%s: failed at line=%d\n", __func__, __LINE__);
		return -EINVAL;
	}
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
		ret = -ESHUTDOWN;
		goto print_err;
	}

	/* erratum 0119 workaround ties up an endpoint number */
	if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE) {
		ret = -EDOM;
		goto print_err;
	}

	if (dev->quirks & PLX_PCIE) {
		/* 338x: only USB endpoint addresses 0..0x0b are backed by
		 * hardware endpoints */
		if ((desc->bEndpointAddress & 0x0f) >= 0x0c) {
			ret = -EDOM;
			goto print_err;
		}
		ep->is_in = !!usb_endpoint_dir_in(desc);
		/* enhanced mode: this hardware endpoint cannot act as IN */
		if (dev->enhanced_mode && ep->is_in && ep_key[ep->num]) {
			ret = -EINVAL;
			goto print_err;
		}
	}

	/* sanity check ep-e/ep-f since their fifos are small */
	max = usb_endpoint_maxp(desc);
	if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY)) {
		ret = -ERANGE;
		goto print_err;
	}

	spin_lock_irqsave(&dev->lock, flags);
	_ep->maxpacket = max;
	ep->desc = desc;

	/* ep_reset() has already been called */
	ep->stopped = 0;
	ep->wedged = 0;
	ep->out_overflow = 0;

	/* set speed-dependent max packet; may kick in high bandwidth */
	set_max_speed(ep, max);

	/* set type, direction, address; reset fifo counters */
	writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);

	if ((dev->quirks & PLX_PCIE) && dev->enhanced_mode) {
		tmp = readl(&ep->cfg->ep_cfg);
		/* If USB ep number doesn't match hardware ep number */
		if ((tmp & 0xf) != usb_endpoint_num(desc)) {
			ret = -EINVAL;
			spin_unlock_irqrestore(&dev->lock, flags);
			goto print_err;
		}
		/* keep the other direction's config bits intact */
		if (ep->is_in)
			tmp &= ~USB3380_EP_CFG_MASK_IN;
		else
			tmp &= ~USB3380_EP_CFG_MASK_OUT;
	}
	type = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
	if (type == USB_ENDPOINT_XFER_INT) {
		/* erratum 0105 workaround prevents hs NYET */
		if (dev->chiprev == 0100 &&
				dev->gadget.speed == USB_SPEED_HIGH &&
				!(desc->bEndpointAddress & USB_DIR_IN))
			writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE),
				&ep->regs->ep_rsp);
	} else if (type == USB_ENDPOINT_XFER_BULK) {
		/* catch some particularly blatant driver bugs */
		if ((dev->gadget.speed == USB_SPEED_SUPER && max != 1024) ||
				(dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
				(dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
			spin_unlock_irqrestore(&dev->lock, flags);
			ret = -ERANGE;
			goto print_err;
		}
	}
	ep->is_iso = (type == USB_ENDPOINT_XFER_ISOC);
	/* Enable this endpoint */
	if (dev->quirks & PLX_LEGACY) {
		tmp |= type << ENDPOINT_TYPE;
		tmp |= desc->bEndpointAddress;
		/* default full fifo lines */
		tmp |= (4 << ENDPOINT_BYTE_COUNT);
		tmp |= BIT(ENDPOINT_ENABLE);
		ep->is_in = (tmp & USB_DIR_IN) != 0;
	} else {
		/* In Legacy mode, only OUT endpoints are used */
		if (dev->enhanced_mode && ep->is_in) {
			tmp |= type << IN_ENDPOINT_TYPE;
			tmp |= BIT(IN_ENDPOINT_ENABLE);
		} else {
			tmp |= type << OUT_ENDPOINT_TYPE;
			tmp |= BIT(OUT_ENDPOINT_ENABLE);
			tmp |= (ep->is_in << ENDPOINT_DIRECTION);
		}

		tmp |= (4 << ENDPOINT_BYTE_COUNT);
		if (!dev->enhanced_mode)
			tmp |= usb_endpoint_num(desc);
		tmp |= (ep->ep.maxburst << MAX_BURST_SIZE);
	}

	/* Make sure all the registers are written before ep_rsp*/
	wmb();

	/* for OUT transfers, block the rx fifo until a read is posted */
	if (!ep->is_in)
		writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
	else if (!(dev->quirks & PLX_2280)) {
		/* Added for 2282, Don't use nak packets on an in endpoint,
		 * this was ignored on 2280
		 */
		writel(BIT(CLEAR_NAK_OUT_PACKETS) |
			BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
	}

	if (dev->quirks & PLX_PCIE)
		ep_clear_seqnum(ep);
	writel(tmp, &ep->cfg->ep_cfg);

	/* enable irqs */
	if (!ep->dma) {				/* pio, per-packet */
		enable_pciirqenb(ep);

		tmp = BIT(DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) |
			BIT(DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
		if (dev->quirks & PLX_2280)
			tmp |= readl(&ep->regs->ep_irqenb);
		writel(tmp, &ep->regs->ep_irqenb);
	} else {				/* dma, per-request */
		tmp = BIT((8 + ep->num));	/* completion */
		tmp |= readl(&dev->regs->pciirqenb1);
		writel(tmp, &dev->regs->pciirqenb1);

		/* for short OUT transfers, dma completions can't
		 * advance the queue; do it pio-style, by hand.
		 * NOTE erratum 0112 workaround #2
		 */
		if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
			tmp = BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
			writel(tmp, &ep->regs->ep_irqenb);

			enable_pciirqenb(ep);
		}
	}

	tmp = desc->bEndpointAddress;
	ep_dbg(dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
		_ep->name, tmp & 0x0f, DIR_STRING(tmp),
		type_string(desc->bmAttributes),
		ep->dma ? "dma" : "pio", max);

	/* pci writes may still be posted */
	spin_unlock_irqrestore(&dev->lock, flags);
	return ret;

print_err:
	dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, ret);
	return ret;
}
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
/*
 * Busy-poll a device register until (value & mask) == done.
 * Always samples the register at least once, then retries every
 * microsecond for up to @usec iterations.
 *
 * Returns 0 on match, -ENODEV when the register reads all-ones
 * (device unplugged), or -ETIMEDOUT when the budget is exhausted.
 */
static int handshake(u32 __iomem *ptr, u32 mask, u32 done, int usec)
{
	for (;;) {
		u32 val = readl(ptr);

		if (val == ~(u32)0)	/* "device unplugged" */
			return -ENODEV;
		if ((val & mask) == done)
			return 0;
		udelay(1);
		if (--usec <= 0)
			break;
	}
	return -ETIMEDOUT;
}
|
|
|
|
|
2006-09-02 17:13:45 +07:00
|
|
|
static const struct usb_ep_ops net2280_ep_ops;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-05-20 23:30:03 +07:00
|
|
|
/*
 * ep_reset_228x - return a 228x endpoint to its quiescent default state.
 * @regs: device register block
 * @ep:   endpoint to reset
 *
 * Clears the software descriptor and queue, disables the endpoint's DMA
 * channel and interrupt sources, programs the default NAK-OUT policy
 * (see erratum 0112), and scrubs/flushes the status and fifo state.
 *
 * Fix: repaired mis-encoded "&reg;" HTML-entity corruption — the
 * register accesses read "®s->..." in the source; restored to
 * "&regs->..." so the code compiles.  No logic changes.
 */
static void ep_reset_228x(struct net2280_regs __iomem *regs,
			  struct net2280_ep *ep)
{
	u32 tmp;

	ep->desc = NULL;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
	ep->ep.ops = &net2280_ep_ops;

	/* disable the dma, irqs, endpoint... */
	if (ep->dma) {
		writel(0, &ep->dma->dmactl);
		writel(BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
			BIT(DMA_TRANSACTION_DONE_INTERRUPT) |
			BIT(DMA_ABORT),
			&ep->dma->dmastat);

		tmp = readl(&regs->pciirqenb0);
		tmp &= ~BIT(ep->num);
		writel(tmp, &regs->pciirqenb0);
	} else {
		tmp = readl(&regs->pciirqenb1);
		tmp &= ~BIT((8 + ep->num));	/* completion */
		writel(tmp, &regs->pciirqenb1);
	}
	writel(0, &ep->regs->ep_irqenb);

	/* init to our chosen defaults, notably so that we NAK OUT
	 * packets until the driver queues a read (+note erratum 0112)
	 */
	if (!ep->is_in || (ep->dev->quirks & PLX_2280)) {
		tmp = BIT(SET_NAK_OUT_PACKETS_MODE) |
			BIT(SET_NAK_OUT_PACKETS) |
			BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
			BIT(CLEAR_INTERRUPT_MODE);
	} else {
		/* added for 2282 */
		tmp = BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
			BIT(CLEAR_NAK_OUT_PACKETS) |
			BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
			BIT(CLEAR_INTERRUPT_MODE);
	}

	if (ep->num != 0) {
		tmp |= BIT(CLEAR_ENDPOINT_TOGGLE) |
			BIT(CLEAR_ENDPOINT_HALT);
	}
	writel(tmp, &ep->regs->ep_rsp);

	/* scrub most status bits, and flush any fifo state */
	if (ep->dev->quirks & PLX_2280)
		tmp = BIT(FIFO_OVERFLOW) |
			BIT(FIFO_UNDERFLOW);
	else
		tmp = 0;

	writel(tmp | BIT(TIMEOUT) |
		BIT(USB_STALL_SENT) |
		BIT(USB_IN_NAK_SENT) |
		BIT(USB_IN_ACK_RCVD) |
		BIT(USB_OUT_PING_NAK_SENT) |
		BIT(USB_OUT_ACK_SENT) |
		BIT(FIFO_FLUSH) |
		BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
		BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
		BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
		BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
		BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
		BIT(DATA_IN_TOKEN_INTERRUPT),
		&ep->regs->ep_stat);

	/* fifo size is handled separately */
}
|
|
|
|
|
2014-05-20 23:30:03 +07:00
|
|
|
/*
 * ep_reset_338x - return a USB338x endpoint to its quiescent default state.
 * @regs: device register block
 * @ep:   endpoint to reset
 *
 * Clears the software descriptor and queue, disables the endpoint's DMA
 * channel and interrupt sources (note the 338x maps endpoints through
 * ep_bit[] in pciirqenb0), acknowledges pending status bits, and clears
 * the direction-specific half of the endpoint configuration register.
 *
 * The 0x5002 dmastat check/0x5a write looks like a hardware-quirk
 * recovery — NOTE(review): not documented in SOURCE; confirm against the
 * PLX errata before relying on it.
 *
 * Fix: repaired mis-encoded "&reg;" HTML-entity corruption — register
 * accesses read "®s->..." in the source; restored to "&regs->..." so
 * the code compiles.  No logic changes.
 */
static void ep_reset_338x(struct net2280_regs __iomem *regs,
					struct net2280_ep *ep)
{
	u32 tmp, dmastat;

	ep->desc = NULL;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
	ep->ep.ops = &net2280_ep_ops;

	/* disable the dma, irqs, endpoint... */
	if (ep->dma) {
		writel(0, &ep->dma->dmactl);
		writel(BIT(DMA_ABORT_DONE_INTERRUPT) |
		       BIT(DMA_PAUSE_DONE_INTERRUPT) |
		       BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
		       BIT(DMA_TRANSACTION_DONE_INTERRUPT),
		       /* | BIT(DMA_ABORT), */
		       &ep->dma->dmastat);

		dmastat = readl(&ep->dma->dmastat);
		if (dmastat == 0x5002) {
			ep_warn(ep->dev, "The dmastat return = %x!!\n",
				dmastat);
			writel(0x5a, &ep->dma->dmastat);
		}

		tmp = readl(&regs->pciirqenb0);
		tmp &= ~BIT(ep_bit[ep->num]);
		writel(tmp, &regs->pciirqenb0);
	} else {
		if (ep->num < 5) {
			tmp = readl(&regs->pciirqenb1);
			tmp &= ~BIT((8 + ep->num));	/* completion */
			writel(tmp, &regs->pciirqenb1);
		}
	}
	writel(0, &ep->regs->ep_irqenb);

	writel(BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
	       BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
	       BIT(FIFO_OVERFLOW) |
	       BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
	       BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
	       BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
	       BIT(DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat);

	tmp = readl(&ep->cfg->ep_cfg);
	if (ep->is_in)
		tmp &= ~USB3380_EP_CFG_MASK_IN;
	else
		tmp &= ~USB3380_EP_CFG_MASK_OUT;
	writel(tmp, &ep->cfg->ep_cfg);
}
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
static void nuke(struct net2280_ep *);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
/*
 * net2280_disable - usb_ep_ops.disable: deactivate an endpoint.
 * @_ep: endpoint to disable (must have been enabled; never ep0)
 *
 * Aborts all queued requests (nuke), resets the endpoint hardware via the
 * chip-appropriate helper, and re-attaches the endpoint's DMA channel if
 * the reset dropped it.  Runs under dev->lock.
 *
 * Returns 0 on success, -EINVAL for a NULL/unconfigured endpoint or ep0.
 */
static int net2280_disable(struct usb_ep *_ep)
{
	struct net2280_ep *ep;
	unsigned long flags;

	/* NOTE: ep is computed before the !_ep check; the bogus pointer
	 * from container_of(NULL) is only dereferenced after _ep is known
	 * to be non-NULL (short-circuit order of the condition below). */
	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || !ep->desc || _ep->name == ep0name) {
		pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
		return -EINVAL;
	}
	spin_lock_irqsave(&ep->dev->lock, flags);
	nuke(ep);

	if (ep->dev->quirks & PLX_PCIE)
		ep_reset_338x(ep->dev->regs, ep);
	else
		ep_reset_228x(ep->dev->regs, ep);

	ep_vdbg(ep->dev, "disabled %s %s\n",
			ep->dma ? "dma" : "pio", _ep->name);

	/* synch memory views with the device */
	(void)readl(&ep->cfg->ep_cfg);

	/* re-associate the DMA channel for ep 1..4 if it was detached */
	if (!ep->dma && ep->num >= 1 && ep->num <= 4)
		ep->dma = &ep->dev->dma[ep->num - 1];

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}
|
|
|
|
|
|
|
|
/*-------------------------------------------------------------------------*/
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
static struct usb_request
|
|
|
|
*net2280_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
|
|
|
struct net2280_ep *ep;
|
|
|
|
struct net2280_request *req;
|
|
|
|
|
2015-02-02 16:55:25 +07:00
|
|
|
if (!_ep) {
|
|
|
|
pr_err("%s: Invalid ep\n", __func__);
|
2005-04-17 05:20:36 +07:00
|
|
|
return NULL;
|
2015-02-02 16:55:25 +07:00
|
|
|
}
|
2014-05-20 23:30:09 +07:00
|
|
|
ep = container_of(_ep, struct net2280_ep, ep);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-02-28 04:34:10 +07:00
|
|
|
req = kzalloc(sizeof(*req), gfp_flags);
|
2005-04-17 05:20:36 +07:00
|
|
|
if (!req)
|
|
|
|
return NULL;
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
INIT_LIST_HEAD(&req->queue);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* this dma descriptor may be swapped with the previous dummy */
|
|
|
|
if (ep->dma) {
|
|
|
|
struct net2280_dma *td;
|
|
|
|
|
2017-03-08 23:19:54 +07:00
|
|
|
td = dma_pool_alloc(ep->dev->requests, gfp_flags,
|
2005-04-17 05:20:36 +07:00
|
|
|
&req->td_dma);
|
|
|
|
if (!td) {
|
2014-05-20 23:30:09 +07:00
|
|
|
kfree(req);
|
2005-04-17 05:20:36 +07:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
td->dmacount = 0; /* not VALID */
|
|
|
|
td->dmadesc = td->dmaaddr;
|
|
|
|
req->td = td;
|
|
|
|
}
|
|
|
|
return &req->req;
|
|
|
|
}
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
static void net2280_free_request(struct usb_ep *_ep, struct usb_request *_req)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
|
|
|
struct net2280_ep *ep;
|
|
|
|
struct net2280_request *req;
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
ep = container_of(_ep, struct net2280_ep, ep);
|
2015-02-02 16:55:25 +07:00
|
|
|
if (!_ep || !_req) {
|
2016-08-24 13:44:19 +07:00
|
|
|
dev_err(&ep->dev->pdev->dev, "%s: Invalid ep=%p or req=%p\n",
|
2015-02-02 16:55:25 +07:00
|
|
|
__func__, _ep, _req);
|
2005-04-17 05:20:36 +07:00
|
|
|
return;
|
2015-02-02 16:55:25 +07:00
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
req = container_of(_req, struct net2280_request, req);
|
|
|
|
WARN_ON(!list_empty(&req->queue));
|
2005-04-17 05:20:36 +07:00
|
|
|
if (req->td)
|
2017-03-08 23:19:54 +07:00
|
|
|
dma_pool_free(ep->dev->requests, req->td, req->td_dma);
|
2014-05-20 23:30:09 +07:00
|
|
|
kfree(req);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*-------------------------------------------------------------------------*/
|
|
|
|
|
|
|
|
/* load a packet into the fifo we use for usb IN transfers.
|
|
|
|
* works for all endpoints.
|
|
|
|
*
|
|
|
|
* NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
|
|
|
|
* at a time, but this code is simpler because it knows it only writes
|
|
|
|
* one packet. ep-a..ep-d should use dma instead.
|
|
|
|
*/
|
2014-05-20 23:30:09 +07:00
|
|
|
static void write_fifo(struct net2280_ep *ep, struct usb_request *req)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
|
|
|
struct net2280_ep_regs __iomem *regs = ep->regs;
|
|
|
|
u8 *buf;
|
|
|
|
u32 tmp;
|
|
|
|
unsigned count, total;
|
|
|
|
|
|
|
|
/* INVARIANT: fifo is currently empty. (testable) */
|
|
|
|
|
|
|
|
if (req) {
|
|
|
|
buf = req->buf + req->actual;
|
2014-05-20 23:30:09 +07:00
|
|
|
prefetch(buf);
|
2005-04-17 05:20:36 +07:00
|
|
|
total = req->length - req->actual;
|
|
|
|
} else {
|
|
|
|
total = 0;
|
|
|
|
buf = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* write just one packet at a time */
|
|
|
|
count = ep->ep.maxpacket;
|
|
|
|
if (count > total) /* min() cannot be used on a bitfield */
|
|
|
|
count = total;
|
|
|
|
|
2014-05-20 23:30:11 +07:00
|
|
|
ep_vdbg(ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
|
2005-04-17 05:20:36 +07:00
|
|
|
ep->ep.name, count,
|
|
|
|
(count != ep->ep.maxpacket) ? " (short)" : "",
|
|
|
|
req);
|
|
|
|
while (count >= 4) {
|
|
|
|
/* NOTE be careful if you try to align these. fifo lines
|
|
|
|
* should normally be full (4 bytes) and successive partial
|
|
|
|
* lines are ok only in certain cases.
|
|
|
|
*/
|
2014-05-20 23:30:09 +07:00
|
|
|
tmp = get_unaligned((u32 *)buf);
|
|
|
|
cpu_to_le32s(&tmp);
|
|
|
|
writel(tmp, ®s->ep_data);
|
2005-04-17 05:20:36 +07:00
|
|
|
buf += 4;
|
|
|
|
count -= 4;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* last fifo entry is "short" unless we wrote a full packet.
|
|
|
|
* also explicitly validate last word in (periodic) transfers
|
|
|
|
* when maxpacket is not a multiple of 4 bytes.
|
|
|
|
*/
|
|
|
|
if (count || total < ep->ep.maxpacket) {
|
2014-05-20 23:30:09 +07:00
|
|
|
tmp = count ? get_unaligned((u32 *)buf) : count;
|
|
|
|
cpu_to_le32s(&tmp);
|
|
|
|
set_fifo_bytecount(ep, count & 0x03);
|
|
|
|
writel(tmp, ®s->ep_data);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* pci writes may still be posted */
|
|
|
|
}
|
|
|
|
|
|
|
|
/* work around erratum 0106: PCI and USB race over the OUT fifo.
|
|
|
|
* caller guarantees chiprev 0100, out endpoint is NAKing, and
|
|
|
|
* there's no real data in the fifo.
|
|
|
|
*
|
|
|
|
* NOTE: also used in cases where that erratum doesn't apply:
|
|
|
|
* where the host wrote "too much" data to us.
|
|
|
|
*/
|
2014-05-20 23:30:09 +07:00
|
|
|
/* work around erratum 0106: PCI and USB race over the OUT fifo.
 * caller guarantees chiprev 0100, out endpoint is NAKing, and
 * there's no real data in the fifo.
 *
 * NOTE: also used in cases where that erratum doesn't apply:
 * where the host wrote "too much" data to us.
 */
static void out_flush(struct net2280_ep *ep)
{
	u32 __iomem *statp;
	u32 tmp;

	statp = &ep->regs->ep_stat;

	/* if the endpoint is unexpectedly not NAKing OUT packets, force
	 * NAK before flushing so no new data races into the fifo */
	tmp = readl(statp);
	if (tmp & BIT(NAK_OUT_PACKETS)) {
		ep_dbg(ep->dev, "%s %s %08x !NAK\n",
			ep->ep.name, __func__, tmp);
		writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
	}

	/* ack pending OUT-token/packet status, then flush the fifo */
	writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
		BIT(DATA_PACKET_RECEIVED_INTERRUPT),
		statp);
	writel(BIT(FIFO_FLUSH), statp);
	/* Make sure that stap is written */
	mb();
	tmp = readl(statp);
	if (tmp & BIT(DATA_OUT_PING_TOKEN_INTERRUPT) &&
			/* high speed did bulk NYET; fifo isn't filling */
			ep->dev->gadget.speed == USB_SPEED_FULL) {
		unsigned usec;

		/* wait for the NAK handshake to go out before the caller
		 * clears NAK-OUT mode (full speed only) */
		usec = 50;	/* 64 byte bulk/interrupt */
		handshake(statp, BIT(USB_OUT_PING_NAK_SENT),
				BIT(USB_OUT_PING_NAK_SENT), usec);
		/* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
	}
}
|
|
|
|
|
|
|
|
/* unload packet(s) from the fifo we use for usb OUT transfers.
|
|
|
|
* returns true iff the request completed, because of short packet
|
|
|
|
* or the request buffer having filled with full packets.
|
|
|
|
*
|
|
|
|
* for ep-a..ep-d this will read multiple packets out when they
|
|
|
|
* have been accepted.
|
|
|
|
*/
|
2014-05-20 23:30:09 +07:00
|
|
|
static int read_fifo(struct net2280_ep *ep, struct net2280_request *req)
{
	struct net2280_ep_regs __iomem *regs = ep->regs;
	u8 *buf = req->req.buf + req->req.actual;
	unsigned count, tmp, is_short;
	unsigned cleanup = 0, prevent = 0;

	/* erratum 0106 ... packets coming in during fifo reads might
	 * be incompletely rejected. not all cases have workarounds.
	 */
	if (ep->dev->chiprev == 0x0100 &&
			ep->dev->gadget.speed == USB_SPEED_FULL) {
		udelay(1);
		tmp = readl(&ep->regs->ep_stat);
		if ((tmp & BIT(NAK_OUT_PACKETS)))
			cleanup = 1;		/* already NAKing: flush when done */
		else if ((tmp & BIT(FIFO_FULL))) {
			start_out_naking(ep);	/* stop more packets arriving */
			prevent = 1;		/* re-enable reception afterwards */
		}
		/* else: hope we don't see the problem */
	}

	/* never overflow the rx buffer. the fifo reads packets until
	 * it sees a short one; we might not be ready for them all.
	 */
	prefetchw(buf);
	count = readl(&regs->ep_avail);
	if (unlikely(count == 0)) {
		/* retry once: ep_avail can lag just after a packet lands */
		udelay(1);
		tmp = readl(&ep->regs->ep_stat);
		count = readl(&regs->ep_avail);
		/* handled that data already? */
		if (count == 0 && (tmp & BIT(NAK_OUT_PACKETS)) == 0)
			return 0;
	}

	/* clamp to the space remaining in the request buffer */
	tmp = req->req.length - req->req.actual;
	if (count > tmp) {
		/* as with DMA, data overflow gets flushed */
		if ((tmp % ep->ep.maxpacket) != 0) {
			ep_err(ep->dev,
				"%s out fifo %d bytes, expected %d\n",
				ep->ep.name, count, tmp);
			req->req.status = -EOVERFLOW;
			cleanup = 1;
			/* NAK_OUT_PACKETS will be set, so flushing is safe;
			 * the next read will start with the next packet
			 */
		} /* else it's a ZLP, no worries */
		count = tmp;
	}
	req->req.actual += count;

	/* a zero-length or partial packet terminates the transfer */
	is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);

	ep_vdbg(ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
		ep->ep.name, count, is_short ? " (short)" : "",
		cleanup ? " flush" : "", prevent ? " nak" : "",
		req, req->req.actual, req->req.length);

	/* drain the fifo one 32-bit word at a time (fifo data is LE) */
	while (count >= 4) {
		tmp = readl(&regs->ep_data);
		cpu_to_le32s(&tmp);
		put_unaligned(tmp, (u32 *)buf);
		buf += 4;
		count -= 4;
	}
	/* then unpack the residue (1-3 bytes) from one last word */
	if (count) {
		tmp = readl(&regs->ep_data);
		/* LE conversion is implicit here: */
		do {
			*buf++ = (u8) tmp;
			tmp >>= 8;
		} while (--count);
	}
	if (cleanup)
		out_flush(ep);
	if (prevent) {
		/* undo the start_out_naking() above; readback flushes
		 * the posted PCI write
		 */
		writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
		(void) readl(&ep->regs->ep_rsp);
	}

	/* true iff the request is complete (short packet, or buffer full
	 * with no zero-terminator wanted)
	 */
	return is_short || ((req->req.actual == req->req.length) &&
			!req->req.zero);
}
|
|
|
|
|
|
|
|
/* fill out dma descriptor to match a given request */
|
2014-05-20 23:30:09 +07:00
|
|
|
static void fill_dma_desc(struct net2280_ep *ep,
		struct net2280_request *req, int valid)
{
	struct net2280_dma *td = req->td;
	u32 dmacount = req->req.length;

	/* don't let DMA continue after a short OUT packet,
	 * so overruns can't affect the next transfer.
	 * in case of overruns on max-size packets, we can't
	 * stop the fifo from filling but we can flush it.
	 */
	if (ep->is_in)
		dmacount |= BIT(DMA_DIRECTION);
	if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0) ||
					!(ep->dev->quirks & PLX_2280))
		dmacount |= BIT(END_OF_CHAIN);

	/* record whether the descriptor is live before setting VALID_BIT */
	req->valid = valid;
	if (valid)
		dmacount |= BIT(VALID_BIT);
	dmacount |= BIT(DMA_DONE_INTERRUPT_ENABLE);

	/* td->dmadesc = previously set by caller */
	td->dmaaddr = cpu_to_le32 (req->req.dma);

	/* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
	wmb();
	/* dmacount (with VALID_BIT) must be the last field written */
	td->dmacount = cpu_to_le32(dmacount);
}
|
|
|
|
|
|
|
|
/* default dmactl value: scatter-gather mode with descriptor-polling
 * workarounds for erratum 0116 baked in
 */
static const u32 dmactl_default =
		BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
		BIT(DMA_CLEAR_COUNT_ENABLE) |
		/* erratum 0116 workaround part 1 (use POLLING) */
		(POLL_100_USEC << DESCRIPTOR_POLLING_RATE) |
		BIT(DMA_VALID_BIT_POLLING_ENABLE) |
		BIT(DMA_VALID_BIT_ENABLE) |
		BIT(DMA_SCATTER_GATHER_ENABLE) |
		/* erratum 0116 workaround part 2 (no AUTOSTART) */
		BIT(DMA_ENABLE);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
/* busy-wait (up to 50 usec) for the DMA_ENABLE bit to clear,
 * i.e. for the engine to acknowledge that it has stopped
 */
static inline void spin_stop_dma(struct net2280_dma_regs __iomem *dma)
{
	handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50);
}
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
/* request the DMA engine to stop, then wait until it actually has */
static inline void stop_dma(struct net2280_dma_regs __iomem *dma)
{
	writel(readl(&dma->dmactl) & ~BIT(DMA_ENABLE), &dma->dmactl);
	spin_stop_dma(dma);
}
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
/* program the endpoint's DMA engine with a descriptor chain at td_dma
 * and kick it off; for OUT endpoints also stop NAKing so data can flow
 */
static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
{
	struct net2280_dma_regs __iomem *dma = ep->dma;
	unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION);

	if (!(ep->dev->quirks & PLX_2280))
		tmp |= BIT(END_OF_CHAIN);

	writel(tmp, &dma->dmacount);
	/* write-1-to-clear any stale dmastat bits */
	writel(readl(&dma->dmastat), &dma->dmastat);

	writel(td_dma, &dma->dmadesc);
	if (ep->dev->quirks & PLX_PCIE)
		dmactl |= BIT(DMA_REQUEST_OUTSTANDING);
	writel(dmactl, &dma->dmactl);

	/* erratum 0116 workaround part 3: pci arbiter away from net2280 */
	(void) readl(&ep->dev->pci->pcimstctl);

	writel(BIT(DMA_START), &dma->dmastat);

	if (!ep->is_in)
		stop_out_naking(ep);
}
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
/* begin DMA for a request on an idle endpoint; handles the special case
 * of an OUT fifo that already holds data from a previous short packet
 */
static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
{
	u32 tmp;
	struct net2280_dma_regs __iomem *dma = ep->dma;

	/* FIXME can't use DMA for ZLPs */

	/* on this path we "know" there's no dma active (yet) */
	WARN_ON(readl(&dma->dmactl) & BIT(DMA_ENABLE));
	writel(0, &ep->dma->dmactl);

	/* previous OUT packet might have been short */
	if (!ep->is_in && (readl(&ep->regs->ep_stat) &
				BIT(NAK_OUT_PACKETS))) {
		writel(BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT),
			&ep->regs->ep_stat);

		tmp = readl(&ep->regs->ep_avail);
		if (tmp) {
			/* write-1-to-clear stale dmastat bits */
			writel(readl(&dma->dmastat), &dma->dmastat);

			/* transfer all/some fifo data */
			writel(req->req.dma, &dma->dmaaddr);
			tmp = min(tmp, req->req.length);

			/* dma irq, faking scatterlist status */
			req->td->dmacount = cpu_to_le32(req->req.length - tmp);
			writel(BIT(DMA_DONE_INTERRUPT_ENABLE) | tmp,
					&dma->dmacount);
			req->td->dmadesc = 0;
			req->valid = 1;

			writel(BIT(DMA_ENABLE), &dma->dmactl);
			writel(BIT(DMA_START), &dma->dmastat);
			/* fifo drain runs without scatter-gather; done */
			return;
		}
	}

	tmp = dmactl_default;

	/* force packet boundaries between dma requests, but prevent the
	 * controller from automagically writing a last "short" packet
	 * (zero length) unless the driver explicitly said to do that.
	 */
	if (ep->is_in) {
		if (likely((req->req.length % ep->ep.maxpacket) ||
							req->req.zero)){
			tmp |= BIT(DMA_FIFO_VALIDATE);
			ep->in_fifo_validate = 1;
		} else
			ep->in_fifo_validate = 0;
	}

	/* init req->td, pointing to the current dummy */
	req->td->dmadesc = cpu_to_le32 (ep->td_dma);
	fill_dma_desc(ep, req, 1);

	req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));

	start_queue(ep, tmp, req->td_dma);
}
|
|
|
|
|
|
|
|
static inline void
|
2014-05-20 23:30:09 +07:00
|
|
|
queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
|
|
|
struct net2280_dma *end;
|
|
|
|
dma_addr_t tmp;
|
|
|
|
|
|
|
|
/* swap new dummy for old, link; fill and maybe activate */
|
|
|
|
end = ep->dummy;
|
|
|
|
ep->dummy = req->td;
|
|
|
|
req->td = end;
|
|
|
|
|
|
|
|
tmp = ep->td_dma;
|
|
|
|
ep->td_dma = req->td_dma;
|
|
|
|
req->td_dma = tmp;
|
|
|
|
|
|
|
|
end->dmadesc = cpu_to_le32 (ep->td_dma);
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
fill_dma_desc(ep, req, valid);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* retire a request: unlink it, unmap any DMA, and invoke the gadget
 * driver's completion callback with dev->lock temporarily dropped
 */
static void
done(struct net2280_ep *ep, struct net2280_request *req, int status)
{
	struct net2280 *dev;
	unsigned stopped = ep->stopped;

	list_del_init(&req->queue);

	/* preserve an earlier error status (e.g. -EOVERFLOW) if one was set */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);

	if (status && status != -ESHUTDOWN)
		ep_vdbg(dev, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	/* callback may re-queue; it runs without dev->lock held */
	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->stopped = stopped;
}
|
|
|
|
|
|
|
|
/*-------------------------------------------------------------------------*/
|
|
|
|
|
|
|
|
static int
|
2014-05-20 23:30:09 +07:00
|
|
|
net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
|
|
|
struct net2280_request *req;
|
|
|
|
struct net2280_ep *ep;
|
|
|
|
struct net2280 *dev;
|
|
|
|
unsigned long flags;
|
2015-02-02 16:55:25 +07:00
|
|
|
int ret = 0;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* we always require a cpu-view buffer, so that we can
|
|
|
|
* always use pio (as fallback or whatever).
|
|
|
|
*/
|
2014-05-20 23:30:09 +07:00
|
|
|
ep = container_of(_ep, struct net2280_ep, ep);
|
2015-02-02 16:55:25 +07:00
|
|
|
if (!_ep || (!ep->desc && ep->num != 0)) {
|
|
|
|
pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
|
2005-04-17 05:20:36 +07:00
|
|
|
return -EINVAL;
|
2015-02-02 16:55:25 +07:00
|
|
|
}
|
|
|
|
req = container_of(_req, struct net2280_request, req);
|
|
|
|
if (!_req || !_req->complete || !_req->buf ||
|
|
|
|
!list_empty(&req->queue)) {
|
|
|
|
ret = -EINVAL;
|
|
|
|
goto print_err;
|
|
|
|
}
|
|
|
|
if (_req->length > (~0 & DMA_BYTE_COUNT_MASK)) {
|
|
|
|
ret = -EDOM;
|
|
|
|
goto print_err;
|
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
dev = ep->dev;
|
2015-02-02 16:55:25 +07:00
|
|
|
if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {
|
|
|
|
ret = -ESHUTDOWN;
|
|
|
|
goto print_err;
|
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* FIXME implement PIO fallback for ZLPs with DMA */
|
2015-02-02 16:55:25 +07:00
|
|
|
if (ep->dma && _req->length == 0) {
|
|
|
|
ret = -EOPNOTSUPP;
|
|
|
|
goto print_err;
|
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* set up dma mapping in case the caller didn't */
|
2011-12-19 17:09:56 +07:00
|
|
|
if (ep->dma) {
|
|
|
|
ret = usb_gadget_map_request(&dev->gadget, _req,
|
|
|
|
ep->is_in);
|
|
|
|
if (ret)
|
2015-02-02 16:55:25 +07:00
|
|
|
goto print_err;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
2014-05-20 23:30:11 +07:00
|
|
|
ep_vdbg(dev, "%s queue req %p, len %d buf %p\n",
|
2005-04-17 05:20:36 +07:00
|
|
|
_ep->name, _req, _req->length, _req->buf);
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
spin_lock_irqsave(&dev->lock, flags);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
_req->status = -EINPROGRESS;
|
|
|
|
_req->actual = 0;
|
|
|
|
|
|
|
|
/* kickstart this i/o queue? */
|
2014-11-28 20:50:56 +07:00
|
|
|
if (list_empty(&ep->queue) && !ep->stopped &&
|
2016-05-23 20:58:41 +07:00
|
|
|
!((dev->quirks & PLX_PCIE) && ep->dma &&
|
2014-11-28 20:50:56 +07:00
|
|
|
(readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)))) {
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/* use DMA if the endpoint supports it, else pio */
|
2014-11-28 20:50:56 +07:00
|
|
|
if (ep->dma)
|
2014-05-20 23:30:09 +07:00
|
|
|
start_dma(ep, req);
|
2005-04-17 05:20:36 +07:00
|
|
|
else {
|
|
|
|
/* maybe there's no control data, just status ack */
|
|
|
|
if (ep->num == 0 && _req->length == 0) {
|
2014-05-20 23:30:09 +07:00
|
|
|
allow_status(ep);
|
|
|
|
done(ep, req, 0);
|
2014-05-20 23:30:11 +07:00
|
|
|
ep_vdbg(dev, "%s status ack\n", ep->ep.name);
|
2005-04-17 05:20:36 +07:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* PIO ... stuff the fifo, or unblock it. */
|
|
|
|
if (ep->is_in)
|
2014-05-20 23:30:09 +07:00
|
|
|
write_fifo(ep, _req);
|
|
|
|
else if (list_empty(&ep->queue)) {
|
2005-04-17 05:20:36 +07:00
|
|
|
u32 s;
|
|
|
|
|
|
|
|
/* OUT FIFO might have packet(s) buffered */
|
2014-05-20 23:30:09 +07:00
|
|
|
s = readl(&ep->regs->ep_stat);
|
2014-05-20 23:30:05 +07:00
|
|
|
if ((s & BIT(FIFO_EMPTY)) == 0) {
|
2005-04-17 05:20:36 +07:00
|
|
|
/* note: _req->short_not_ok is
|
|
|
|
* ignored here since PIO _always_
|
|
|
|
* stops queue advance here, and
|
|
|
|
* _req->status doesn't change for
|
|
|
|
* short reads (only _req->actual)
|
|
|
|
*/
|
2014-05-20 23:30:09 +07:00
|
|
|
if (read_fifo(ep, req) &&
|
|
|
|
ep->num == 0) {
|
|
|
|
done(ep, req, 0);
|
|
|
|
allow_status(ep);
|
2005-04-17 05:20:36 +07:00
|
|
|
/* don't queue it */
|
|
|
|
req = NULL;
|
2014-05-20 23:30:09 +07:00
|
|
|
} else if (read_fifo(ep, req) &&
|
|
|
|
ep->num != 0) {
|
|
|
|
done(ep, req, 0);
|
|
|
|
req = NULL;
|
2005-04-17 05:20:36 +07:00
|
|
|
} else
|
2014-05-20 23:30:09 +07:00
|
|
|
s = readl(&ep->regs->ep_stat);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* don't NAK, let the fifo fill */
|
2014-05-20 23:30:05 +07:00
|
|
|
if (req && (s & BIT(NAK_OUT_PACKETS)))
|
|
|
|
writel(BIT(CLEAR_NAK_OUT_PACKETS),
|
2005-04-17 05:20:36 +07:00
|
|
|
&ep->regs->ep_rsp);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
} else if (ep->dma) {
|
|
|
|
int valid = 1;
|
|
|
|
|
|
|
|
if (ep->is_in) {
|
|
|
|
int expect;
|
|
|
|
|
|
|
|
/* preventing magic zlps is per-engine state, not
|
|
|
|
* per-transfer; irq logic must recover hiccups.
|
|
|
|
*/
|
2014-05-20 23:30:09 +07:00
|
|
|
expect = likely(req->req.zero ||
|
|
|
|
(req->req.length % ep->ep.maxpacket));
|
2005-04-17 05:20:36 +07:00
|
|
|
if (expect != ep->in_fifo_validate)
|
|
|
|
valid = 0;
|
|
|
|
}
|
2014-05-20 23:30:09 +07:00
|
|
|
queue_dma(ep, req, valid);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
} /* else the irq handler advances the queue. */
|
|
|
|
|
2006-11-16 22:16:00 +07:00
|
|
|
ep->responded = 1;
|
2005-04-17 05:20:36 +07:00
|
|
|
if (req)
|
2014-05-20 23:30:09 +07:00
|
|
|
list_add_tail(&req->queue, &ep->queue);
|
2005-04-17 05:20:36 +07:00
|
|
|
done:
|
2014-05-20 23:30:09 +07:00
|
|
|
spin_unlock_irqrestore(&dev->lock, flags);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* pci writes may still be posted */
|
2015-02-02 16:55:25 +07:00
|
|
|
return ret;
|
|
|
|
|
|
|
|
print_err:
|
|
|
|
dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, ret);
|
|
|
|
return ret;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* complete a DMA request: derive bytes actually transferred from the
 * residue in the descriptor's dmacount field, then retire it
 */
static inline void
dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount,
		int status)
{
	req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
	done(ep, req, status);
}
|
|
|
|
|
2016-08-12 21:29:34 +07:00
|
|
|
static int scan_dma_completions(struct net2280_ep *ep)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
2016-08-12 21:29:34 +07:00
|
|
|
int num_completed = 0;
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/* only look at descriptors that were "naturally" retired,
|
|
|
|
* so fifo and list head state won't matter
|
|
|
|
*/
|
2014-05-20 23:30:09 +07:00
|
|
|
while (!list_empty(&ep->queue)) {
|
2005-04-17 05:20:36 +07:00
|
|
|
struct net2280_request *req;
|
usb: gadget: udc: net2280: Fix tmp reusage in net2280 driver
In the function scan_dma_completions() there is a reusage of tmp
variable. That coused a wrong value being used in some case when
reading a short packet terminated transaction from an endpoint,
in 2 concecutive reads.
This was my logic for the patch:
The req->td->dmadesc equals to 0 iff:
-- There was a transaction ending with a short packet, and
-- The read() to read it was shorter than the transaction length, and
-- The read() to complete it is longer than the residue.
I believe this is true from the printouts of various cases,
but I can't be positive it is correct.
Entering this if, there should be no more data in the endpoint
(a short packet terminated the transaction).
If there is, the transaction wasn't really done and we should exit and
wait for it to finish entirely. That is the inner if.
That inner if should never happen, but it is there to be on the safe
side. That is why it is marked with the comment /* paranoia */.
The size of the data available in the endpoint is ep->dma->dmacount
and it is read to tmp.
This entire clause is based on my own educated guesses.
If we passed that inner if without breaking in the original code,
than tmp & DMA_BYTE_MASK_COUNT== 0.
That means we will always pass dma bytes count of 0 to dma_done(),
meaning all the requested bytes were read.
dma_done() reports back to the upper layer that the request (read())
was done and how many bytes were read.
In the original code that would always be the request size,
regardless of the actual size of the data.
That did not make sense to me at all.
However, the original value of tmp is req->td->dmacount,
which is the dmacount value when the request's dma transaction was
finished. And that is a much more reasonable value to report back to
the caller.
To recreate the problem:
Read from a bulk out endpoint in a loop, 1024 * n bytes in each
iteration.
Connect the PLX to a host you can control.
Send to that endpoint 1024 * n + x bytes,
such that 0 < x < 1024 * n and (x % 1024) != 0
You would expect the first read() to return 1024 * n
and the second read() to return x.
But you will get the first read to return 1024 * n
and the second one to return 1024 * n.
That is true for every positive integer n.
Cc: Felipe Balbi <balbi@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: linux-usb@vger.kernel.org
Signed-off-by: Raz Manor <Raz.Manor@valens.com>
Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com>
2017-02-09 14:41:08 +07:00
|
|
|
u32 req_dma_count;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
req = list_entry(ep->queue.next,
|
2005-04-17 05:20:36 +07:00
|
|
|
struct net2280_request, queue);
|
|
|
|
if (!req->valid)
|
|
|
|
break;
|
2014-05-20 23:30:09 +07:00
|
|
|
rmb();
|
usb: gadget: udc: net2280: Fix tmp reusage in net2280 driver
In the function scan_dma_completions() there is a reusage of tmp
variable. That caused a wrong value being used in some case when
reading a short packet terminated transaction from an endpoint,
in 2 consecutive reads.
This was my logic for the patch:
The req->td->dmadesc equals to 0 iff:
-- There was a transaction ending with a short packet, and
-- The read() to read it was shorter than the transaction length, and
-- The read() to complete it is longer than the residue.
I believe this is true from the printouts of various cases,
but I can't be positive it is correct.
Entering this if, there should be no more data in the endpoint
(a short packet terminated the transaction).
If there is, the transaction wasn't really done and we should exit and
wait for it to finish entirely. That is the inner if.
That inner if should never happen, but it is there to be on the safe
side. That is why it is marked with the comment /* paranoia */.
The size of the data available in the endpoint is ep->dma->dmacount
and it is read to tmp.
This entire clause is based on my own educated guesses.
If we passed that inner if without breaking in the original code,
than tmp & DMA_BYTE_MASK_COUNT== 0.
That means we will always pass dma bytes count of 0 to dma_done(),
meaning all the requested bytes were read.
dma_done() reports back to the upper layer that the request (read())
was done and how many bytes were read.
In the original code that would always be the request size,
regardless of the actual size of the data.
That did not make sense to me at all.
However, the original value of tmp is req->td->dmacount,
which is the dmacount value when the request's dma transaction was
finished. And that is a much more reasonable value to report back to
the caller.
To recreate the problem:
Read from a bulk out endpoint in a loop, 1024 * n bytes in each
iteration.
Connect the PLX to a host you can control.
Send to that endpoint 1024 * n + x bytes,
such that 0 < x < 1024 * n and (x % 1024) != 0
You would expect the first read() to return 1024 * n
and the second read() to return x.
But you will get the first read to return 1024 * n
and the second one to return 1024 * n.
That is true for every positive integer n.
Cc: Felipe Balbi <balbi@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: linux-usb@vger.kernel.org
Signed-off-by: Raz Manor <Raz.Manor@valens.com>
Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com>
2017-02-09 14:41:08 +07:00
|
|
|
req_dma_count = le32_to_cpup(&req->td->dmacount);
|
|
|
|
if ((req_dma_count & BIT(VALID_BIT)) != 0)
|
2005-04-17 05:20:36 +07:00
|
|
|
break;
|
|
|
|
|
|
|
|
/* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
|
|
|
|
* cases where DMA must be aborted; this code handles
|
|
|
|
* all non-abort DMA completions.
|
|
|
|
*/
|
2014-05-20 23:30:09 +07:00
|
|
|
if (unlikely(req->td->dmadesc == 0)) {
|
2005-04-17 05:20:36 +07:00
|
|
|
/* paranoia */
|
usb: gadget: udc: net2280: Fix tmp reusage in net2280 driver
In the function scan_dma_completions() there is a reusage of tmp
variable. That coused a wrong value being used in some case when
reading a short packet terminated transaction from an endpoint,
in 2 concecutive reads.
This was my logic for the patch:
The req->td->dmadesc equals to 0 iff:
-- There was a transaction ending with a short packet, and
-- The read() to read it was shorter than the transaction length, and
-- The read() to complete it is longer than the residue.
I believe this is true from the printouts of various cases,
but I can't be positive it is correct.
Entering this if, there should be no more data in the endpoint
(a short packet terminated the transaction).
If there is, the transaction wasn't really done and we should exit and
wait for it to finish entirely. That is the inner if.
That inner if should never happen, but it is there to be on the safe
side. That is why it is marked with the comment /* paranoia */.
The size of the data available in the endpoint is ep->dma->dmacount
and it is read to tmp.
This entire clause is based on my own educated guesses.
If we passed that inner if without breaking in the original code,
than tmp & DMA_BYTE_MASK_COUNT== 0.
That means we will always pass dma bytes count of 0 to dma_done(),
meaning all the requested bytes were read.
dma_done() reports back to the upper layer that the request (read())
was done and how many bytes were read.
In the original code that would always be the request size,
regardless of the actual size of the data.
That did not make sense to me at all.
However, the original value of tmp is req->td->dmacount,
which is the dmacount value when the request's dma transaction was
finished. And that is a much more reasonable value to report back to
the caller.
To recreate the problem:
Read from a bulk out endpoint in a loop, 1024 * n bytes in each
iteration.
Connect the PLX to a host you can control.
Send to that endpoint 1024 * n + x bytes,
such that 0 < x < 1024 * n and (x % 1024) != 0
You would expect the first read() to return 1024 * n
and the second read() to return x.
But you will get the first read to return 1024 * n
and the second one to return 1024 * n.
That is true for every positive integer n.
Cc: Felipe Balbi <balbi@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: linux-usb@vger.kernel.org
Signed-off-by: Raz Manor <Raz.Manor@valens.com>
Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com>
2017-02-09 14:41:08 +07:00
|
|
|
u32 const ep_dmacount = readl(&ep->dma->dmacount);
|
|
|
|
|
|
|
|
if (ep_dmacount & DMA_BYTE_COUNT_MASK)
|
2005-04-17 05:20:36 +07:00
|
|
|
break;
|
|
|
|
/* single transfer mode */
|
usb: gadget: udc: net2280: Fix tmp reusage in net2280 driver
In the function scan_dma_completions() there is a reusage of tmp
variable. That coused a wrong value being used in some case when
reading a short packet terminated transaction from an endpoint,
in 2 concecutive reads.
This was my logic for the patch:
The req->td->dmadesc equals to 0 iff:
-- There was a transaction ending with a short packet, and
-- The read() to read it was shorter than the transaction length, and
-- The read() to complete it is longer than the residue.
I believe this is true from the printouts of various cases,
but I can't be positive it is correct.
Entering this if, there should be no more data in the endpoint
(a short packet terminated the transaction).
If there is, the transaction wasn't really done and we should exit and
wait for it to finish entirely. That is the inner if.
That inner if should never happen, but it is there to be on the safe
side. That is why it is marked with the comment /* paranoia */.
The size of the data available in the endpoint is ep->dma->dmacount
and it is read to tmp.
This entire clause is based on my own educated guesses.
If we passed that inner if without breaking in the original code,
than tmp & DMA_BYTE_MASK_COUNT== 0.
That means we will always pass dma bytes count of 0 to dma_done(),
meaning all the requested bytes were read.
dma_done() reports back to the upper layer that the request (read())
was done and how many bytes were read.
In the original code that would always be the request size,
regardless of the actual size of the data.
That did not make sense to me at all.
However, the original value of tmp is req->td->dmacount,
which is the dmacount value when the request's dma transaction was
finished. And that is a much more reasonable value to report back to
the caller.
To recreate the problem:
Read from a bulk out endpoint in a loop, 1024 * n bytes in each
iteration.
Connect the PLX to a host you can control.
Send to that endpoint 1024 * n + x bytes,
such that 0 < x < 1024 * n and (x % 1024) != 0
You would expect the first read() to return 1024 * n
and the second read() to return x.
But you will get the first read to return 1024 * n
and the second one to return 1024 * n.
That is true for every positive integer n.
Cc: Felipe Balbi <balbi@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: linux-usb@vger.kernel.org
Signed-off-by: Raz Manor <Raz.Manor@valens.com>
Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com>
2017-02-09 14:41:08 +07:00
|
|
|
dma_done(ep, req, req_dma_count, 0);
|
2016-08-12 21:29:34 +07:00
|
|
|
num_completed++;
|
2005-04-17 05:20:36 +07:00
|
|
|
break;
|
2014-05-20 23:30:10 +07:00
|
|
|
} else if (!ep->is_in &&
|
2014-11-28 20:51:00 +07:00
|
|
|
(req->req.length % ep->ep.maxpacket) &&
|
2016-05-23 20:58:41 +07:00
|
|
|
!(ep->dev->quirks & PLX_PCIE)) {
|
2005-04-17 05:20:36 +07:00
|
|
|
|
usb: gadget: udc: net2280: Fix tmp reusage in net2280 driver
In the function scan_dma_completions() there is a reusage of tmp
variable. That coused a wrong value being used in some case when
reading a short packet terminated transaction from an endpoint,
in 2 concecutive reads.
This was my logic for the patch:
The req->td->dmadesc equals to 0 iff:
-- There was a transaction ending with a short packet, and
-- The read() to read it was shorter than the transaction length, and
-- The read() to complete it is longer than the residue.
I believe this is true from the printouts of various cases,
but I can't be positive it is correct.
Entering this if, there should be no more data in the endpoint
(a short packet terminated the transaction).
If there is, the transaction wasn't really done and we should exit and
wait for it to finish entirely. That is the inner if.
That inner if should never happen, but it is there to be on the safe
side. That is why it is marked with the comment /* paranoia */.
The size of the data available in the endpoint is ep->dma->dmacount
and it is read to tmp.
This entire clause is based on my own educated guesses.
If we passed that inner if without breaking in the original code,
than tmp & DMA_BYTE_MASK_COUNT== 0.
That means we will always pass dma bytes count of 0 to dma_done(),
meaning all the requested bytes were read.
dma_done() reports back to the upper layer that the request (read())
was done and how many bytes were read.
In the original code that would always be the request size,
regardless of the actual size of the data.
That did not make sense to me at all.
However, the original value of tmp is req->td->dmacount,
which is the dmacount value when the request's dma transaction was
finished. And that is a much more reasonable value to report back to
the caller.
To recreate the problem:
Read from a bulk out endpoint in a loop, 1024 * n bytes in each
iteration.
Connect the PLX to a host you can control.
Send to that endpoint 1024 * n + x bytes,
such that 0 < x < 1024 * n and (x % 1024) != 0
You would expect the first read() to return 1024 * n
and the second read() to return x.
But you will get the first read to return 1024 * n
and the second one to return 1024 * n.
That is true for every positive integer n.
Cc: Felipe Balbi <balbi@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: linux-usb@vger.kernel.org
Signed-off-by: Raz Manor <Raz.Manor@valens.com>
Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com>
2017-02-09 14:41:08 +07:00
|
|
|
u32 const ep_stat = readl(&ep->regs->ep_stat);
|
2005-04-17 05:20:36 +07:00
|
|
|
/* AVOID TROUBLE HERE by not issuing short reads from
|
|
|
|
* your gadget driver. That helps avoids errata 0121,
|
|
|
|
* 0122, and 0124; not all cases trigger the warning.
|
|
|
|
*/
|
usb: gadget: udc: net2280: Fix tmp reusage in net2280 driver
In the function scan_dma_completions() there is a reusage of tmp
variable. That coused a wrong value being used in some case when
reading a short packet terminated transaction from an endpoint,
in 2 concecutive reads.
This was my logic for the patch:
The req->td->dmadesc equals to 0 iff:
-- There was a transaction ending with a short packet, and
-- The read() to read it was shorter than the transaction length, and
-- The read() to complete it is longer than the residue.
I believe this is true from the printouts of various cases,
but I can't be positive it is correct.
Entering this if, there should be no more data in the endpoint
(a short packet terminated the transaction).
If there is, the transaction wasn't really done and we should exit and
wait for it to finish entirely. That is the inner if.
That inner if should never happen, but it is there to be on the safe
side. That is why it is marked with the comment /* paranoia */.
The size of the data available in the endpoint is ep->dma->dmacount
and it is read to tmp.
This entire clause is based on my own educated guesses.
If we passed that inner if without breaking in the original code,
than tmp & DMA_BYTE_MASK_COUNT== 0.
That means we will always pass dma bytes count of 0 to dma_done(),
meaning all the requested bytes were read.
dma_done() reports back to the upper layer that the request (read())
was done and how many bytes were read.
In the original code that would always be the request size,
regardless of the actual size of the data.
That did not make sense to me at all.
However, the original value of tmp is req->td->dmacount,
which is the dmacount value when the request's dma transaction was
finished. And that is a much more reasonable value to report back to
the caller.
To recreate the problem:
Read from a bulk out endpoint in a loop, 1024 * n bytes in each
iteration.
Connect the PLX to a host you can control.
Send to that endpoint 1024 * n + x bytes,
such that 0 < x < 1024 * n and (x % 1024) != 0
You would expect the first read() to return 1024 * n
and the second read() to return x.
But you will get the first read to return 1024 * n
and the second one to return 1024 * n.
That is true for every positive integer n.
Cc: Felipe Balbi <balbi@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: linux-usb@vger.kernel.org
Signed-off-by: Raz Manor <Raz.Manor@valens.com>
Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com>
2017-02-09 14:41:08 +07:00
|
|
|
if ((ep_stat & BIT(NAK_OUT_PACKETS)) == 0) {
|
2014-05-20 23:30:11 +07:00
|
|
|
ep_warn(ep->dev, "%s lost packet sync!\n",
|
2005-04-17 05:20:36 +07:00
|
|
|
ep->ep.name);
|
|
|
|
req->req.status = -EOVERFLOW;
|
2014-05-20 23:30:09 +07:00
|
|
|
} else {
|
usb: gadget: udc: net2280: Fix tmp reusage in net2280 driver
In the function scan_dma_completions() there is a reusage of tmp
variable. That coused a wrong value being used in some case when
reading a short packet terminated transaction from an endpoint,
in 2 concecutive reads.
This was my logic for the patch:
The req->td->dmadesc equals to 0 iff:
-- There was a transaction ending with a short packet, and
-- The read() to read it was shorter than the transaction length, and
-- The read() to complete it is longer than the residue.
I believe this is true from the printouts of various cases,
but I can't be positive it is correct.
Entering this if, there should be no more data in the endpoint
(a short packet terminated the transaction).
If there is, the transaction wasn't really done and we should exit and
wait for it to finish entirely. That is the inner if.
That inner if should never happen, but it is there to be on the safe
side. That is why it is marked with the comment /* paranoia */.
The size of the data available in the endpoint is ep->dma->dmacount
and it is read to tmp.
This entire clause is based on my own educated guesses.
If we passed that inner if without breaking in the original code,
than tmp & DMA_BYTE_MASK_COUNT== 0.
That means we will always pass dma bytes count of 0 to dma_done(),
meaning all the requested bytes were read.
dma_done() reports back to the upper layer that the request (read())
was done and how many bytes were read.
In the original code that would always be the request size,
regardless of the actual size of the data.
That did not make sense to me at all.
However, the original value of tmp is req->td->dmacount,
which is the dmacount value when the request's dma transaction was
finished. And that is a much more reasonable value to report back to
the caller.
To recreate the problem:
Read from a bulk out endpoint in a loop, 1024 * n bytes in each
iteration.
Connect the PLX to a host you can control.
Send to that endpoint 1024 * n + x bytes,
such that 0 < x < 1024 * n and (x % 1024) != 0
You would expect the first read() to return 1024 * n
and the second read() to return x.
But you will get the first read to return 1024 * n
and the second one to return 1024 * n.
That is true for every positive integer n.
Cc: Felipe Balbi <balbi@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: linux-usb@vger.kernel.org
Signed-off-by: Raz Manor <Raz.Manor@valens.com>
Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com>
2017-02-09 14:41:08 +07:00
|
|
|
u32 const ep_avail = readl(&ep->regs->ep_avail);
|
|
|
|
if (ep_avail) {
|
2014-05-20 23:30:09 +07:00
|
|
|
/* fifo gets flushed later */
|
|
|
|
ep->out_overflow = 1;
|
2014-05-20 23:30:11 +07:00
|
|
|
ep_dbg(ep->dev,
|
2014-05-20 23:30:09 +07:00
|
|
|
"%s dma, discard %d len %d\n",
|
usb: gadget: udc: net2280: Fix tmp reusage in net2280 driver
In the function scan_dma_completions() there is a reusage of tmp
variable. That coused a wrong value being used in some case when
reading a short packet terminated transaction from an endpoint,
in 2 concecutive reads.
This was my logic for the patch:
The req->td->dmadesc equals to 0 iff:
-- There was a transaction ending with a short packet, and
-- The read() to read it was shorter than the transaction length, and
-- The read() to complete it is longer than the residue.
I believe this is true from the printouts of various cases,
but I can't be positive it is correct.
Entering this if, there should be no more data in the endpoint
(a short packet terminated the transaction).
If there is, the transaction wasn't really done and we should exit and
wait for it to finish entirely. That is the inner if.
That inner if should never happen, but it is there to be on the safe
side. That is why it is marked with the comment /* paranoia */.
The size of the data available in the endpoint is ep->dma->dmacount
and it is read to tmp.
This entire clause is based on my own educated guesses.
If we passed that inner if without breaking in the original code,
than tmp & DMA_BYTE_MASK_COUNT== 0.
That means we will always pass dma bytes count of 0 to dma_done(),
meaning all the requested bytes were read.
dma_done() reports back to the upper layer that the request (read())
was done and how many bytes were read.
In the original code that would always be the request size,
regardless of the actual size of the data.
That did not make sense to me at all.
However, the original value of tmp is req->td->dmacount,
which is the dmacount value when the request's dma transaction was
finished. And that is a much more reasonable value to report back to
the caller.
To recreate the problem:
Read from a bulk out endpoint in a loop, 1024 * n bytes in each
iteration.
Connect the PLX to a host you can control.
Send to that endpoint 1024 * n + x bytes,
such that 0 < x < 1024 * n and (x % 1024) != 0
You would expect the first read() to return 1024 * n
and the second read() to return x.
But you will get the first read to return 1024 * n
and the second one to return 1024 * n.
That is true for every positive integer n.
Cc: Felipe Balbi <balbi@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: linux-usb@vger.kernel.org
Signed-off-by: Raz Manor <Raz.Manor@valens.com>
Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com>
2017-02-09 14:41:08 +07:00
|
|
|
ep->ep.name, ep_avail,
|
2005-04-17 05:20:36 +07:00
|
|
|
req->req.length);
|
2014-05-20 23:30:09 +07:00
|
|
|
req->req.status = -EOVERFLOW;
|
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
}
|
usb: gadget: udc: net2280: Fix tmp reusage in net2280 driver
In the function scan_dma_completions() there is a reusage of tmp
variable. That coused a wrong value being used in some case when
reading a short packet terminated transaction from an endpoint,
in 2 concecutive reads.
This was my logic for the patch:
The req->td->dmadesc equals to 0 iff:
-- There was a transaction ending with a short packet, and
-- The read() to read it was shorter than the transaction length, and
-- The read() to complete it is longer than the residue.
I believe this is true from the printouts of various cases,
but I can't be positive it is correct.
Entering this if, there should be no more data in the endpoint
(a short packet terminated the transaction).
If there is, the transaction wasn't really done and we should exit and
wait for it to finish entirely. That is the inner if.
That inner if should never happen, but it is there to be on the safe
side. That is why it is marked with the comment /* paranoia */.
The size of the data available in the endpoint is ep->dma->dmacount
and it is read to tmp.
This entire clause is based on my own educated guesses.
If we passed that inner if without breaking in the original code,
than tmp & DMA_BYTE_MASK_COUNT== 0.
That means we will always pass dma bytes count of 0 to dma_done(),
meaning all the requested bytes were read.
dma_done() reports back to the upper layer that the request (read())
was done and how many bytes were read.
In the original code that would always be the request size,
regardless of the actual size of the data.
That did not make sense to me at all.
However, the original value of tmp is req->td->dmacount,
which is the dmacount value when the request's dma transaction was
finished. And that is a much more reasonable value to report back to
the caller.
To recreate the problem:
Read from a bulk out endpoint in a loop, 1024 * n bytes in each
iteration.
Connect the PLX to a host you can control.
Send to that endpoint 1024 * n + x bytes,
such that 0 < x < 1024 * n and (x % 1024) != 0
You would expect the first read() to return 1024 * n
and the second read() to return x.
But you will get the first read to return 1024 * n
and the second one to return 1024 * n.
That is true for every positive integer n.
Cc: Felipe Balbi <balbi@kernel.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: linux-usb@vger.kernel.org
Signed-off-by: Raz Manor <Raz.Manor@valens.com>
Signed-off-by: Felipe Balbi <felipe.balbi@linux.intel.com>
2017-02-09 14:41:08 +07:00
|
|
|
dma_done(ep, req, req_dma_count, 0);
|
2016-08-12 21:29:34 +07:00
|
|
|
num_completed++;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
2016-08-12 21:29:34 +07:00
|
|
|
|
|
|
|
return num_completed;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
static void restart_dma(struct net2280_ep *ep)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
|
|
|
struct net2280_request *req;
|
|
|
|
|
|
|
|
if (ep->stopped)
|
|
|
|
return;
|
2014-05-20 23:30:09 +07:00
|
|
|
req = list_entry(ep->queue.next, struct net2280_request, queue);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-11-28 20:50:46 +07:00
|
|
|
start_dma(ep, req);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
2014-11-28 20:50:55 +07:00
|
|
|
static void abort_dma(struct net2280_ep *ep)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
|
|
|
/* abort the current transfer */
|
2014-05-20 23:30:09 +07:00
|
|
|
if (likely(!list_empty(&ep->queue))) {
|
2005-04-17 05:20:36 +07:00
|
|
|
/* FIXME work around errata 0121, 0122, 0124 */
|
2014-05-20 23:30:05 +07:00
|
|
|
writel(BIT(DMA_ABORT), &ep->dma->dmastat);
|
2014-05-20 23:30:09 +07:00
|
|
|
spin_stop_dma(ep->dma);
|
2005-04-17 05:20:36 +07:00
|
|
|
} else
|
2014-05-20 23:30:09 +07:00
|
|
|
stop_dma(ep->dma);
|
|
|
|
scan_dma_completions(ep);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* dequeue ALL requests */
|
2014-05-20 23:30:09 +07:00
|
|
|
static void nuke(struct net2280_ep *ep)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
|
|
|
struct net2280_request *req;
|
|
|
|
|
|
|
|
/* called with spinlock held */
|
|
|
|
ep->stopped = 1;
|
|
|
|
if (ep->dma)
|
2014-05-20 23:30:09 +07:00
|
|
|
abort_dma(ep);
|
|
|
|
while (!list_empty(&ep->queue)) {
|
|
|
|
req = list_entry(ep->queue.next,
|
2005-04-17 05:20:36 +07:00
|
|
|
struct net2280_request,
|
|
|
|
queue);
|
2014-05-20 23:30:09 +07:00
|
|
|
done(ep, req, -ESHUTDOWN);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* dequeue JUST ONE request */
|
2014-05-20 23:30:09 +07:00
|
|
|
static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
|
|
|
struct net2280_ep *ep;
|
|
|
|
struct net2280_request *req;
|
|
|
|
unsigned long flags;
|
|
|
|
u32 dmactl;
|
|
|
|
int stopped;
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
ep = container_of(_ep, struct net2280_ep, ep);
|
2015-02-02 16:55:25 +07:00
|
|
|
if (!_ep || (!ep->desc && ep->num != 0) || !_req) {
|
|
|
|
pr_err("%s: Invalid ep=%p or ep->desc or req=%p\n",
|
|
|
|
__func__, _ep, _req);
|
2005-04-17 05:20:36 +07:00
|
|
|
return -EINVAL;
|
2015-02-02 16:55:25 +07:00
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
spin_lock_irqsave(&ep->dev->lock, flags);
|
2005-04-17 05:20:36 +07:00
|
|
|
stopped = ep->stopped;
|
|
|
|
|
|
|
|
/* quiesce dma while we patch the queue */
|
|
|
|
dmactl = 0;
|
|
|
|
ep->stopped = 1;
|
|
|
|
if (ep->dma) {
|
2014-05-20 23:30:09 +07:00
|
|
|
dmactl = readl(&ep->dma->dmactl);
|
2005-04-17 05:20:36 +07:00
|
|
|
/* WARNING erratum 0127 may kick in ... */
|
2014-05-20 23:30:09 +07:00
|
|
|
stop_dma(ep->dma);
|
|
|
|
scan_dma_completions(ep);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* make sure it's still queued on this endpoint */
|
2014-05-20 23:30:09 +07:00
|
|
|
list_for_each_entry(req, &ep->queue, queue) {
|
2005-04-17 05:20:36 +07:00
|
|
|
if (&req->req == _req)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (&req->req != _req) {
|
2014-05-20 23:30:09 +07:00
|
|
|
spin_unlock_irqrestore(&ep->dev->lock, flags);
|
2015-02-02 16:55:25 +07:00
|
|
|
dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n",
|
|
|
|
__func__);
|
2005-04-17 05:20:36 +07:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* queue head may be partially complete. */
|
|
|
|
if (ep->queue.next == &req->queue) {
|
|
|
|
if (ep->dma) {
|
2014-05-20 23:30:11 +07:00
|
|
|
ep_dbg(ep->dev, "unlink (%s) dma\n", _ep->name);
|
2005-04-17 05:20:36 +07:00
|
|
|
_req->status = -ECONNRESET;
|
2014-05-20 23:30:09 +07:00
|
|
|
abort_dma(ep);
|
|
|
|
if (likely(ep->queue.next == &req->queue)) {
|
|
|
|
/* NOTE: misreports single-transfer mode*/
|
2005-04-17 05:20:36 +07:00
|
|
|
req->td->dmacount = 0; /* invalidate */
|
2014-05-20 23:30:09 +07:00
|
|
|
dma_done(ep, req,
|
|
|
|
readl(&ep->dma->dmacount),
|
2005-04-17 05:20:36 +07:00
|
|
|
-ECONNRESET);
|
|
|
|
}
|
|
|
|
} else {
|
2014-05-20 23:30:11 +07:00
|
|
|
ep_dbg(ep->dev, "unlink (%s) pio\n", _ep->name);
|
2014-05-20 23:30:09 +07:00
|
|
|
done(ep, req, -ECONNRESET);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
req = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (req)
|
2014-05-20 23:30:09 +07:00
|
|
|
done(ep, req, -ECONNRESET);
|
2005-04-17 05:20:36 +07:00
|
|
|
ep->stopped = stopped;
|
|
|
|
|
|
|
|
if (ep->dma) {
|
|
|
|
/* turn off dma on inactive queues */
|
2014-05-20 23:30:09 +07:00
|
|
|
if (list_empty(&ep->queue))
|
|
|
|
stop_dma(ep->dma);
|
2005-04-17 05:20:36 +07:00
|
|
|
else if (!ep->stopped) {
|
|
|
|
/* resume current request, or start new one */
|
|
|
|
if (req)
|
2014-05-20 23:30:09 +07:00
|
|
|
writel(dmactl, &ep->dma->dmactl);
|
2005-04-17 05:20:36 +07:00
|
|
|
else
|
2014-05-20 23:30:09 +07:00
|
|
|
start_dma(ep, list_entry(ep->queue.next,
|
2005-04-17 05:20:36 +07:00
|
|
|
struct net2280_request, queue));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
spin_unlock_irqrestore(&ep->dev->lock, flags);
|
2005-04-17 05:20:36 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*-------------------------------------------------------------------------*/
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
static int net2280_fifo_status(struct usb_ep *_ep);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
static int
|
2008-08-15 02:49:11 +07:00
|
|
|
net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
|
|
|
struct net2280_ep *ep;
|
|
|
|
unsigned long flags;
|
|
|
|
int retval = 0;
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
ep = container_of(_ep, struct net2280_ep, ep);
|
2015-02-02 16:55:25 +07:00
|
|
|
if (!_ep || (!ep->desc && ep->num != 0)) {
|
|
|
|
pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
|
2005-04-17 05:20:36 +07:00
|
|
|
return -EINVAL;
|
2015-02-02 16:55:25 +07:00
|
|
|
}
|
|
|
|
if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
|
|
|
|
retval = -ESHUTDOWN;
|
|
|
|
goto print_err;
|
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
|
2015-02-02 16:55:25 +07:00
|
|
|
== USB_ENDPOINT_XFER_ISOC) {
|
|
|
|
retval = -EINVAL;
|
|
|
|
goto print_err;
|
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
spin_lock_irqsave(&ep->dev->lock, flags);
|
2015-02-02 16:55:25 +07:00
|
|
|
if (!list_empty(&ep->queue)) {
|
2005-04-17 05:20:36 +07:00
|
|
|
retval = -EAGAIN;
|
2015-02-02 16:55:25 +07:00
|
|
|
goto print_unlock;
|
|
|
|
} else if (ep->is_in && value && net2280_fifo_status(_ep) != 0) {
|
2005-04-17 05:20:36 +07:00
|
|
|
retval = -EAGAIN;
|
2015-02-02 16:55:25 +07:00
|
|
|
goto print_unlock;
|
|
|
|
} else {
|
2014-05-20 23:30:11 +07:00
|
|
|
ep_vdbg(ep->dev, "%s %s %s\n", _ep->name,
|
2008-08-15 02:49:11 +07:00
|
|
|
value ? "set" : "clear",
|
|
|
|
wedged ? "wedge" : "halt");
|
2005-04-17 05:20:36 +07:00
|
|
|
/* set/clear, then synch memory views with the device */
|
|
|
|
if (value) {
|
|
|
|
if (ep->num == 0)
|
|
|
|
ep->dev->protocol_stall = 1;
|
|
|
|
else
|
2014-05-20 23:30:09 +07:00
|
|
|
set_halt(ep);
|
2008-08-15 02:49:11 +07:00
|
|
|
if (wedged)
|
|
|
|
ep->wedged = 1;
|
|
|
|
} else {
|
2014-05-20 23:30:09 +07:00
|
|
|
clear_halt(ep);
|
2016-05-23 20:58:41 +07:00
|
|
|
if (ep->dev->quirks & PLX_PCIE &&
|
2014-05-20 23:30:03 +07:00
|
|
|
!list_empty(&ep->queue) && ep->td_dma)
|
|
|
|
restart_dma(ep);
|
2008-08-15 02:49:11 +07:00
|
|
|
ep->wedged = 0;
|
|
|
|
}
|
2014-05-20 23:30:09 +07:00
|
|
|
(void) readl(&ep->regs->ep_rsp);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
2014-05-20 23:30:09 +07:00
|
|
|
spin_unlock_irqrestore(&ep->dev->lock, flags);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
return retval;
|
2015-02-02 16:55:25 +07:00
|
|
|
|
|
|
|
print_unlock:
|
|
|
|
spin_unlock_irqrestore(&ep->dev->lock, flags);
|
|
|
|
print_err:
|
|
|
|
dev_err(&ep->dev->pdev->dev, "%s: error=%d\n", __func__, retval);
|
|
|
|
return retval;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
/* usb_ep_ops.set_halt - halt (value != 0) or un-halt the endpoint */
static int net2280_set_halt(struct usb_ep *_ep, int value)
{
	return net2280_set_halt_and_wedge(_ep, value, 0);
}
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
static int net2280_set_wedge(struct usb_ep *_ep)
|
2008-08-15 02:49:11 +07:00
|
|
|
{
|
2015-02-02 16:55:25 +07:00
|
|
|
if (!_ep || _ep->name == ep0name) {
|
|
|
|
pr_err("%s: Invalid ep=%p or ep0\n", __func__, _ep);
|
2008-08-15 02:49:11 +07:00
|
|
|
return -EINVAL;
|
2015-02-02 16:55:25 +07:00
|
|
|
}
|
2008-08-15 02:49:11 +07:00
|
|
|
return net2280_set_halt_and_wedge(_ep, 1, 1);
|
|
|
|
}
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
static int net2280_fifo_status(struct usb_ep *_ep)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
|
|
|
struct net2280_ep *ep;
|
|
|
|
u32 avail;
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
ep = container_of(_ep, struct net2280_ep, ep);
|
2015-02-02 16:55:25 +07:00
|
|
|
if (!_ep || (!ep->desc && ep->num != 0)) {
|
|
|
|
pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
|
2005-04-17 05:20:36 +07:00
|
|
|
return -ENODEV;
|
2015-02-02 16:55:25 +07:00
|
|
|
}
|
|
|
|
if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
|
|
|
|
dev_err(&ep->dev->pdev->dev,
|
|
|
|
"%s: Invalid driver=%p or speed=%d\n",
|
|
|
|
__func__, ep->dev->driver, ep->dev->gadget.speed);
|
2005-04-17 05:20:36 +07:00
|
|
|
return -ESHUTDOWN;
|
2015-02-02 16:55:25 +07:00
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-05-20 23:30:05 +07:00
|
|
|
avail = readl(&ep->regs->ep_avail) & (BIT(12) - 1);
|
2015-02-02 16:55:25 +07:00
|
|
|
if (avail > ep->fifo_size) {
|
|
|
|
dev_err(&ep->dev->pdev->dev, "%s: Fifo overflow\n", __func__);
|
2005-04-17 05:20:36 +07:00
|
|
|
return -EOVERFLOW;
|
2015-02-02 16:55:25 +07:00
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
if (ep->is_in)
|
|
|
|
avail = ep->fifo_size - avail;
|
|
|
|
return avail;
|
|
|
|
}
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
static void net2280_fifo_flush(struct usb_ep *_ep)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
|
|
|
struct net2280_ep *ep;
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
ep = container_of(_ep, struct net2280_ep, ep);
|
2015-02-02 16:55:25 +07:00
|
|
|
if (!_ep || (!ep->desc && ep->num != 0)) {
|
|
|
|
pr_err("%s: Invalid ep=%p or ep->desc\n", __func__, _ep);
|
2005-04-17 05:20:36 +07:00
|
|
|
return;
|
2015-02-02 16:55:25 +07:00
|
|
|
}
|
|
|
|
if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN) {
|
|
|
|
dev_err(&ep->dev->pdev->dev,
|
|
|
|
"%s: Invalid driver=%p or speed=%d\n",
|
|
|
|
__func__, ep->dev->driver, ep->dev->gadget.speed);
|
2005-04-17 05:20:36 +07:00
|
|
|
return;
|
2015-02-02 16:55:25 +07:00
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-05-20 23:30:05 +07:00
|
|
|
writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
|
2014-05-20 23:30:09 +07:00
|
|
|
(void) readl(&ep->regs->ep_rsp);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
2006-09-02 17:13:45 +07:00
|
|
|
/* per-endpoint operations handed to the gadget core via ep->ops */
static const struct usb_ep_ops net2280_ep_ops = {
	.enable		= net2280_enable,
	.disable	= net2280_disable,

	.alloc_request	= net2280_alloc_request,
	.free_request	= net2280_free_request,

	.queue		= net2280_queue,
	.dequeue	= net2280_dequeue,

	.set_halt	= net2280_set_halt,
	.set_wedge	= net2280_set_wedge,
	.fifo_status	= net2280_fifo_status,
	.fifo_flush	= net2280_fifo_flush,
};
|
|
|
|
|
|
|
|
/*-------------------------------------------------------------------------*/
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
static int net2280_get_frame(struct usb_gadget *_gadget)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
|
|
|
struct net2280 *dev;
|
|
|
|
unsigned long flags;
|
|
|
|
u16 retval;
|
|
|
|
|
|
|
|
if (!_gadget)
|
|
|
|
return -ENODEV;
|
2014-05-20 23:30:09 +07:00
|
|
|
dev = container_of(_gadget, struct net2280, gadget);
|
|
|
|
spin_lock_irqsave(&dev->lock, flags);
|
|
|
|
retval = get_idx_reg(dev->regs, REG_FRAME) & 0x03ff;
|
|
|
|
spin_unlock_irqrestore(&dev->lock, flags);
|
2005-04-17 05:20:36 +07:00
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
static int net2280_wakeup(struct usb_gadget *_gadget)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
|
|
|
struct net2280 *dev;
|
|
|
|
u32 tmp;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
if (!_gadget)
|
|
|
|
return 0;
|
2014-05-20 23:30:09 +07:00
|
|
|
dev = container_of(_gadget, struct net2280, gadget);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
spin_lock_irqsave(&dev->lock, flags);
|
|
|
|
tmp = readl(&dev->usb->usbctl);
|
2014-05-20 23:30:05 +07:00
|
|
|
if (tmp & BIT(DEVICE_REMOTE_WAKEUP_ENABLE))
|
|
|
|
writel(BIT(GENERATE_RESUME), &dev->usb->usbstat);
|
2014-05-20 23:30:09 +07:00
|
|
|
spin_unlock_irqrestore(&dev->lock, flags);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* pci writes may still be posted */
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
static int net2280_set_selfpowered(struct usb_gadget *_gadget, int value)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
|
|
|
struct net2280 *dev;
|
|
|
|
u32 tmp;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
if (!_gadget)
|
|
|
|
return 0;
|
2014-05-20 23:30:09 +07:00
|
|
|
dev = container_of(_gadget, struct net2280, gadget);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
spin_lock_irqsave(&dev->lock, flags);
|
|
|
|
tmp = readl(&dev->usb->usbctl);
|
2014-05-20 23:30:03 +07:00
|
|
|
if (value) {
|
2014-05-20 23:30:05 +07:00
|
|
|
tmp |= BIT(SELF_POWERED_STATUS);
|
2015-01-28 15:32:35 +07:00
|
|
|
_gadget->is_selfpowered = 1;
|
2014-05-20 23:30:03 +07:00
|
|
|
} else {
|
2014-05-20 23:30:05 +07:00
|
|
|
tmp &= ~BIT(SELF_POWERED_STATUS);
|
2015-01-28 15:32:35 +07:00
|
|
|
_gadget->is_selfpowered = 0;
|
2014-05-20 23:30:03 +07:00
|
|
|
}
|
2014-05-20 23:30:09 +07:00
|
|
|
writel(tmp, &dev->usb->usbctl);
|
|
|
|
spin_unlock_irqrestore(&dev->lock, flags);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
|
|
|
|
{
|
|
|
|
struct net2280 *dev;
|
|
|
|
u32 tmp;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
if (!_gadget)
|
|
|
|
return -ENODEV;
|
2014-05-20 23:30:09 +07:00
|
|
|
dev = container_of(_gadget, struct net2280, gadget);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
spin_lock_irqsave(&dev->lock, flags);
|
|
|
|
tmp = readl(&dev->usb->usbctl);
|
2005-04-17 05:20:36 +07:00
|
|
|
dev->softconnect = (is_on != 0);
|
2015-05-17 03:33:39 +07:00
|
|
|
if (is_on) {
|
|
|
|
ep0_start(dev);
|
|
|
|
writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
|
|
|
|
} else {
|
|
|
|
writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
|
|
|
|
stop_activity(dev, dev->driver);
|
|
|
|
}
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
spin_unlock_irqrestore(&dev->lock, flags);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-08-06 19:11:13 +07:00
|
|
|
static struct usb_ep *net2280_match_ep(struct usb_gadget *_gadget,
|
|
|
|
struct usb_endpoint_descriptor *desc,
|
|
|
|
struct usb_ss_ep_comp_descriptor *ep_comp)
|
|
|
|
{
|
|
|
|
char name[8];
|
|
|
|
struct usb_ep *ep;
|
|
|
|
|
|
|
|
if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT) {
|
|
|
|
/* ep-e, ep-f are PIO with only 64 byte fifos */
|
|
|
|
ep = gadget_find_ep_by_name(_gadget, "ep-e");
|
|
|
|
if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
|
|
|
|
return ep;
|
|
|
|
ep = gadget_find_ep_by_name(_gadget, "ep-f");
|
|
|
|
if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
|
|
|
|
return ep;
|
|
|
|
}
|
|
|
|
|
2016-08-12 21:29:35 +07:00
|
|
|
/* USB3380: Only first four endpoints have DMA channels. Allocate
|
|
|
|
* slower interrupt endpoints from PIO hw endpoints, to allow bulk/isoc
|
|
|
|
* endpoints use DMA hw endpoints.
|
|
|
|
*/
|
|
|
|
if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
|
|
|
|
usb_endpoint_dir_in(desc)) {
|
|
|
|
ep = gadget_find_ep_by_name(_gadget, "ep2in");
|
|
|
|
if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
|
|
|
|
return ep;
|
|
|
|
ep = gadget_find_ep_by_name(_gadget, "ep4in");
|
|
|
|
if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
|
|
|
|
return ep;
|
|
|
|
} else if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
|
|
|
|
!usb_endpoint_dir_in(desc)) {
|
|
|
|
ep = gadget_find_ep_by_name(_gadget, "ep1out");
|
|
|
|
if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
|
|
|
|
return ep;
|
|
|
|
ep = gadget_find_ep_by_name(_gadget, "ep3out");
|
|
|
|
if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
|
|
|
|
return ep;
|
|
|
|
} else if (usb_endpoint_type(desc) != USB_ENDPOINT_XFER_BULK &&
|
|
|
|
usb_endpoint_dir_in(desc)) {
|
|
|
|
ep = gadget_find_ep_by_name(_gadget, "ep1in");
|
|
|
|
if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
|
|
|
|
return ep;
|
|
|
|
ep = gadget_find_ep_by_name(_gadget, "ep3in");
|
|
|
|
if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
|
|
|
|
return ep;
|
|
|
|
} else if (usb_endpoint_type(desc) != USB_ENDPOINT_XFER_BULK &&
|
|
|
|
!usb_endpoint_dir_in(desc)) {
|
|
|
|
ep = gadget_find_ep_by_name(_gadget, "ep2out");
|
|
|
|
if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
|
|
|
|
return ep;
|
|
|
|
ep = gadget_find_ep_by_name(_gadget, "ep4out");
|
|
|
|
if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
|
|
|
|
return ep;
|
|
|
|
}
|
|
|
|
|
2015-08-06 19:11:13 +07:00
|
|
|
/* USB3380: use same address for usb and hardware endpoints */
|
|
|
|
snprintf(name, sizeof(name), "ep%d%s", usb_endpoint_num(desc),
|
|
|
|
usb_endpoint_dir_in(desc) ? "in" : "out");
|
|
|
|
ep = gadget_find_ep_by_name(_gadget, name);
|
|
|
|
if (ep && usb_gadget_ep_match_desc(_gadget, ep, desc, ep_comp))
|
|
|
|
return ep;
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2011-10-10 14:37:17 +07:00
|
|
|
static int net2280_start(struct usb_gadget *_gadget,
|
|
|
|
struct usb_gadget_driver *driver);
|
2014-10-18 00:05:12 +07:00
|
|
|
static int net2280_stop(struct usb_gadget *_gadget);
|
2011-06-28 20:33:47 +07:00
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/* gadget-level operations handed to the UDC core */
static const struct usb_gadget_ops net2280_ops = {
	.get_frame	= net2280_get_frame,
	.wakeup		= net2280_wakeup,
	.set_selfpowered = net2280_set_selfpowered,
	.pullup		= net2280_pullup,
	.udc_start	= net2280_start,
	.udc_stop	= net2280_stop,
	.match_ep	= net2280_match_ep,
};
|
|
|
|
|
|
|
|
/*-------------------------------------------------------------------------*/
|
|
|
|
|
2014-07-04 16:27:03 +07:00
|
|
|
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* FIXME move these into procfs, and use seq_file.
|
|
|
|
* Sysfs _still_ doesn't behave for arbitrarily sized files,
|
|
|
|
* and also doesn't help products using this with 2.4 kernels.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* "function" sysfs attribute */
|
2013-08-24 06:34:43 +07:00
|
|
|
/*
 * "function" sysfs attribute: name of the currently bound gadget function
 * driver, or empty when none is bound (or its name would not fit the page).
 */
static ssize_t function_show(struct device *_dev, struct device_attribute *attr,
			     char *buf)
{
	struct net2280 *dev = dev_get_drvdata(_dev);

	if (!dev->driver || !dev->driver->function ||
	    strlen(dev->driver->function) > PAGE_SIZE)
		return 0;
	return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
}
static DEVICE_ATTR_RO(function);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2013-08-24 06:34:43 +07:00
|
|
|
/*
 * "registers" sysfs attribute: dump the main control, USB control,
 * per-endpoint and DMA registers plus per-endpoint irq statistics into one
 * page.  Taken under the device lock so the snapshot is consistent.
 * Returns the number of bytes written into @buf.
 */
static ssize_t registers_show(struct device *_dev,
			      struct device_attribute *attr, char *buf)
{
	struct net2280 *dev;
	char *next;
	unsigned size, t;
	unsigned long flags;
	int i;
	u32 t1, t2;
	const char *s;

	dev = dev_get_drvdata(_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave(&dev->lock, flags);

	if (dev->driver)
		s = dev->driver->driver.name;
	else
		s = "(none)";

	/* Main Control Registers */
	t = scnprintf(next, size, "%s version " DRIVER_VERSION
			", chiprev %04x\n\n"
			"devinit %03x fifoctl %08x gadget '%s'\n"
			"pci irqenb0 %02x irqenb1 %08x "
			"irqstat0 %04x irqstat1 %08x\n",
			driver_name, dev->chiprev,
			readl(&dev->regs->devinit),
			readl(&dev->regs->fifoctl),
			s,
			readl(&dev->regs->pciirqenb0),
			readl(&dev->regs->pciirqenb1),
			readl(&dev->regs->irqstat0),
			readl(&dev->regs->irqstat1));
	size -= t;
	next += t;

	/* USB Control Registers */
	t1 = readl(&dev->usb->usbctl);
	t2 = readl(&dev->usb->usbstat);
	if (t1 & BIT(VBUS_PIN)) {
		if (t2 & BIT(HIGH_SPEED))
			s = "high speed";
		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
			s = "powered";
		else
			s = "full speed";
		/* full speed bit (6) not working?? */
	} else
		s = "not attached";
	t = scnprintf(next, size,
			"stdrsp %08x usbctl %08x usbstat %08x "
			"addr 0x%02x (%s)\n",
			readl(&dev->usb->stdrsp), t1, t2,
			readl(&dev->usb->ouraddr), s);
	size -= t;
	next += t;

	/* PCI Master Control Registers */

	/* DMA Control Registers */

	/* Configurable EP Control Registers */
	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep *ep;

		ep = &dev->ep[i];
		/* only ep0 is dumped when unconfigured */
		if (i && !ep->desc)
			continue;

		t1 = readl(&ep->cfg->ep_cfg);
		t2 = readl(&ep->regs->ep_rsp) & 0xff;
		t = scnprintf(next, size,
				"\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
					"irqenb %02x\n",
				ep->ep.name, t1, t2,
				(t2 & BIT(CLEAR_NAK_OUT_PACKETS))
					? "NAK " : "",
				(t2 & BIT(CLEAR_EP_HIDE_STATUS_PHASE))
					? "hide " : "",
				(t2 & BIT(CLEAR_EP_FORCE_CRC_ERROR))
					? "CRC " : "",
				(t2 & BIT(CLEAR_INTERRUPT_MODE))
					? "interrupt " : "",
				(t2 & BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
					? "status " : "",
				(t2 & BIT(CLEAR_NAK_OUT_PACKETS_MODE))
					? "NAKmode " : "",
				(t2 & BIT(CLEAR_ENDPOINT_TOGGLE))
					? "DATA1 " : "DATA0 ",
				(t2 & BIT(CLEAR_ENDPOINT_HALT))
					? "HALT " : "",
				readl(&ep->regs->ep_irqenb));
		size -= t;
		next += t;

		t = scnprintf(next, size,
				"\tstat %08x avail %04x "
				"(ep%d%s-%s)%s\n",
				readl(&ep->regs->ep_stat),
				readl(&ep->regs->ep_avail),
				t1 & 0x0f, DIR_STRING(t1),
				type_string(t1 >> 8),
				ep->stopped ? "*" : "");
		size -= t;
		next += t;

		if (!ep->dma)
			continue;

		t = scnprintf(next, size,
				" dma\tctl %08x stat %08x count %08x\n"
				"\taddr %08x desc %08x\n",
				readl(&ep->dma->dmactl),
				readl(&ep->dma->dmastat),
				readl(&ep->dma->dmacount),
				readl(&ep->dma->dmaaddr),
				readl(&ep->dma->dmadesc));
		size -= t;
		next += t;

	}

	/* Indexed Registers (none yet) */

	/* Statistics */
	t = scnprintf(next, size, "\nirqs: ");
	size -= t;
	next += t;
	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep *ep;

		ep = &dev->ep[i];
		if (i && !ep->irqs)
			continue;
		t = scnprintf(next, size, " %s/%lu", ep->ep.name, ep->irqs);
		size -= t;
		next += t;

	}
	t = scnprintf(next, size, "\n");
	size -= t;
	next += t;

	spin_unlock_irqrestore(&dev->lock, flags);

	return PAGE_SIZE - size;
}
static DEVICE_ATTR_RO(registers);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2013-08-24 06:34:43 +07:00
|
|
|
/*
 * "queues" sysfs attribute: for every configured endpoint, print its
 * configuration line followed by each queued request (and its DMA
 * descriptor when the endpoint runs in DMA mode).  The loop bails out as
 * soon as a scnprintf result would not fit the remaining page space.
 * Returns the number of bytes written into @buf.
 */
static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
			   char *buf)
{
	struct net2280 *dev;
	char *next;
	unsigned size;
	unsigned long flags;
	int i;

	dev = dev_get_drvdata(_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave(&dev->lock, flags);

	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep *ep = &dev->ep[i];
		struct net2280_request *req;
		int t;

		if (i != 0) {
			const struct usb_endpoint_descriptor *d;

			d = ep->desc;
			if (!d)
				continue;
			/* t temporarily holds bEndpointAddress, then is
			 * reused for the scnprintf byte count */
			t = d->bEndpointAddress;
			t = scnprintf(next, size,
				"\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
				ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
				(t & USB_DIR_IN) ? "in" : "out",
				type_string(d->bmAttributes),
				usb_endpoint_maxp(d),
				ep->dma ? "dma" : "pio", ep->fifo_size
				);
		} else /* ep0 should only have one transfer queued */
			t = scnprintf(next, size, "ep0 max 64 pio %s\n",
					ep->is_in ? "in" : "out");
		if (t <= 0 || t > size)
			goto done;
		size -= t;
		next += t;

		if (list_empty(&ep->queue)) {
			t = scnprintf(next, size, "\t(nothing queued)\n");
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
			continue;
		}
		list_for_each_entry(req, &ep->queue, queue) {
			/* mark the request the DMA engine is working on */
			if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc))
				t = scnprintf(next, size,
					"\treq %p len %d/%d "
					"buf %p (dmacount %08x)\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf,
					readl(&ep->dma->dmacount));
			else
				t = scnprintf(next, size,
					"\treq %p len %d/%d buf %p\n",
					&req->req, req->req.actual,
					req->req.length, req->req.buf);
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;

			if (ep->dma) {
				struct net2280_dma *td;

				td = req->td;
				t = scnprintf(next, size, "\t td %08x "
					" count %08x buf %08x desc %08x\n",
					(u32) req->td_dma,
					le32_to_cpu(td->dmacount),
					le32_to_cpu(td->dmaaddr),
					le32_to_cpu(td->dmadesc));
				if (t <= 0 || t > size)
					goto done;
				size -= t;
				next += t;
			}
		}
	}

done:
	spin_unlock_irqrestore(&dev->lock, flags);
	return PAGE_SIZE - size;
}
static DEVICE_ATTR_RO(queues);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
|
|
|
|
#else
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
#define device_create_file(a, b) (0)
|
|
|
|
#define device_remove_file(a, b) do { } while (0)
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*-------------------------------------------------------------------------*/
|
|
|
|
|
|
|
|
/* another driver-specific mode might be a request type doing dma
|
|
|
|
* to/from another device fifo instead of to/from memory.
|
|
|
|
*/
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
/*
 * set_fifo_mode - program the 228x FIFO allocation and rebuild the gadget
 * endpoint list to match.
 * @mode: 0 = four 1K FIFOs (ep-a..ep-d usable), 1 = two 2K FIFOs (only
 *        ep-a/ep-b), 2 = one 2K + two 1K (ep-a..ep-c).  Callers clamp
 *        mode to this range.
 */
static void set_fifo_mode(struct net2280 *dev, int mode)
{
	/* keeping high bits preserves BAR2 */
	writel((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);

	/* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
	list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
	switch (mode) {
	case 0:
		list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
		list_add_tail(&dev->ep[4].ep.ep_list, &dev->gadget.ep_list);
		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
		break;
	case 1:
		dev->ep[1].fifo_size = dev->ep[2].fifo_size = 2048;
		break;
	case 2:
		list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
		dev->ep[1].fifo_size = 2048;
		dev->ep[2].fifo_size = 1024;
		break;
	}
	/* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
	list_add_tail(&dev->ep[5].ep.ep_list, &dev->gadget.ep_list);
	list_add_tail(&dev->ep[6].ep.ep_list, &dev->gadget.ep_list);
}
|
|
|
|
|
2014-05-20 23:30:03 +07:00
|
|
|
static void defect7374_disable_data_eps(struct net2280 *dev)
{
	/*
	 * For Defect 7374, disable data EPs (and more):
	 * - This phase undoes the earlier phase of the Defect 7374 workaround,
	 * returning ep regs back to normal.
	 */
	struct net2280_ep *ep;
	int i;
	unsigned char ep_sel;
	u32 tmp_reg;

	/* restore default configuration on the four GPEPs */
	for (i = 1; i < 5; i++) {
		ep = &dev->ep[i];
		writel(i, &ep->cfg->ep_cfg);
	}

	/* CSROUT, CSRIN, PCIOUT, PCIIN, STATIN, RCIN */
	for (i = 0; i < 6; i++)
		writel(0, &dev->dep[i].dep_cfg);

	for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
		/* Select an endpoint for subsequent operations: */
		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
		writel(((tmp_reg & ~0x1f) | ep_sel), &dev->plregs->pl_ep_ctrl);

		/* skip the endpoints the workaround never touched */
		if (ep_sel < 2 || (ep_sel > 9 && ep_sel < 14) ||
				ep_sel == 18 || ep_sel == 20)
			continue;

		/* Change settings on some selected endpoints */
		tmp_reg = readl(&dev->plregs->pl_ep_cfg_4);
		tmp_reg &= ~BIT(NON_CTRL_IN_TOLERATE_BAD_DIR);
		writel(tmp_reg, &dev->plregs->pl_ep_cfg_4);
		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
		tmp_reg |= BIT(EP_INITIALIZED);
		writel(tmp_reg, &dev->plregs->pl_ep_ctrl);
	}
}
|
|
|
|
|
|
|
|
/*
 * defect7374_enable_data_eps_zero - first phase of the USB338x Defect 7374
 * workaround: pre-enable the data endpoints and arm the SCRATCH-register
 * state machine so the workaround can complete on the first control read.
 */
static void defect7374_enable_data_eps_zero(struct net2280 *dev)
{
	u32 tmp = 0, tmp_reg;
	u32 scratch;
	int i;
	unsigned char ep_sel;

	scratch = get_idx_reg(dev->regs, SCRATCH);

	/* this phase must not run once the FSM reached SS-control-read */
	WARN_ON((scratch & (0xf << DEFECT7374_FSM_FIELD))
		== DEFECT7374_FSM_SS_CONTROL_READ);

	scratch &= ~(0xf << DEFECT7374_FSM_FIELD);

	ep_warn(dev, "Operate Defect 7374 workaround soft this time");
	ep_warn(dev, "It will operate on cold-reboot and SS connect");

	/*GPEPs:*/
	tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) |
			(2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) |
			((dev->enhanced_mode) ?
			 BIT(OUT_ENDPOINT_ENABLE) | BIT(IN_ENDPOINT_ENABLE) :
			 BIT(ENDPOINT_ENABLE)));

	for (i = 1; i < 5; i++)
		writel(tmp, &dev->ep[i].cfg->ep_cfg);

	/* CSRIN, PCIIN, STATIN, RCIN*/
	tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE));
	writel(tmp, &dev->dep[1].dep_cfg);
	writel(tmp, &dev->dep[3].dep_cfg);
	writel(tmp, &dev->dep[4].dep_cfg);
	writel(tmp, &dev->dep[5].dep_cfg);

	/*Implemented for development and debug.
	 * Can be refined/tuned later.*/
	for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
		/* Select an endpoint for subsequent operations: */
		tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
		writel(((tmp_reg & ~0x1f) | ep_sel),
				&dev->plregs->pl_ep_ctrl);

		if (ep_sel == 1) {
			tmp =
				(readl(&dev->plregs->pl_ep_ctrl) |
				 BIT(CLEAR_ACK_ERROR_CODE) | 0);
			writel(tmp, &dev->plregs->pl_ep_ctrl);
			continue;
		}

		/* only the data endpoints get the settings below */
		if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) ||
				ep_sel == 18 || ep_sel == 20)
			continue;

		tmp = (readl(&dev->plregs->pl_ep_cfg_4) |
				BIT(NON_CTRL_IN_TOLERATE_BAD_DIR) | 0);
		writel(tmp, &dev->plregs->pl_ep_cfg_4);

		tmp = readl(&dev->plregs->pl_ep_ctrl) &
			~BIT(EP_INITIALIZED);
		writel(tmp, &dev->plregs->pl_ep_ctrl);

	}

	/* Set FSM to focus on the first Control Read:
	 * - Tip: Connection speed is known upon the first
	 * setup request.*/
	scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ;
	set_idx_reg(dev->regs, SCRATCH, scratch);

}
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/* keeping it simple:
|
|
|
|
* - one bus driver, initted first;
|
|
|
|
* - one function driver, initted second
|
|
|
|
*
|
|
|
|
* most of the work to support multiple net2280 controllers would
|
|
|
|
* be to associate this gadget driver (yes?) with all of them, or
|
|
|
|
* perhaps to bind specific drivers to specific devices.
|
|
|
|
*/
|
|
|
|
|
2014-05-20 23:30:03 +07:00
|
|
|
static void usb_reset_228x(struct net2280 *dev)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
|
|
|
u32 tmp;
|
|
|
|
|
|
|
|
dev->gadget.speed = USB_SPEED_UNKNOWN;
|
2014-05-20 23:30:09 +07:00
|
|
|
(void) readl(&dev->usb->usbctl);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
net2280_led_init(dev);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* disable automatic responses, and irqs */
|
2014-05-20 23:30:09 +07:00
|
|
|
writel(0, &dev->usb->stdrsp);
|
|
|
|
writel(0, &dev->regs->pciirqenb0);
|
|
|
|
writel(0, &dev->regs->pciirqenb1);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* clear old dma and irq state */
|
|
|
|
for (tmp = 0; tmp < 4; tmp++) {
|
2014-05-20 23:30:03 +07:00
|
|
|
struct net2280_ep *ep = &dev->ep[tmp + 1];
|
2005-04-17 05:20:36 +07:00
|
|
|
if (ep->dma)
|
2014-05-20 23:30:03 +07:00
|
|
|
abort_dma(ep);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
2014-05-20 23:30:03 +07:00
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
writel(~0, &dev->regs->irqstat0),
|
2014-05-20 23:30:05 +07:00
|
|
|
writel(~(u32)BIT(SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1),
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* reset, and enable pci */
|
2014-05-20 23:30:05 +07:00
|
|
|
tmp = readl(&dev->regs->devinit) |
|
|
|
|
BIT(PCI_ENABLE) |
|
|
|
|
BIT(FIFO_SOFT_RESET) |
|
|
|
|
BIT(USB_SOFT_RESET) |
|
|
|
|
BIT(M8051_RESET);
|
2014-05-20 23:30:09 +07:00
|
|
|
writel(tmp, &dev->regs->devinit);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* standard fifo and endpoint allocations */
|
2014-05-20 23:30:09 +07:00
|
|
|
set_fifo_mode(dev, (fifo_mode <= 2) ? fifo_mode : 0);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
2014-05-20 23:30:03 +07:00
|
|
|
/*
 * usb_reset_338x - USB338x variant of the controller reset.  The register
 * pokes that conflict with the Defect 7374 workaround are only done once
 * the workaround has completed (dev->bug7734_patched).
 */
static void usb_reset_338x(struct net2280 *dev)
{
	u32 tmp;

	dev->gadget.speed = USB_SPEED_UNKNOWN;
	(void)readl(&dev->usb->usbctl);

	net2280_led_init(dev);

	if (dev->bug7734_patched) {
		/* disable automatic responses, and irqs */
		writel(0, &dev->usb->stdrsp);
		writel(0, &dev->regs->pciirqenb0);
		writel(0, &dev->regs->pciirqenb1);
	}

	/* clear old dma and irq state */
	for (tmp = 0; tmp < 4; tmp++) {
		struct net2280_ep *ep = &dev->ep[tmp + 1];
		struct net2280_dma_regs __iomem *dma;

		if (ep->dma) {
			abort_dma(ep);
		} else {
			/* channel not bound to an ep: abort it directly */
			dma = &dev->dma[tmp];
			writel(BIT(DMA_ABORT), &dma->dmastat);
			writel(0, &dma->dmactl);
		}
	}

	writel(~0, &dev->regs->irqstat0), writel(~0, &dev->regs->irqstat1);

	if (dev->bug7734_patched) {
		/* reset, and enable pci */
		tmp = readl(&dev->regs->devinit) |
		    BIT(PCI_ENABLE) |
		    BIT(FIFO_SOFT_RESET) |
		    BIT(USB_SOFT_RESET) |
		    BIT(M8051_RESET);

		writel(tmp, &dev->regs->devinit);
	}

	/* always ep-{1,2,3,4} ... maybe not ep-3 or ep-4 */
	INIT_LIST_HEAD(&dev->gadget.ep_list);

	for (tmp = 1; tmp < dev->n_ep; tmp++)
		list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list);

}
|
|
|
|
|
|
|
|
static void usb_reset(struct net2280 *dev)
|
|
|
|
{
|
2014-05-20 23:30:12 +07:00
|
|
|
if (dev->quirks & PLX_LEGACY)
|
2014-05-20 23:30:03 +07:00
|
|
|
return usb_reset_228x(dev);
|
|
|
|
return usb_reset_338x(dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * usb_reinit_228x - re-initialize the driver-side endpoint bookkeeping for
 * NET228x parts: names, fifo sizes, DMA channel bindings, register
 * pointers, max-packet limits, and the dedicated-endpoint lockout.
 */
static void usb_reinit_228x(struct net2280 *dev)
{
	u32 tmp;

	/* basic endpoint init */
	for (tmp = 0; tmp < 7; tmp++) {
		struct net2280_ep *ep = &dev->ep[tmp];

		ep->ep.name = ep_info_dft[tmp].name;
		ep->ep.caps = ep_info_dft[tmp].caps;
		ep->dev = dev;
		ep->num = tmp;

		/* ep-a..ep-d: 1K FIFOs with a DMA channel each;
		 * ep0/ep-e/ep-f: 64-byte PIO only */
		if (tmp > 0 && tmp <= 4) {
			ep->fifo_size = 1024;
			ep->dma = &dev->dma[tmp - 1];
		} else
			ep->fifo_size = 64;
		ep->regs = &dev->epregs[tmp];
		ep->cfg = &dev->epregs[tmp];
		ep_reset_228x(dev->regs, ep);
	}
	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
	usb_ep_set_maxpacket_limit(&dev->ep[5].ep, 64);
	usb_ep_set_maxpacket_limit(&dev->ep[6].ep, 64);

	dev->gadget.ep0 = &dev->ep[0].ep;
	dev->ep[0].stopped = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* we want to prevent lowlevel/insecure access from the USB host,
	 * but erratum 0119 means this enable bit is ignored
	 */
	for (tmp = 0; tmp < 5; tmp++)
		writel(EP_DONTUSE, &dev->dep[tmp].dep_cfg);
}
|
|
|
|
|
2014-05-20 23:30:03 +07:00
|
|
|
/*
 * usb_reinit_338x - re-initialize endpoint state and link-layer registers
 * for usb338x chips.
 *
 * In enhanced mode the endpoint register layout is remapped via the ne[]
 * and ep_reg_addr[] tables (IN/OUT pairs share a USB endpoint number);
 * in legacy mode endpoints map 1:1.  Also applies several link-layer
 * errata workarounds (LFPS timers, hot-reset TS2 counters, recovery-idle
 * chicken bit) and disables the dedicated endpoints.
 */
static void usb_reinit_338x(struct net2280 *dev)
{
	int i;
	u32 tmp, val;
	/* enhanced-mode mapping: logical ep index -> USB endpoint number */
	static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 };
	/* enhanced-mode byte offset of the IN (0x00) vs OUT (0xC0) half */
	static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00,
						0x00, 0xC0, 0x00, 0xC0 };

	/* basic endpoint init */
	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep *ep = &dev->ep[i];

		ep->ep.name = dev->enhanced_mode ? ep_info_adv[i].name :
						   ep_info_dft[i].name;
		ep->ep.caps = dev->enhanced_mode ? ep_info_adv[i].caps :
						   ep_info_dft[i].caps;
		ep->dev = dev;
		ep->num = i;

		/* eps 1..4 each get a DMA channel */
		if (i > 0 && i <= 4)
			ep->dma = &dev->dma[i - 1];

		if (dev->enhanced_mode) {
			ep->cfg = &dev->epregs[ne[i]];
			/*
			 * Set USB endpoint number, hardware allows same number
			 * in both directions.
			 */
			if (i > 0 && i < 5)
				writel(ne[i], &ep->cfg->ep_cfg);
			ep->regs = (struct net2280_ep_regs __iomem *)
				(((void __iomem *)&dev->epregs[ne[i]]) +
				ep_reg_addr[i]);
		} else {
			ep->cfg = &dev->epregs[i];
			ep->regs = &dev->epregs[i];
		}

		/* ep0 has a 512-byte FIFO; the others 2KB */
		ep->fifo_size = (i != 0) ? 2048 : 512;

		ep_reset_338x(dev->regs, ep);
	}
	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512);

	dev->gadget.ep0 = &dev->ep[0].ep;
	dev->ep[0].stopped = 0;

	/* Link layer set up: keep U1/U2/LTM disabled until the defect-7374
	 * workaround has run (bug7734_patched)
	 */
	if (dev->bug7734_patched) {
		tmp = readl(&dev->usb_ext->usbctl2) &
				~(BIT(U1_ENABLE) | BIT(U2_ENABLE) |
					BIT(LTM_ENABLE));
		writel(tmp, &dev->usb_ext->usbctl2);
	}

	/* Hardware Defect and Workaround: retune the LFPS timers
	 * (read-modify-write to preserve the other register bits)
	 */
	val = readl(&dev->ll_lfps_regs->ll_lfps_5);
	val &= ~(0xf << TIMER_LFPS_6US);
	val |= 0x5 << TIMER_LFPS_6US;
	writel(val, &dev->ll_lfps_regs->ll_lfps_5);

	val = readl(&dev->ll_lfps_regs->ll_lfps_6);
	val &= ~(0xffff << TIMER_LFPS_80US);
	val |= 0x0100 << TIMER_LFPS_80US;
	writel(val, &dev->ll_lfps_regs->ll_lfps_6);

	/*
	 * AA_AB Errata. Issue 4. Workaround for SuperSpeed USB
	 * Hot Reset Exit Handshake may Fail in Specific Case using
	 * Default Register Settings. Workaround for Enumeration test.
	 */
	val = readl(&dev->ll_tsn_regs->ll_tsn_counters_2);
	val &= ~(0x1f << HOT_TX_NORESET_TS2);
	val |= 0x10 << HOT_TX_NORESET_TS2;
	writel(val, &dev->ll_tsn_regs->ll_tsn_counters_2);

	val = readl(&dev->ll_tsn_regs->ll_tsn_counters_3);
	val &= ~(0x1f << HOT_RX_RESET_TS2);
	val |= 0x3 << HOT_RX_RESET_TS2;
	writel(val, &dev->ll_tsn_regs->ll_tsn_counters_3);

	/*
	 * Set Recovery Idle to Recover bit:
	 * - On SS connections, setting Recovery Idle to Recover Fmw improves
	 *   link robustness with various hosts and hubs.
	 * - It is safe to set for all connection speeds; all chip revisions.
	 * - R-M-W to leave other bits undisturbed.
	 * - Reference PLX TT-7372
	 */
	val = readl(&dev->ll_chicken_reg->ll_tsn_chicken_bit);
	val |= BIT(RECOVERY_IDLE_TO_RECOVER_FMW);
	writel(val, &dev->ll_chicken_reg->ll_tsn_chicken_bit);

	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* disable dedicated endpoints */
	writel(0x0D, &dev->dep[0].dep_cfg);
	writel(0x0D, &dev->dep[1].dep_cfg);
	writel(0x0E, &dev->dep[2].dep_cfg);
	writel(0x0E, &dev->dep[3].dep_cfg);
	writel(0x0F, &dev->dep[4].dep_cfg);
	writel(0x0C, &dev->dep[5].dep_cfg);
}
|
|
|
|
|
|
|
|
static void usb_reinit(struct net2280 *dev)
|
|
|
|
{
|
2014-05-20 23:30:12 +07:00
|
|
|
if (dev->quirks & PLX_LEGACY)
|
2014-05-20 23:30:03 +07:00
|
|
|
return usb_reinit_228x(dev);
|
|
|
|
return usb_reinit_338x(dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * ep0_start_228x - arm ep0 and global interrupts on 228x-family chips.
 *
 * Clears ep0 NAK/status-phase state, tells the chip which standard
 * requests to answer in hardware, programs usbctl (including soft-connect
 * per dev->softconnect), and enables the IRQ sources the driver handles.
 */
static void ep0_start_228x(struct net2280 *dev)
{
	writel(BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
		BIT(CLEAR_NAK_OUT_PACKETS) |
		BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE),
		&dev->epregs[0].ep_rsp);

	/*
	 * hardware optionally handles a bunch of standard requests
	 * that the API hides from drivers anyway.  have it do so.
	 * endpoint status/features are handled in software, to
	 * help pass tests for some dubious behavior.
	 */
	writel(BIT(SET_TEST_MODE) |
		BIT(SET_ADDRESS) |
		BIT(DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP) |
		BIT(GET_DEVICE_STATUS) |
		BIT(GET_INTERFACE_STATUS),
		&dev->usb->stdrsp);
	writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
		BIT(SELF_POWERED_USB_DEVICE) |
		BIT(REMOTE_WAKEUP_SUPPORT) |
		(dev->softconnect << USB_DETECT_ENABLE) |
		BIT(SELF_POWERED_STATUS),
		&dev->usb->usbctl);

	/* enable irqs so we can see ep0 and general operation */
	writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
		BIT(ENDPOINT_0_INTERRUPT_ENABLE),
		&dev->regs->pciirqenb0);
	writel(BIT(PCI_INTERRUPT_ENABLE) |
		BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE) |
		BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE) |
		BIT(PCI_RETRY_ABORT_INTERRUPT_ENABLE) |
		BIT(VBUS_INTERRUPT_ENABLE) |
		BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
		BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE),
		&dev->regs->pciirqenb1);

	/* don't leave any writes posted */
	(void) readl(&dev->usb->usbctl);
}
|
|
|
|
|
2014-05-20 23:30:03 +07:00
|
|
|
/*
 * ep0_start_338x - arm ep0 and global interrupts on usb338x chips.
 *
 * The ep0 response-mode write is skipped until the defect-7374 workaround
 * has run (bug7734_patched).  Otherwise mirrors ep0_start_228x: select the
 * standard requests the chip answers itself, program usbctl (wakeup +
 * soft-connect), and enable the interrupt sources the driver handles.
 */
static void ep0_start_338x(struct net2280 *dev)
{

	if (dev->bug7734_patched)
		writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
				BIT(SET_EP_HIDE_STATUS_PHASE),
				&dev->epregs[0].ep_rsp);

	/*
	 * hardware optionally handles a bunch of standard requests
	 * that the API hides from drivers anyway.  have it do so.
	 * endpoint status/features are handled in software, to
	 * help pass tests for some dubious behavior.
	 */
	writel(BIT(SET_ISOCHRONOUS_DELAY) |
			BIT(SET_SEL) |
			BIT(SET_TEST_MODE) |
			BIT(SET_ADDRESS) |
			BIT(GET_INTERFACE_STATUS) |
			BIT(GET_DEVICE_STATUS),
			&dev->usb->stdrsp);
	dev->wakeup_enable = 1;
	writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
			(dev->softconnect << USB_DETECT_ENABLE) |
			BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
			&dev->usb->usbctl);

	/* enable irqs so we can see ep0 and general operation */
	writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
			BIT(ENDPOINT_0_INTERRUPT_ENABLE),
			&dev->regs->pciirqenb0);
	writel(BIT(PCI_INTERRUPT_ENABLE) |
			BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
			BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE) |
			BIT(VBUS_INTERRUPT_ENABLE),
			&dev->regs->pciirqenb1);

	/* don't leave any writes posted */
	(void)readl(&dev->usb->usbctl);
}
|
|
|
|
|
|
|
|
static void ep0_start(struct net2280 *dev)
|
|
|
|
{
|
2014-05-20 23:30:12 +07:00
|
|
|
if (dev->quirks & PLX_LEGACY)
|
2014-05-20 23:30:03 +07:00
|
|
|
return ep0_start_228x(dev);
|
|
|
|
return ep0_start_338x(dev);
|
|
|
|
}
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/* when a driver is successfully registered, it will receive
|
|
|
|
* control requests including set_configuration(), which enables
|
|
|
|
* non-control requests. then usb traffic follows until a
|
|
|
|
* disconnect is reported. then a host may connect again, or
|
|
|
|
* the driver might get unbound.
|
|
|
|
*/
|
2011-10-10 14:37:17 +07:00
|
|
|
/*
 * net2280_start - UDC ->udc_start hook: bind a gadget driver.
 *
 * Validates the driver (must support high speed and supply ->setup),
 * hooks it up, creates the sysfs attribute files, runs the defect-7374
 * data-endpoint workaround on PCIe parts that still need it, and arms
 * ep0.  On sysfs failure the registration is unwound via goto cleanup.
 *
 * Returns 0 on success or a negative errno.
 */
static int net2280_start(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver)
{
	struct net2280 *dev;
	int retval;
	unsigned i;

	/* insist on high speed support from the driver, since
	 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
	 * "must not be used in normal operation"
	 */
	if (!driver || driver->max_speed < USB_SPEED_HIGH ||
			!driver->setup)
		return -EINVAL;

	dev = container_of(_gadget, struct net2280, gadget);

	/* reset per-endpoint irq statistics */
	for (i = 0; i < dev->n_ep; i++)
		dev->ep[i].irqs = 0;

	/* hook up the driver ... */
	driver->driver.bus = NULL;
	dev->driver = driver;

	retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
	if (retval)
		goto err_unbind;
	retval = device_create_file(&dev->pdev->dev, &dev_attr_queues);
	if (retval)
		goto err_func;

	/* enable host detection and ep0; and we're ready
	 * for set_configuration as well as eventual disconnect.
	 */
	net2280_led_active(dev, 1);

	/* PCIe parts need the defect-7374 workaround before data eps work */
	if ((dev->quirks & PLX_PCIE) && !dev->bug7734_patched)
		defect7374_enable_data_eps_zero(dev);

	ep0_start(dev);

	/* pci writes may still be posted */
	return 0;

err_func:
	device_remove_file(&dev->pdev->dev, &dev_attr_function);
err_unbind:
	dev->driver = NULL;
	return retval;
}
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
/*
 * stop_activity - quiesce the controller and report disconnect.
 *
 * Resets the hardware, kills every queued request on every endpoint,
 * then (if connected and a driver was given) invokes the gadget driver's
 * ->disconnect callback before re-initializing endpoint state.
 * Caller holds dev->lock; the disconnect callback is invoked with the
 * lock still held (callbacks may run in interrupt context).
 */
static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
{
	int i;

	/* don't disconnect if it's not connected */
	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;

	/* stop hardware; prevent new request submissions;
	 * and kill any outstanding requests.
	 */
	usb_reset(dev);
	for (i = 0; i < dev->n_ep; i++)
		nuke(&dev->ep[i]);

	/* report disconnect; the driver is already quiesced */
	if (driver)
		driver->disconnect(&dev->gadget);

	usb_reinit(dev);
}
|
|
|
|
|
2014-10-18 00:05:12 +07:00
|
|
|
/*
 * net2280_stop - UDC ->udc_stop hook: unbind the gadget driver.
 *
 * Quiesces all activity under dev->lock (passing NULL so no disconnect
 * callback is made), turns off the activity LED, removes the sysfs
 * attribute files created by net2280_start(), and clears dev->driver.
 *
 * Always returns 0.
 */
static int net2280_stop(struct usb_gadget *_gadget)
{
	struct net2280 *dev;
	unsigned long flags;

	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	stop_activity(dev, NULL);
	spin_unlock_irqrestore(&dev->lock, flags);

	net2280_led_active(dev, 0);

	device_remove_file(&dev->pdev->dev, &dev_attr_function);
	device_remove_file(&dev->pdev->dev, &dev_attr_queues);

	dev->driver = NULL;

	return 0;
}
|
|
|
|
|
|
|
|
/*-------------------------------------------------------------------------*/
|
|
|
|
|
|
|
|
/* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
|
|
|
|
* also works for dma-capable endpoints, in pio mode or just
|
|
|
|
* to manually advance the queue after short OUT transfers.
|
|
|
|
*/
|
2014-05-20 23:30:09 +07:00
|
|
|
/*
 * handle_ep_small - per-endpoint interrupt service for ep0 and PIO
 * endpoints (ep-e, ep-f), and manual DMA-queue advance after short OUT
 * transfers on DMA-capable endpoints.
 *
 * mode tracks request progress: 0 error, 1 mid-data, 2 done.  Called from
 * IRQ context with the device lock held (acks ep_stat and may complete
 * requests via done()).
 */
static void handle_ep_small(struct net2280_ep *ep)
{
	struct net2280_request *req;
	u32 t;
	/* 0 error, 1 mid-data, 2 done */
	int mode = 1;

	/* peek at the request at the head of the queue, if any */
	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
			struct net2280_request, queue);
	else
		req = NULL;

	/* ack all, and handle what we care about */
	t = readl(&ep->regs->ep_stat);
	ep->irqs++;

	ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n",
			ep->ep.name, t, req ? &req->req : NULL);

	/* on 2280 (and OUT eps generally) preserve NAK_OUT_PACKETS when
	 * acking; the 2282 wants the full status written back
	 */
	if (!ep->is_in || (ep->dev->quirks & PLX_2280))
		writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat);
	else
		/* Added for 2282 */
		writel(t, &ep->regs->ep_stat);

	/* for ep0, monitor token irqs to catch data stage length errors
	 * and to synchronize on status.
	 *
	 * also, to defer reporting of protocol stalls ... here's where
	 * data or status first appears, handling stalls here should never
	 * cause trouble on the host side..
	 *
	 * control requests could be slightly faster without token synch for
	 * status, but status can jam up that way.
	 */
	if (unlikely(ep->num == 0)) {
		if (ep->is_in) {
			/* status; stop NAKing */
			if (t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt(ep);
				}
				if (!req)
					allow_status(ep);
				mode = 2;
			/* reply to extra IN data tokens with a zlp */
			} else if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt(ep);
					mode = 2;
				} else if (ep->responded &&
						!req && !ep->stopped)
					write_fifo(ep, NULL);
			}
		} else {
			/* status; stop NAKing */
			if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt(ep);
				}
				mode = 2;
			/* an extra OUT token is an error */
			} else if (((t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) &&
					req &&
					req->req.actual == req->req.length) ||
					(ep->responded && !req)) {
				ep->dev->protocol_stall = 1;
				set_halt(ep);
				ep->stopped = 1;
				if (req)
					done(ep, req, -EOVERFLOW);
				req = NULL;
			}
		}
	}

	if (unlikely(!req))
		return;

	/* manual DMA queue advance after short OUT */
	if (likely(ep->dma)) {
		if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
			struct net2280_request *stuck_req = NULL;
			int stopped = ep->stopped;
			int num_completed;
			int stuck = 0;
			u32 count;

			/* TRANSFERRED works around OUT_DONE erratum 0112.
			 * we expect (N <= maxpacket) bytes; host wrote M.
			 * iff (M < N) we won't ever see a DMA interrupt.
			 */
			ep->stopped = 1;
			for (count = 0; ; t = readl(&ep->regs->ep_stat)) {

				/* any preceding dma transfers must finish.
				 * dma handles (M >= N), may empty the queue
				 */
				num_completed = scan_dma_completions(ep);
				if (unlikely(list_empty(&ep->queue) ||
						ep->out_overflow)) {
					req = NULL;
					break;
				}
				req = list_entry(ep->queue.next,
					struct net2280_request, queue);

				/* here either (M < N), a "real" short rx;
				 * or (M == N) and the queue didn't empty
				 */
				if (likely(t & BIT(FIFO_EMPTY))) {
					count = readl(&ep->dma->dmacount);
					count &= DMA_BYTE_COUNT_MASK;
					if (readl(&ep->dma->dmadesc)
							!= req->td_dma)
						req = NULL;
					break;
				}

				/* Escape loop if no dma transfers completed
				 * after few retries.
				 */
				if (num_completed == 0) {
					if (stuck_req == req &&
					    readl(&ep->dma->dmadesc) !=
						  req->td_dma && stuck++ > 5) {
						count = readl(
							&ep->dma->dmacount);
						count &= DMA_BYTE_COUNT_MASK;
						req = NULL;
						ep_dbg(ep->dev, "%s escape stuck %d, count %u\n",
							ep->ep.name, stuck,
							count);
						break;
					} else if (stuck_req != req) {
						stuck_req = req;
						stuck = 0;
					}
				} else {
					stuck_req = NULL;
					stuck = 0;
				}

				udelay(1);
			}

			/* stop DMA, leave ep NAKing */
			writel(BIT(DMA_ABORT), &ep->dma->dmastat);
			spin_stop_dma(ep->dma);

			if (likely(req)) {
				req->td->dmacount = 0;
				t = readl(&ep->regs->ep_avail);
				dma_done(ep, req, count,
					(ep->out_overflow || t)
						? -EOVERFLOW : 0);
			}

			/* also flush to prevent erratum 0106 trouble */
			if (unlikely(ep->out_overflow ||
					(ep->dev->chiprev == 0x0100 &&
					ep->dev->gadget.speed
					== USB_SPEED_FULL))) {
				out_flush(ep);
				ep->out_overflow = 0;
			}

			/* (re)start dma if needed, stop NAKing */
			ep->stopped = stopped;
			if (!list_empty(&ep->queue))
				restart_dma(ep);
		} else
			ep_dbg(ep->dev, "%s dma ep_stat %08x ??\n",
					ep->ep.name, t);
		return;

	/* data packet(s) received (in the fifo, OUT) */
	} else if (t & BIT(DATA_PACKET_RECEIVED_INTERRUPT)) {
		if (read_fifo(ep, req) && ep->num != 0)
			mode = 2;

	/* data packet(s) transmitted (IN) */
	} else if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) {
		unsigned len;

		len = req->req.length - req->req.actual;
		if (len > ep->ep.maxpacket)
			len = ep->ep.maxpacket;
		req->req.actual += len;

		/* if we wrote it all, we're usually done */
		/* send zlps until the status stage */
		if ((req->req.actual == req->req.length) &&
			(!req->req.zero || len != ep->ep.maxpacket) && ep->num)
			mode = 2;

	/* there was nothing to do ... */
	} else if (mode == 1)
		return;

	/* done */
	if (mode == 2) {
		/* stream endpoints often resubmit/unlink in completion */
		done(ep, req, 0);

		/* maybe advance queue to next request */
		if (ep->num == 0) {
			/* NOTE:  net2280 could let gadget driver start the
			 * status stage later. since not all controllers let
			 * them control that, the api doesn't (yet) allow it.
			 */
			if (!ep->stopped)
				allow_status(ep);
			req = NULL;
		} else {
			if (!list_empty(&ep->queue) && !ep->stopped)
				req = list_entry(ep->queue.next,
					struct net2280_request, queue);
			else
				req = NULL;
			if (req && !ep->is_in)
				stop_out_naking(ep);
		}
	}

	/* is there a buffer for the next packet?
	 * for best streaming performance, make sure there is one.
	 */
	if (req && !ep->stopped) {

		/* load IN fifo with next packet (may be zlp) */
		if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT))
			write_fifo(ep, &req->req);
	}
}
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
/*
 * Map a wIndex endpoint selector from a SETUP packet onto the driver's
 * endpoint state.  Endpoint number 0 always resolves to ep0 regardless
 * of direction; other endpoints must match both the direction bit and
 * the endpoint number of an endpoint that is currently configured
 * (i.e. has a descriptor).  Returns NULL when nothing matches.
 */
static struct net2280_ep *get_ep_by_addr(struct net2280 *dev, u16 wIndex)
{
	struct net2280_ep *ep;

	if (!(wIndex & USB_ENDPOINT_NUMBER_MASK))
		return &dev->ep[0];

	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
		u8 addr;

		/* skip endpoints that are not configured */
		if (!ep->desc)
			continue;
		addr = ep->desc->bEndpointAddress;
		/* direction bit and endpoint number must both match */
		if (((wIndex ^ addr) & USB_DIR_IN) == 0 &&
				(wIndex & 0x0f) == (addr & 0x0f))
			return ep;
	}
	return NULL;
}
|
|
|
|
|
2014-05-20 23:30:03 +07:00
|
|
|
static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r)
|
|
|
|
{
|
|
|
|
u32 scratch, fsmvalue;
|
|
|
|
u32 ack_wait_timeout, state;
|
|
|
|
|
|
|
|
/* Workaround for Defect 7374 (U1/U2 erroneously rejected): */
|
|
|
|
scratch = get_idx_reg(dev->regs, SCRATCH);
|
|
|
|
fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD);
|
|
|
|
scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
|
|
|
|
|
|
|
|
if (!((fsmvalue == DEFECT7374_FSM_WAITING_FOR_CONTROL_READ) &&
|
|
|
|
(r.bRequestType & USB_DIR_IN)))
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* This is the first Control Read for this connection: */
|
2014-05-20 23:30:05 +07:00
|
|
|
if (!(readl(&dev->usb->usbstat) & BIT(SUPER_SPEED_MODE))) {
|
2014-05-20 23:30:03 +07:00
|
|
|
/*
|
|
|
|
* Connection is NOT SS:
|
|
|
|
* - Connection must be FS or HS.
|
|
|
|
* - This FSM state should allow workaround software to
|
|
|
|
* run after the next USB connection.
|
|
|
|
*/
|
|
|
|
scratch |= DEFECT7374_FSM_NON_SS_CONTROL_READ;
|
2014-11-28 20:50:57 +07:00
|
|
|
dev->bug7734_patched = 1;
|
2014-05-20 23:30:03 +07:00
|
|
|
goto restore_data_eps;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Connection is SS: */
|
|
|
|
for (ack_wait_timeout = 0;
|
|
|
|
ack_wait_timeout < DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS;
|
|
|
|
ack_wait_timeout++) {
|
|
|
|
|
|
|
|
state = readl(&dev->plregs->pl_ep_status_1)
|
|
|
|
& (0xff << STATE);
|
|
|
|
if ((state >= (ACK_GOOD_NORMAL << STATE)) &&
|
|
|
|
(state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))) {
|
|
|
|
scratch |= DEFECT7374_FSM_SS_CONTROL_READ;
|
2014-11-28 20:50:57 +07:00
|
|
|
dev->bug7734_patched = 1;
|
2014-05-20 23:30:03 +07:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We have not yet received host's Data Phase ACK
|
|
|
|
* - Wait and try again.
|
|
|
|
*/
|
|
|
|
udelay(DEFECT_7374_PROCESSOR_WAIT_TIME);
|
|
|
|
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
if (ack_wait_timeout >= DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS) {
|
2014-05-20 23:30:11 +07:00
|
|
|
ep_err(dev, "FAIL: Defect 7374 workaround waited but failed "
|
2014-05-20 23:30:03 +07:00
|
|
|
"to detect SS host's data phase ACK.");
|
2014-05-20 23:30:11 +07:00
|
|
|
ep_err(dev, "PL_EP_STATUS_1(23:16):.Expected from 0x11 to 0x16"
|
2014-05-20 23:30:03 +07:00
|
|
|
"got 0x%2.2x.\n", state >> STATE);
|
|
|
|
} else {
|
2014-05-20 23:30:11 +07:00
|
|
|
ep_warn(dev, "INFO: Defect 7374 workaround waited about\n"
|
2014-05-20 23:30:03 +07:00
|
|
|
"%duSec for Control Read Data Phase ACK\n",
|
|
|
|
DEFECT_7374_PROCESSOR_WAIT_TIME * ack_wait_timeout);
|
|
|
|
}
|
|
|
|
|
|
|
|
restore_data_eps:
|
|
|
|
/*
|
|
|
|
* Restore data EPs to their pre-workaround settings (disabled,
|
|
|
|
* initialized, and other details).
|
|
|
|
*/
|
|
|
|
defect7374_disable_data_eps(dev);
|
|
|
|
|
|
|
|
set_idx_reg(dev->regs, SCRATCH, scratch);
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2014-11-28 20:50:54 +07:00
|
|
|
static void ep_clear_seqnum(struct net2280_ep *ep)
|
2014-05-20 23:30:03 +07:00
|
|
|
{
|
|
|
|
struct net2280 *dev = ep->dev;
|
|
|
|
u32 val;
|
|
|
|
static const u32 ep_pl[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 };
|
|
|
|
|
2014-11-28 20:50:54 +07:00
|
|
|
val = readl(&dev->plregs->pl_ep_ctrl) & ~0x1f;
|
|
|
|
val |= ep_pl[ep->num];
|
|
|
|
writel(val, &dev->plregs->pl_ep_ctrl);
|
|
|
|
val |= BIT(SEQUENCE_NUMBER_RESET);
|
|
|
|
writel(val, &dev->plregs->pl_ep_ctrl);
|
2014-05-20 23:30:03 +07:00
|
|
|
|
2014-11-28 20:50:54 +07:00
|
|
|
return;
|
2014-05-20 23:30:03 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * SuperSpeed SETUP packet dispatch (USB338x).  Handles the standard
 * requests the hardware does not fully manage at SS — SET_CONFIGURATION
 * bookkeeping, GET_STATUS (device/endpoint), CLEAR_FEATURE and
 * SET_FEATURE (U1/U2/LTM/remote-wakeup and ENDPOINT_HALT) — and
 * delegates everything else to the gadget driver's setup() callback.
 * Called with dev->lock held; the lock is dropped around the delegate
 * call.  @ep is ep0; @r is the decoded SETUP packet (by value).
 */
static void handle_stat0_irqs_superspeed(struct net2280 *dev,
		struct net2280_ep *ep, struct usb_ctrlrequest r)
{
	/*
	 * tmp stays 0 unless the request is delegated; do_stall3 only
	 * stalls when tmp < 0, so a "goto do_stall3" taken before
	 * delegation falls through without stalling — NOTE(review):
	 * looks unintended, confirm against the USB 3.0 ch9 tests.
	 */
	int tmp = 0;

#define	w_value		le16_to_cpu(r.wValue)
#define	w_index		le16_to_cpu(r.wIndex)
#define	w_length	le16_to_cpu(r.wLength)

	switch (r.bRequest) {
		/* declarations shared by the case labels below */
		struct net2280_ep *e;
		u16 status;

	case USB_REQ_SET_CONFIGURATION:
		/* remember whether we are back in the Address state */
		dev->addressed_state = !w_value;
		goto usb3_delegate;

	case USB_REQ_GET_STATUS:
		switch (r.bRequestType) {
		case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			/* bit1 remote wakeup, bit0 self-powered,
			 * bits 2..4 U1/U2/LTM enables (USB3 GET_STATUS)
			 */
			status = dev->wakeup_enable ? 0x02 : 0x00;
			if (dev->gadget.is_selfpowered)
				status |= BIT(0);
			status |= (dev->u1_enable << 2 | dev->u2_enable << 3 |
							dev->ltm_enable << 4);
			/* answer by PIO through the ep0 FIFO */
			writel(0, &dev->epregs[0].ep_irqenb);
			set_fifo_bytecount(ep, sizeof(status));
			writel((__force u32) status, &dev->epregs[0].ep_data);
			allow_status_338x(ep);
			break;

		case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
			e = get_ep_by_addr(dev, w_index);
			if (!e)
				goto do_stall3;
			/* non-zero iff the endpoint is halted */
			status = readl(&e->regs->ep_rsp) &
						BIT(CLEAR_ENDPOINT_HALT);
			writel(0, &dev->epregs[0].ep_irqenb);
			set_fifo_bytecount(ep, sizeof(status));
			writel((__force u32) status, &dev->epregs[0].ep_data);
			allow_status_338x(ep);
			break;

		default:
			goto usb3_delegate;
		}
		break;
	case USB_REQ_CLEAR_FEATURE:
		switch (r.bRequestType) {
		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			/* U1/U2/LTM may only be changed when configured */
			if (!dev->addressed_state) {
				switch (w_value) {
				case USB_DEVICE_U1_ENABLE:
					dev->u1_enable = 0;
					writel(readl(&dev->usb_ext->usbctl2) &
						~BIT(U1_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_U2_ENABLE:
					dev->u2_enable = 0;
					writel(readl(&dev->usb_ext->usbctl2) &
						~BIT(U2_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_LTM_ENABLE:
					dev->ltm_enable = 0;
					writel(readl(&dev->usb_ext->usbctl2) &
						~BIT(LTM_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				default:
					break;
				}
			}
			if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
				dev->wakeup_enable = 0;
				writel(readl(&dev->usb->usbctl) &
					~BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
					&dev->usb->usbctl);
				allow_status_338x(ep);
				break;
			}
			goto usb3_delegate;

		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
			e = get_ep_by_addr(dev, w_index);
			if (!e)
				goto do_stall3;
			if (w_value != USB_ENDPOINT_HALT)
				goto do_stall3;
			ep_vdbg(dev, "%s clear halt\n", e->ep.name);
			/*
			 * Workaround for SS SeqNum not cleared via
			 * Endpoint Halt (Clear) bit. select endpoint
			 */
			ep_clear_seqnum(e);
			clear_halt(e);
			if (!list_empty(&e->queue) && e->td_dma)
				restart_dma(e);
			allow_status(ep);
			ep->stopped = 1;
			break;

		default:
			goto usb3_delegate;
		}
		break;
	case USB_REQ_SET_FEATURE:
		switch (r.bRequestType) {
		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			if (!dev->addressed_state) {
				switch (w_value) {
				case USB_DEVICE_U1_ENABLE:
					dev->u1_enable = 1;
					writel(readl(&dev->usb_ext->usbctl2) |
						BIT(U1_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_U2_ENABLE:
					dev->u2_enable = 1;
					writel(readl(&dev->usb_ext->usbctl2) |
						BIT(U2_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_LTM_ENABLE:
					dev->ltm_enable = 1;
					writel(readl(&dev->usb_ext->usbctl2) |
						BIT(LTM_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;
				default:
					break;
				}
			}

			if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
				dev->wakeup_enable = 1;
				writel(readl(&dev->usb->usbctl) |
					BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
					&dev->usb->usbctl);
				allow_status_338x(ep);
				break;
			}
			goto usb3_delegate;

		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
			e = get_ep_by_addr(dev, w_index);
			if (!e || (w_value != USB_ENDPOINT_HALT))
				goto do_stall3;
			/*
			 * NOTE(review): this branch looked up endpoint "e"
			 * but stalls/halts "ep" (ep0) — confirm whether
			 * "e" was intended here.
			 */
			ep->stopped = 1;
			if (ep->num == 0)
				ep->dev->protocol_stall = 1;
			else {
				if (ep->dma)
					abort_dma(ep);
				set_halt(ep);
			}
			allow_status_338x(ep);
			break;

		default:
			goto usb3_delegate;
		}

		break;
	default:

usb3_delegate:
		ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x ep_cfg %08x\n",
				r.bRequestType, r.bRequest,
				w_value, w_index, w_length,
				readl(&ep->cfg->ep_cfg));

		/* hand the request to the gadget driver, lock dropped */
		ep->responded = 0;
		spin_unlock(&dev->lock);
		tmp = dev->driver->setup(&dev->gadget, &r);
		spin_lock(&dev->lock);
	}
do_stall3:
	if (tmp < 0) {
		ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
				r.bRequestType, r.bRequest, tmp);
		dev->protocol_stall = 1;
		/* TD 9.9 Halt Endpoint test. TD 9.22 Set feature test */
		set_halt(ep);
	}

next_endpoints3:

#undef	w_value
#undef	w_index
#undef	w_length

	return;
}
|
|
|
|
|
2015-05-17 03:33:35 +07:00
|
|
|
/*
 * Dispatch USB338x enhanced-mode endpoint interrupts: ep_bit[] maps each
 * endpoint index to its bit position in irqstat0; service every endpoint
 * whose bit is set, stopping early once all pending bits are consumed.
 */
static void usb338x_handle_ep_intr(struct net2280 *dev, u32 stat0)
{
	u32 i;

	for (i = 0; stat0 && i < ARRAY_SIZE(ep_bit); i++) {
		u32 mask = BIT(ep_bit[i]);

		if (stat0 & mask) {
			stat0 &= ~mask;
			handle_ep_small(&dev->ep[i]);
		}
	}
}
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
/*
 * irqstat0 handler: SETUP packet arrival plus per-endpoint PIO work.
 * Detects the negotiated speed on the first SETUP of a connection,
 * flushes stale ep0 requests, reads and byte-swaps the 8-byte SETUP
 * packet, handles the standard requests the hardware leaves to software
 * (GET_STATUS / CLEAR_FEATURE / SET_FEATURE on endpoints), and delegates
 * the rest to the gadget driver.  Finally services endpoint data irqs.
 * Called with dev->lock held (dropped around the delegate call).
 */
static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
{
	struct net2280_ep	*ep;
	u32			num, scratch;

	/* most of these don't need individual acks */
	stat &= ~BIT(INTA_ASSERTED);
	if (!stat)
		return;
	/* ep_dbg(dev, "irqstat0 %04x\n", stat); */

	/* starting a control request? */
	if (unlikely(stat & BIT(SETUP_PACKET_INTERRUPT))) {
		union {
			u32			raw[2];
			struct usb_ctrlrequest	r;
		} u;
		int				tmp;
		struct net2280_request		*req;

		/* first SETUP after connect: latch the negotiated speed */
		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
			u32 val = readl(&dev->usb->usbstat);
			if (val & BIT(SUPER_SPEED)) {
				dev->gadget.speed = USB_SPEED_SUPER;
				usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
						EP0_SS_MAX_PACKET_SIZE);
			} else if (val & BIT(HIGH_SPEED)) {
				dev->gadget.speed = USB_SPEED_HIGH;
				usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
						EP0_HS_MAX_PACKET_SIZE);
			} else {
				/* full speed shares the HS ep0 limit here */
				dev->gadget.speed = USB_SPEED_FULL;
				usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
						EP0_HS_MAX_PACKET_SIZE);
			}
			net2280_led_speed(dev, dev->gadget.speed);
			ep_dbg(dev, "%s\n",
					usb_speed_string(dev->gadget.speed));
		}

		ep = &dev->ep[0];
		ep->irqs++;

		/* make sure any leftover request state is cleared */
		stat &= ~BIT(ENDPOINT_0_INTERRUPT);
		while (!list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
					struct net2280_request, queue);
			done(ep, req, (req->req.actual == req->req.length)
					? 0 : -EPROTO);
		}
		ep->stopped = 0;
		dev->protocol_stall = 0;
		if (!(dev->quirks & PLX_PCIE)) {
			/* ack every pending ep0 status bit (W1C register) */
			if (ep->dev->quirks & PLX_2280)
				tmp = BIT(FIFO_OVERFLOW) |
				    BIT(FIFO_UNDERFLOW);
			else
				tmp = 0;

			writel(tmp | BIT(TIMEOUT) |
				   BIT(USB_STALL_SENT) |
				   BIT(USB_IN_NAK_SENT) |
				   BIT(USB_IN_ACK_RCVD) |
				   BIT(USB_OUT_PING_NAK_SENT) |
				   BIT(USB_OUT_ACK_SENT) |
				   BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
				   BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
				   BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
				   BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
				   BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
				   BIT(DATA_IN_TOKEN_INTERRUPT),
				   &ep->regs->ep_stat);
		}
		/* fetch the raw 8-byte SETUP packet */
		u.raw[0] = readl(&dev->usb->setup0123);
		u.raw[1] = readl(&dev->usb->setup4567);

		cpu_to_le32s(&u.raw[0]);
		cpu_to_le32s(&u.raw[1]);

		if ((dev->quirks & PLX_PCIE) && !dev->bug7734_patched)
			defect7374_workaround(dev, u.r);

		tmp = 0;

#define	w_value		le16_to_cpu(u.r.wValue)
#define	w_index		le16_to_cpu(u.r.wIndex)
#define	w_length	le16_to_cpu(u.r.wLength)

		/* ack the irq */
		writel(BIT(SETUP_PACKET_INTERRUPT), &dev->regs->irqstat0);
		stat ^= BIT(SETUP_PACKET_INTERRUPT);

		/* watch control traffic at the token level, and force
		 * synchronization before letting the status stage happen.
		 * FIXME ignore tokens we'll NAK, until driver responds.
		 * that'll mean a lot less irqs for some drivers.
		 */
		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
		if (ep->is_in) {
			scratch = BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
				BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
				BIT(DATA_IN_TOKEN_INTERRUPT);
			stop_out_naking(ep);
		} else
			scratch = BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
				BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
				BIT(DATA_IN_TOKEN_INTERRUPT);
		writel(scratch, &dev->epregs[0].ep_irqenb);

		/* we made the hardware handle most lowlevel requests;
		 * everything else goes uplevel to the gadget code.
		 */
		ep->responded = 1;

		/* SuperSpeed requests take a different ch9 path */
		if (dev->gadget.speed == USB_SPEED_SUPER) {
			handle_stat0_irqs_superspeed(dev, ep, u.r);
			goto next_endpoints;
		}

		switch (u.r.bRequest) {
		case USB_REQ_GET_STATUS: {
			struct net2280_ep	*e;
			__le32			status;

			/* hw handles device and interface status */
			if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
				goto delegate;
			e = get_ep_by_addr(dev, w_index);
			if (!e || w_length > 2)
				goto do_stall;

			if (readl(&e->regs->ep_rsp) & BIT(SET_ENDPOINT_HALT))
				status = cpu_to_le32(1);
			else
				status = cpu_to_le32(0);

			/* don't bother with a request object! */
			writel(0, &dev->epregs[0].ep_irqenb);
			set_fifo_bytecount(ep, w_length);
			writel((__force u32)status, &dev->epregs[0].ep_data);
			allow_status(ep);
			ep_vdbg(dev, "%s stat %02x\n", ep->ep.name, status);
			goto next_endpoints;
			}
			break;
		case USB_REQ_CLEAR_FEATURE: {
			struct net2280_ep	*e;

			/* hw handles device features */
			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (w_value != USB_ENDPOINT_HALT || w_length != 0)
				goto do_stall;
			e = get_ep_by_addr(dev, w_index);
			if (!e)
				goto do_stall;
			if (e->wedged) {
				/* wedged endpoints stay halted until re-enable */
				ep_vdbg(dev, "%s wedged, halt not cleared\n",
						ep->ep.name);
			} else {
				ep_vdbg(dev, "%s clear halt\n", e->ep.name);
				clear_halt(e);
				if ((ep->dev->quirks & PLX_PCIE) &&
					!list_empty(&e->queue) && e->td_dma)
						restart_dma(e);
			}
			allow_status(ep);
			goto next_endpoints;
			}
			break;
		case USB_REQ_SET_FEATURE: {
			struct net2280_ep	*e;

			/* hw handles device features */
			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (w_value != USB_ENDPOINT_HALT || w_length != 0)
				goto do_stall;
			e = get_ep_by_addr(dev, w_index);
			if (!e)
				goto do_stall;
			/* halting ep0 this way is never legal */
			if (e->ep.name == ep0name)
				goto do_stall;
			set_halt(e);
			if ((dev->quirks & PLX_PCIE) && e->dma)
				abort_dma(e);
			allow_status(ep);
			ep_vdbg(dev, "%s set halt\n", ep->ep.name);
			goto next_endpoints;
			}
			break;
		default:
delegate:
			ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x "
				"ep_cfg %08x\n",
				u.r.bRequestType, u.r.bRequest,
				w_value, w_index, w_length,
				readl(&ep->cfg->ep_cfg));
			/* let the gadget driver answer; lock dropped */
			ep->responded = 0;
			spin_unlock(&dev->lock);
			tmp = dev->driver->setup(&dev->gadget, &u.r);
			spin_lock(&dev->lock);
		}

		/* stall ep0 on error */
		if (tmp < 0) {
do_stall:
			ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
					u.r.bRequestType, u.r.bRequest, tmp);
			dev->protocol_stall = 1;
		}

		/* some in/out token irq should follow; maybe stall then.
		 * driver must queue a request (even zlp) or halt ep0
		 * before the host times out.
		 */
	}

#undef	w_value
#undef	w_index
#undef	w_length

next_endpoints:
	if ((dev->quirks & PLX_PCIE) && dev->enhanced_mode) {
		/* enhanced mode scatters ep bits; use the 338x dispatcher */
		u32 mask = (BIT(ENDPOINT_0_INTERRUPT) |
			USB3380_IRQSTAT0_EP_INTR_MASK_IN |
			USB3380_IRQSTAT0_EP_INTR_MASK_OUT);

		if (stat & mask) {
			usb338x_handle_ep_intr(dev, stat & mask);
			stat &= ~mask;
		}
	} else {
		/* endpoint data irq ? */
		scratch = stat & 0x7f;
		stat &= ~0x7f;
		for (num = 0; scratch; num++) {
			u32		t;

			/* do this endpoint's FIFO and queue need tending? */
			t = BIT(num);
			if ((scratch & t) == 0)
				continue;
			scratch ^= t;

			ep = &dev->ep[num];
			handle_ep_small(ep);
		}
	}

	if (stat)
		ep_dbg(dev, "unhandled irqstat0 %08x\n", stat);
}
|
|
|
|
|
2014-05-20 23:30:05 +07:00
|
|
|
/* irqstat1 bits raised by the four DMA channels (ep-a .. ep-d) */
#define DMA_INTERRUPTS (BIT(DMA_D_INTERRUPT) | \
		BIT(DMA_C_INTERRUPT) | \
		BIT(DMA_B_INTERRUPT) | \
		BIT(DMA_A_INTERRUPT))
/* irqstat1 bits reporting (mostly fatal) PCI bus errors */
#define PCI_ERROR_INTERRUPTS ( \
		BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT) | \
		BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT) | \
		BIT(PCI_RETRY_ABORT_INTERRUPT))
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
/*
 * irqstat1 handler: VBUS disconnect, root-port reset, suspend/resume,
 * per-channel DMA completion, and PCI bus errors.  Called with
 * dev->lock held.  May return early after a disconnect/reset, in which
 * case ep0 has been restarted and all activity stopped.
 */
static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
{
	struct net2280_ep *ep;
	u32 tmp, num, mask, scratch;

	/* after disconnect there's nothing else to do! */
	tmp = BIT(VBUS_INTERRUPT) | BIT(ROOT_PORT_RESET_INTERRUPT);
	mask = BIT(SUPER_SPEED) | BIT(HIGH_SPEED) | BIT(FULL_SPEED);

	/* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
	 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and
	 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
	 * only indicates a change in the reset state).
	 */
	if (stat & tmp) {
		bool reset = false;
		bool disconnect = false;

		/*
		 * Ignore disconnects and resets if the speed hasn't been set.
		 * VBUS can bounce and there's always an initial reset.
		 */
		writel(tmp, &dev->regs->irqstat1);
		if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
			if ((stat & BIT(VBUS_INTERRUPT)) &&
					(readl(&dev->usb->usbctl) &
						BIT(VBUS_PIN)) == 0) {
				disconnect = true;
				ep_dbg(dev, "disconnect %s\n",
						dev->driver->driver.name);
			} else if ((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) &&
					(readl(&dev->usb->usbstat) & mask)
						== 0) {
				reset = true;
				ep_dbg(dev, "reset %s\n",
						dev->driver->driver.name);
			}

			if (disconnect || reset) {
				/* tear down and re-arm ep0; tell the driver */
				stop_activity(dev, dev->driver);
				ep0_start(dev);
				if (reset)
					usb_gadget_udc_reset
						(&dev->gadget, dev->driver);
				else
					(dev->driver->disconnect)
						(&dev->gadget);
				return;
			}
		}
		stat &= ~tmp;

		/* vBUS can bounce ... one of many reasons to ignore the
		 * notion of hotplug events on bus connect/disconnect!
		 */
		if (!stat)
			return;
	}

	/* NOTE: chip stays in PCI D0 state for now, but it could
	 * enter D1 to save more power
	 */
	tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
	if (stat & tmp) {
		writel(tmp, &dev->regs->irqstat1);
		if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
			if (dev->driver->suspend)
				dev->driver->suspend(&dev->gadget);
			/* keep the bit set so it shows up as "unhandled"
			 * below when suspend reporting is disabled
			 */
			if (!enable_suspend)
				stat &= ~BIT(SUSPEND_REQUEST_INTERRUPT);
		} else {
			if (dev->driver->resume)
				dev->driver->resume(&dev->gadget);
			/* at high speed, note erratum 0133 */
		}
		stat &= ~tmp;
	}

	/* clear any other status/irqs */
	if (stat)
		writel(stat, &dev->regs->irqstat1);

	/* some status we can just ignore */
	if (dev->quirks & PLX_2280)
		stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
			  BIT(SUSPEND_REQUEST_INTERRUPT) |
			  BIT(RESUME_INTERRUPT) |
			  BIT(SOF_INTERRUPT));
	else
		stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
			  BIT(RESUME_INTERRUPT) |
			  BIT(SOF_DOWN_INTERRUPT) |
			  BIT(SOF_INTERRUPT));

	if (!stat)
		return;
	/* ep_dbg(dev, "irqstat1 %08x\n", stat);*/

	/* DMA status, for ep-{a,b,c,d} */
	scratch = stat & DMA_INTERRUPTS;
	stat &= ~DMA_INTERRUPTS;
	scratch >>= 9;
	for (num = 0; scratch; num++) {
		struct net2280_dma_regs	__iomem *dma;

		tmp = BIT(num);
		if ((tmp & scratch) == 0)
			continue;
		scratch ^= tmp;

		/* DMA channel N serves ep[N+1]; ep0 is PIO only */
		ep = &dev->ep[num + 1];
		dma = ep->dma;

		if (!dma)
			continue;

		/* clear ep's dma status */
		tmp = readl(&dma->dmastat);
		writel(tmp, &dma->dmastat);

		/* dma sync*/
		if (dev->quirks & PLX_PCIE) {
			u32 r_dmacount = readl(&dma->dmacount);
			/* OUT transfer still has bytes pending: wait for
			 * the next interrupt before processing
			 */
			if (!ep->is_in && (r_dmacount & 0x00FFFFFF) &&
			    (tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT)))
				continue;
		}

		if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) {
			ep_dbg(ep->dev, "%s no xact done? %08x\n",
				ep->ep.name, tmp);
			continue;
		}
		stop_dma(ep->dma);

		/* OUT transfers terminate when the data from the
		 * host is in our memory.  Process whatever's done.
		 * On this path, we know transfer's last packet wasn't
		 * less than req->length. NAK_OUT_PACKETS may be set,
		 * or the FIFO may already be holding new packets.
		 *
		 * IN transfers can linger in the FIFO for a very
		 * long time ... we ignore that for now, accounting
		 * precisely (like PIO does) needs per-packet irqs
		 */
		scan_dma_completions(ep);

		/* disable dma on inactive queues; else maybe restart */
		if (!list_empty(&ep->queue)) {
			/* NOTE(review): dmactl value read but unused —
			 * presumably a leftover or a read-to-sync; confirm
			 */
			tmp = readl(&dma->dmactl);
			restart_dma(ep);
		}
		ep->irqs++;
	}

	/* NOTE: there are other PCI errors we might usefully notice.
	 * if they appear very often, here's where to try recovering.
	 */
	if (stat & PCI_ERROR_INTERRUPTS) {
		ep_err(dev, "pci dma error; stat %08x\n", stat);
		stat &= ~PCI_ERROR_INTERRUPTS;
		/* these are fatal errors, but "maybe" they won't
		 * happen again ...
		 */
		stop_activity(dev, dev->driver);
		ep0_start(dev);
		stat = 0;
	}

	if (stat)
		ep_dbg(dev, "unhandled irqstat1 %08x\n", stat);
}
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
static irqreturn_t net2280_irq(int irq, void *_dev)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
|
|
|
struct net2280 *dev = _dev;
|
|
|
|
|
2006-04-15 03:44:11 +07:00
|
|
|
/* shared interrupt, not ours */
|
2014-05-20 23:30:12 +07:00
|
|
|
if ((dev->quirks & PLX_LEGACY) &&
|
2014-05-20 23:30:05 +07:00
|
|
|
(!(readl(&dev->regs->irqstat0) & BIT(INTA_ASSERTED))))
|
2006-04-15 03:44:11 +07:00
|
|
|
return IRQ_NONE;
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
spin_lock(&dev->lock);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* handle disconnect, dma, and more */
|
2014-05-20 23:30:09 +07:00
|
|
|
handle_stat1_irqs(dev, readl(&dev->regs->irqstat1));
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* control requests and PIO */
|
2014-05-20 23:30:09 +07:00
|
|
|
handle_stat0_irqs(dev, readl(&dev->regs->irqstat0));
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2016-05-23 20:58:41 +07:00
|
|
|
if (dev->quirks & PLX_PCIE) {
|
2014-05-20 23:30:03 +07:00
|
|
|
/* re-enable interrupt to trigger any possible new interrupt */
|
|
|
|
u32 pciirqenb1 = readl(&dev->regs->pciirqenb1);
|
|
|
|
writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1);
|
|
|
|
writel(pciirqenb1, &dev->regs->pciirqenb1);
|
|
|
|
}
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
spin_unlock(&dev->lock);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*-------------------------------------------------------------------------*/
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
/*
 * Release callback for the gadget's embedded struct device: free the
 * controller state stored in its drvdata once the last reference drops.
 */
static void gadget_release(struct device *_dev)
{
	kfree(dev_get_drvdata(_dev));
}
|
|
|
|
|
|
|
|
/* tear down the binding between this driver and the pci device */
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
static void net2280_remove(struct pci_dev *pdev)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
2014-05-20 23:30:09 +07:00
|
|
|
struct net2280 *dev = pci_get_drvdata(pdev);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2011-06-28 20:33:47 +07:00
|
|
|
usb_del_gadget_udc(&dev->gadget);
|
|
|
|
|
2006-12-05 18:15:33 +07:00
|
|
|
BUG_ON(dev->driver);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* then clean up the resources we allocated during probe() */
|
|
|
|
if (dev->requests) {
|
|
|
|
int i;
|
|
|
|
for (i = 1; i < 5; i++) {
|
2014-05-20 23:30:09 +07:00
|
|
|
if (!dev->ep[i].dummy)
|
2005-04-17 05:20:36 +07:00
|
|
|
continue;
|
2017-03-08 23:19:54 +07:00
|
|
|
dma_pool_free(dev->requests, dev->ep[i].dummy,
|
2014-05-20 23:30:09 +07:00
|
|
|
dev->ep[i].td_dma);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
2017-03-08 23:19:54 +07:00
|
|
|
dma_pool_destroy(dev->requests);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
if (dev->got_irq)
|
2014-05-20 23:30:09 +07:00
|
|
|
free_irq(pdev->irq, dev);
|
2016-05-23 20:58:41 +07:00
|
|
|
if (dev->quirks & PLX_PCIE)
|
2014-05-20 23:30:03 +07:00
|
|
|
pci_disable_msi(pdev);
|
2017-05-02 23:37:22 +07:00
|
|
|
if (dev->regs) {
|
|
|
|
net2280_led_shutdown(dev);
|
2014-05-20 23:30:09 +07:00
|
|
|
iounmap(dev->regs);
|
2017-05-02 23:37:22 +07:00
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
if (dev->region)
|
2014-05-20 23:30:09 +07:00
|
|
|
release_mem_region(pci_resource_start(pdev, 0),
|
|
|
|
pci_resource_len(pdev, 0));
|
2005-04-17 05:20:36 +07:00
|
|
|
if (dev->enabled)
|
2014-05-20 23:30:09 +07:00
|
|
|
pci_disable_device(pdev);
|
|
|
|
device_remove_file(&pdev->dev, &dev_attr_registers);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2014-05-20 23:30:11 +07:00
|
|
|
ep_info(dev, "unbind\n");
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* wrap this driver around the specified device, but
|
|
|
|
* don't respond over USB until a gadget driver binds to us.
|
|
|
|
*/
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
|
|
|
struct net2280 *dev;
|
|
|
|
unsigned long resource, len;
|
|
|
|
void __iomem *base = NULL;
|
|
|
|
int retval, i;
|
|
|
|
|
|
|
|
/* alloc, and start init */
|
2014-05-20 23:30:09 +07:00
|
|
|
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
|
|
|
|
if (dev == NULL) {
|
2005-04-17 05:20:36 +07:00
|
|
|
retval = -ENOMEM;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
pci_set_drvdata(pdev, dev);
|
|
|
|
spin_lock_init(&dev->lock);
|
2014-05-20 23:30:12 +07:00
|
|
|
dev->quirks = id->driver_data;
|
2005-04-17 05:20:36 +07:00
|
|
|
dev->pdev = pdev;
|
|
|
|
dev->gadget.ops = &net2280_ops;
|
2014-05-20 23:30:12 +07:00
|
|
|
dev->gadget.max_speed = (dev->quirks & PLX_SUPERSPEED) ?
|
2014-05-20 23:30:03 +07:00
|
|
|
USB_SPEED_SUPER : USB_SPEED_HIGH;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* the "gadget" abstracts/virtualizes the controller */
|
|
|
|
dev->gadget.name = driver_name;
|
|
|
|
|
|
|
|
/* now all the pci goodies ... */
|
2014-05-20 23:30:09 +07:00
|
|
|
if (pci_enable_device(pdev) < 0) {
|
|
|
|
retval = -ENODEV;
|
2005-04-17 05:20:36 +07:00
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
dev->enabled = 1;
|
|
|
|
|
|
|
|
/* BAR 0 holds all the registers
|
|
|
|
* BAR 1 is 8051 memory; unused here (note erratum 0103)
|
|
|
|
* BAR 2 is fifo memory; unused here
|
|
|
|
*/
|
2014-05-20 23:30:09 +07:00
|
|
|
resource = pci_resource_start(pdev, 0);
|
|
|
|
len = pci_resource_len(pdev, 0);
|
|
|
|
if (!request_mem_region(resource, len, driver_name)) {
|
2014-05-20 23:30:11 +07:00
|
|
|
ep_dbg(dev, "controller already in use\n");
|
2005-04-17 05:20:36 +07:00
|
|
|
retval = -EBUSY;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
dev->region = 1;
|
|
|
|
|
2006-09-02 17:13:45 +07:00
|
|
|
/* FIXME provide firmware download interface to put
|
|
|
|
* 8051 code into the chip, e.g. to turn on PCI PM.
|
|
|
|
*/
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
base = ioremap_nocache(resource, len);
|
2005-04-17 05:20:36 +07:00
|
|
|
if (base == NULL) {
|
2014-05-20 23:30:11 +07:00
|
|
|
ep_dbg(dev, "can't map memory\n");
|
2005-04-17 05:20:36 +07:00
|
|
|
retval = -EFAULT;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
dev->regs = (struct net2280_regs __iomem *) base;
|
|
|
|
dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
|
|
|
|
dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
|
|
|
|
dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
|
|
|
|
dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
|
|
|
|
dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);
|
|
|
|
|
2016-05-23 20:58:41 +07:00
|
|
|
if (dev->quirks & PLX_PCIE) {
|
2014-05-20 23:30:03 +07:00
|
|
|
u32 fsmvalue;
|
|
|
|
u32 usbstat;
|
|
|
|
dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *)
|
|
|
|
(base + 0x00b4);
|
|
|
|
dev->llregs = (struct usb338x_ll_regs __iomem *)
|
|
|
|
(base + 0x0700);
|
|
|
|
dev->ll_lfps_regs = (struct usb338x_ll_lfps_regs __iomem *)
|
|
|
|
(base + 0x0748);
|
|
|
|
dev->ll_tsn_regs = (struct usb338x_ll_tsn_regs __iomem *)
|
|
|
|
(base + 0x077c);
|
|
|
|
dev->ll_chicken_reg = (struct usb338x_ll_chi_regs __iomem *)
|
|
|
|
(base + 0x079c);
|
|
|
|
dev->plregs = (struct usb338x_pl_regs __iomem *)
|
|
|
|
(base + 0x0800);
|
|
|
|
usbstat = readl(&dev->usb->usbstat);
|
2014-05-20 23:30:09 +07:00
|
|
|
dev->enhanced_mode = !!(usbstat & BIT(11));
|
2014-05-20 23:30:03 +07:00
|
|
|
dev->n_ep = (dev->enhanced_mode) ? 9 : 5;
|
|
|
|
/* put into initial config, link up all endpoints */
|
|
|
|
fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
|
|
|
|
(0xf << DEFECT7374_FSM_FIELD);
|
|
|
|
/* See if firmware needs to set up for workaround: */
|
2014-11-28 20:50:57 +07:00
|
|
|
if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) {
|
|
|
|
dev->bug7734_patched = 1;
|
2014-05-20 23:30:03 +07:00
|
|
|
writel(0, &dev->usb->usbctl);
|
2014-11-28 20:50:57 +07:00
|
|
|
} else
|
|
|
|
dev->bug7734_patched = 0;
|
|
|
|
} else {
|
2014-05-20 23:30:03 +07:00
|
|
|
dev->enhanced_mode = 0;
|
|
|
|
dev->n_ep = 7;
|
|
|
|
/* put into initial config, link up all endpoints */
|
|
|
|
writel(0, &dev->usb->usbctl);
|
|
|
|
}
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
usb_reset(dev);
|
|
|
|
usb_reinit(dev);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* irq setup after old hardware is cleaned up */
|
|
|
|
if (!pdev->irq) {
|
2014-05-20 23:30:11 +07:00
|
|
|
ep_err(dev, "No IRQ. Check PCI setup!\n");
|
2005-04-17 05:20:36 +07:00
|
|
|
retval = -ENODEV;
|
|
|
|
goto done;
|
|
|
|
}
|
2006-06-20 15:21:29 +07:00
|
|
|
|
2016-05-23 20:58:41 +07:00
|
|
|
if (dev->quirks & PLX_PCIE)
|
2014-05-20 23:30:03 +07:00
|
|
|
if (pci_enable_msi(pdev))
|
2014-05-20 23:30:11 +07:00
|
|
|
ep_err(dev, "Failed to enable MSI mode\n");
|
2014-05-20 23:30:03 +07:00
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
if (request_irq(pdev->irq, net2280_irq, IRQF_SHARED,
|
|
|
|
driver_name, dev)) {
|
2014-05-20 23:30:11 +07:00
|
|
|
ep_err(dev, "request interrupt %d failed\n", pdev->irq);
|
2005-04-17 05:20:36 +07:00
|
|
|
retval = -EBUSY;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
dev->got_irq = 1;
|
|
|
|
|
|
|
|
/* DMA setup */
|
|
|
|
/* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */
|
2017-03-08 23:19:54 +07:00
|
|
|
dev->requests = dma_pool_create("requests", &pdev->dev,
|
2014-05-20 23:30:09 +07:00
|
|
|
sizeof(struct net2280_dma),
|
2005-04-17 05:20:36 +07:00
|
|
|
0 /* no alignment requirements */,
|
|
|
|
0 /* or page-crossing issues */);
|
|
|
|
if (!dev->requests) {
|
2014-05-20 23:30:11 +07:00
|
|
|
ep_dbg(dev, "can't get request pool\n");
|
2005-04-17 05:20:36 +07:00
|
|
|
retval = -ENOMEM;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
for (i = 1; i < 5; i++) {
|
|
|
|
struct net2280_dma *td;
|
|
|
|
|
2017-03-08 23:19:54 +07:00
|
|
|
td = dma_pool_alloc(dev->requests, GFP_KERNEL,
|
2014-05-20 23:30:09 +07:00
|
|
|
&dev->ep[i].td_dma);
|
2005-04-17 05:20:36 +07:00
|
|
|
if (!td) {
|
2014-05-20 23:30:11 +07:00
|
|
|
ep_dbg(dev, "can't get dummy %d\n", i);
|
2005-04-17 05:20:36 +07:00
|
|
|
retval = -ENOMEM;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
td->dmacount = 0; /* not VALID */
|
|
|
|
td->dmadesc = td->dmaaddr;
|
2014-05-20 23:30:09 +07:00
|
|
|
dev->ep[i].dummy = td;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* enable lower-overhead pci memory bursts during DMA */
|
2014-05-20 23:30:12 +07:00
|
|
|
if (dev->quirks & PLX_LEGACY)
|
2014-05-20 23:30:05 +07:00
|
|
|
writel(BIT(DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) |
|
|
|
|
/*
|
|
|
|
* 256 write retries may not be enough...
|
|
|
|
BIT(PCI_RETRY_ABORT_ENABLE) |
|
|
|
|
*/
|
|
|
|
BIT(DMA_READ_MULTIPLE_ENABLE) |
|
|
|
|
BIT(DMA_READ_LINE_ENABLE),
|
|
|
|
&dev->pci->pcimstctl);
|
2005-04-17 05:20:36 +07:00
|
|
|
/* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
|
2014-05-20 23:30:09 +07:00
|
|
|
pci_set_master(pdev);
|
|
|
|
pci_try_set_mwi(pdev);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* ... also flushes any posted pci writes */
|
2014-05-20 23:30:09 +07:00
|
|
|
dev->chiprev = get_idx_reg(dev->regs, REG_CHIPREV) & 0xffff;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* done */
|
2014-05-20 23:30:11 +07:00
|
|
|
ep_info(dev, "%s\n", driver_desc);
|
|
|
|
ep_info(dev, "irq %d, pci mem %p, chip rev %04x\n",
|
2006-06-20 15:21:29 +07:00
|
|
|
pdev->irq, base, dev->chiprev);
|
2014-11-28 20:50:49 +07:00
|
|
|
ep_info(dev, "version: " DRIVER_VERSION "; %s\n",
|
2014-05-20 23:30:03 +07:00
|
|
|
dev->enhanced_mode ? "enhanced mode" : "legacy mode");
|
2014-05-20 23:30:09 +07:00
|
|
|
retval = device_create_file(&pdev->dev, &dev_attr_registers);
|
|
|
|
if (retval)
|
|
|
|
goto done;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2013-02-26 20:15:27 +07:00
|
|
|
retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
|
|
|
|
gadget_release);
|
2011-06-28 20:33:47 +07:00
|
|
|
if (retval)
|
|
|
|
goto done;
|
2005-04-17 05:20:36 +07:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
done:
|
|
|
|
if (dev)
|
2014-05-20 23:30:09 +07:00
|
|
|
net2280_remove(pdev);
|
2005-04-17 05:20:36 +07:00
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
2006-05-06 03:23:42 +07:00
|
|
|
/* make sure the board is quiescent; otherwise it will continue
|
|
|
|
* generating IRQs across the upcoming reboot.
|
|
|
|
*/
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
static void net2280_shutdown(struct pci_dev *pdev)
|
2006-05-06 03:23:42 +07:00
|
|
|
{
|
2014-05-20 23:30:09 +07:00
|
|
|
struct net2280 *dev = pci_get_drvdata(pdev);
|
2006-05-06 03:23:42 +07:00
|
|
|
|
|
|
|
/* disable IRQs */
|
2014-05-20 23:30:09 +07:00
|
|
|
writel(0, &dev->regs->pciirqenb0);
|
|
|
|
writel(0, &dev->regs->pciirqenb1);
|
2006-05-06 03:23:42 +07:00
|
|
|
|
|
|
|
/* disable the pullup so the host will think we're gone */
|
2014-05-20 23:30:09 +07:00
|
|
|
writel(0, &dev->usb->usbctl);
|
2013-01-31 04:40:14 +07:00
|
|
|
|
2006-05-06 03:23:42 +07:00
|
|
|
}
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/*-------------------------------------------------------------------------*/
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
static const struct pci_device_id pci_ids[] = { {
|
2016-03-15 19:06:00 +07:00
|
|
|
.class = PCI_CLASS_SERIAL_USB_DEVICE,
|
2006-09-02 17:13:45 +07:00
|
|
|
.class_mask = ~0,
|
2014-05-20 23:30:04 +07:00
|
|
|
.vendor = PCI_VENDOR_ID_PLX_LEGACY,
|
2005-04-17 05:20:36 +07:00
|
|
|
.device = 0x2280,
|
|
|
|
.subvendor = PCI_ANY_ID,
|
|
|
|
.subdevice = PCI_ANY_ID,
|
2014-05-20 23:30:12 +07:00
|
|
|
.driver_data = PLX_LEGACY | PLX_2280,
|
2014-05-20 23:30:10 +07:00
|
|
|
}, {
|
2016-03-15 19:06:00 +07:00
|
|
|
.class = PCI_CLASS_SERIAL_USB_DEVICE,
|
2006-09-02 17:13:45 +07:00
|
|
|
.class_mask = ~0,
|
2014-05-20 23:30:04 +07:00
|
|
|
.vendor = PCI_VENDOR_ID_PLX_LEGACY,
|
2006-03-20 02:49:14 +07:00
|
|
|
.device = 0x2282,
|
|
|
|
.subvendor = PCI_ANY_ID,
|
|
|
|
.subdevice = PCI_ANY_ID,
|
2014-05-20 23:30:12 +07:00
|
|
|
.driver_data = PLX_LEGACY,
|
2014-05-20 23:30:10 +07:00
|
|
|
},
|
2014-05-20 23:30:03 +07:00
|
|
|
{
|
2016-03-15 19:06:00 +07:00
|
|
|
.class = PCI_CLASS_SERIAL_USB_DEVICE,
|
2014-05-20 23:30:10 +07:00
|
|
|
.class_mask = ~0,
|
|
|
|
.vendor = PCI_VENDOR_ID_PLX,
|
2016-05-23 20:58:41 +07:00
|
|
|
.device = 0x2380,
|
|
|
|
.subvendor = PCI_ANY_ID,
|
|
|
|
.subdevice = PCI_ANY_ID,
|
|
|
|
.driver_data = PLX_PCIE,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
|
|
|
|
.class_mask = ~0,
|
|
|
|
.vendor = PCI_VENDOR_ID_PLX,
|
2014-05-20 23:30:10 +07:00
|
|
|
.device = 0x3380,
|
|
|
|
.subvendor = PCI_ANY_ID,
|
|
|
|
.subdevice = PCI_ANY_ID,
|
2016-05-23 20:58:41 +07:00
|
|
|
.driver_data = PLX_PCIE | PLX_SUPERSPEED,
|
2014-05-20 23:30:03 +07:00
|
|
|
},
|
|
|
|
{
|
2016-03-15 19:06:00 +07:00
|
|
|
.class = PCI_CLASS_SERIAL_USB_DEVICE,
|
2014-05-20 23:30:10 +07:00
|
|
|
.class_mask = ~0,
|
|
|
|
.vendor = PCI_VENDOR_ID_PLX,
|
|
|
|
.device = 0x3382,
|
|
|
|
.subvendor = PCI_ANY_ID,
|
|
|
|
.subdevice = PCI_ANY_ID,
|
2016-05-23 20:58:41 +07:00
|
|
|
.driver_data = PLX_PCIE | PLX_SUPERSPEED,
|
2014-05-20 23:30:03 +07:00
|
|
|
},
|
|
|
|
{ /* end: all zeroes */ }
|
2005-04-17 05:20:36 +07:00
|
|
|
};
|
2014-05-20 23:30:09 +07:00
|
|
|
MODULE_DEVICE_TABLE(pci, pci_ids);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* pci driver glue; this is a "new style" PCI driver module */
|
|
|
|
static struct pci_driver net2280_pci_driver = {
|
|
|
|
.name = (char *) driver_name,
|
|
|
|
.id_table = pci_ids,
|
|
|
|
|
|
|
|
.probe = net2280_probe,
|
|
|
|
.remove = net2280_remove,
|
2006-05-06 03:23:42 +07:00
|
|
|
.shutdown = net2280_shutdown,
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/* FIXME add power management support */
|
|
|
|
};
|
|
|
|
|
2014-05-20 23:30:07 +07:00
|
|
|
module_pci_driver(net2280_pci_driver);
|
|
|
|
|
2014-05-20 23:30:09 +07:00
|
|
|
MODULE_DESCRIPTION(DRIVER_DESC);
|
|
|
|
MODULE_AUTHOR("David Brownell");
|
|
|
|
MODULE_LICENSE("GPL");
|