2017-11-03 17:28:30 +07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2008-05-13 22:01:25 +07:00
|
|
|
/*
|
|
|
|
* cdc-wdm.c
|
|
|
|
*
|
|
|
|
* This driver supports USB CDC WCM Device Management.
|
|
|
|
*
|
2009-04-20 22:24:49 +07:00
|
|
|
* Copyright (c) 2007-2009 Oliver Neukum
|
2008-05-13 22:01:25 +07:00
|
|
|
*
|
|
|
|
* Some code taken from cdc-acm.c
|
|
|
|
*
|
|
|
|
* Released under the GPLv2.
|
|
|
|
*
|
|
|
|
* Many thanks to Carl Nordbeck
|
|
|
|
*/
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/errno.h>
|
USB: cdc-wdm: implement IOCTL_WDM_MAX_COMMAND
Userspace applications need to know the maximum supported message
size.
The cdc-wdm driver translates between a character device stream
and a message based protocol. Each message is transported as a
usb control message with no further encapsulation or syncronization.
Each read or write on the character device should translate to
exactly one usb control message to ensure that message boundaries
are kept intact. That means that the userspace application must
know the maximum message size supported by the device and driver,
making this size a vital part of the cdc-wdm character device API.
CDC WDM and CDC MBIM functions export the maximum supported
message size through CDC functional descriptors. The cdc-wdm and
cdc_mbim drivers will parse these descriptors and use the value
chosen by the device. The only current way for a userspace
application to retrive the value is by duplicating the descriptor
parsing. This is an unnecessary complex task, and application
writers are likely to postpone it, using a fixed value and adding
a "todo" item.
QMI functions have no way to tell the host what message size they
support. The qmi_wwan driver use a fixed value based on protocol
recommendations and observed device behaviour. Userspace
applications must know and hard code the same value. This scheme
will break if we ever encounter a QMI device needing a device
specific message size quirk. We are currently unable to support
such a device because using a non default size would break the
implicit userspace API.
The message size is currently a hidden attribute of the cdc-wdm
userspace API. Retrieving it is unnecessarily complex, increasing
the possibility of drivers and applications using different limits.
The resulting errors are hard to debug, and can only be replicated
on identical hardware.
Exporting the maximum message size from the driver simplifies the
task for the userspace application, and creates a unified
information source independent of device and function class. It also
serves to document that the message size is part of the cdc-wdm
userspace API.
This proposed API extension has been presented for the authors of
userspace applications and libraries using the current API: libmbim,
libqmi, uqmi, oFono and ModemManager. The replies were:
Aleksander Morgado:
"We do really need max message size for MBIM; and as you say, it may be
good to have the max message size info also for QMI, so the new ioctl
seems a good addition. So +1 from my side, for what it's worth."
Dan Williams:
"Yeah, +1 here. I'd prefer the sysfs file, but the fact that that
doesn't work for fd passing pretty much kills it."
No negative replies are so far received.
Cc: Aleksander Morgado <aleksander@lanedo.com>
Cc: Dan Williams <dcbw@redhat.com>
Signed-off-by: Bjørn Mork <bjorn@mork.no>
Acked-by: Oliver Neukum <oliver@neukum.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2013-03-18 03:00:06 +07:00
|
|
|
#include <linux/ioctl.h>
|
2008-05-13 22:01:25 +07:00
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/mutex.h>
|
|
|
|
#include <linux/uaccess.h>
|
|
|
|
#include <linux/bitops.h>
|
|
|
|
#include <linux/poll.h>
|
|
|
|
#include <linux/usb.h>
|
|
|
|
#include <linux/usb/cdc.h>
|
|
|
|
#include <asm/byteorder.h>
|
|
|
|
#include <asm/unaligned.h>
|
2012-03-06 23:29:22 +07:00
|
|
|
#include <linux/usb/cdc-wdm.h>
|
2008-05-13 22:01:25 +07:00
|
|
|
|
|
|
|
#define DRIVER_AUTHOR "Oliver Neukum"
#define DRIVER_DESC "USB Abstract Control Model driver for USB WCM Device Management"

/* bind to any interface with class COMM / subclass Device Management (DMM) */
static const struct usb_device_id wdm_ids[] = {
	{
		.match_flags = USB_DEVICE_ID_MATCH_INT_CLASS |
				 USB_DEVICE_ID_MATCH_INT_SUBCLASS,
		.bInterfaceClass = USB_CLASS_COMM,
		.bInterfaceSubClass = USB_CDC_SUBCLASS_DMM
	},
	{ }
};

MODULE_DEVICE_TABLE (usb, wdm_ids);

#define WDM_MINOR_BASE	176


/* bit numbers used in wdm_device::flags */
#define WDM_IN_USE		1
#define WDM_DISCONNECTING	2
#define WDM_RESULT		3
#define WDM_READ		4
#define WDM_INT_STALL		5
#define WDM_POLL_RUNNING	6
#define WDM_RESPONDING		7
#define WDM_SUSPENDING		8
#define WDM_RESETTING		9
#define WDM_OVERFLOW		10

#define WDM_MAX			16

/* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 decimal (0x100)" */
#define WDM_DEFAULT_BUFSIZE	256

static DEFINE_MUTEX(wdm_mutex);
/* protects wdm_device_list */
static DEFINE_SPINLOCK(wdm_device_list_lock);
static LIST_HEAD(wdm_device_list);
|
2008-05-13 22:01:25 +07:00
|
|
|
|
|
|
|
/* --- method tables --- */
|
|
|
|
|
|
|
|
/* per-interface driver state; one instance per bound WDM interface */
struct wdm_device {
	u8 *inbuf; /* buffer for response */
	u8 *outbuf; /* buffer for command */
	u8 *sbuf; /* buffer for status */
	u8 *ubuf; /* buffer for copy to user space */

	struct urb *command;	/* control OUT urb carrying commands */
	struct urb *response;	/* control IN urb fetching responses */
	struct urb *validity;	/* interrupt urb carrying notifications */
	struct usb_interface *intf;
	struct usb_ctrlrequest *orq;	/* setup packet for command urb */
	struct usb_ctrlrequest *irq;	/* setup packet for response urb */
	spinlock_t iuspin;	/* protects flags, rerr/werr, length bookkeeping */

	unsigned long flags;	/* WDM_* bit flags */
	u16 bufsize;
	u16 wMaxCommand;	/* max message size; cap for reads and writes */
	u16 wMaxPacketSize;
	__le16 inum;		/* interface number, pre-converted to LE */
	int reslength;		/* length of the last response fragment */
	int length;		/* valid bytes buffered in ubuf */
	int read;
	int count;		/* open count */
	dma_addr_t shandle;
	dma_addr_t ihandle;
	struct mutex wlock;	/* serializes writers */
	struct mutex rlock;	/* serializes readers */
	wait_queue_head_t wait;
	struct work_struct rxwork;	/* deferred stall clearing / resubmit */
	struct work_struct service_outs_intr;	/* drain queued responses after a read error */
	int werr;		/* last write (command urb) completion status */
	int rerr;		/* last read (response urb) completion status */
	int resp_count;		/* outstanding RESPONSE_AVAILABLE notifications */

	struct list_head device_list;	/* entry on wdm_device_list */
	int (*manage_power)(struct usb_interface *, int);
};
|
|
|
|
|
|
|
|
static struct usb_driver wdm_driver;
|
|
|
|
|
2012-03-06 23:29:21 +07:00
|
|
|
/* return intfdata if we own the interface, else look up intf in the list */
|
|
|
|
static struct wdm_device *wdm_find_device(struct usb_interface *intf)
|
|
|
|
{
|
2012-09-11 03:17:34 +07:00
|
|
|
struct wdm_device *desc;
|
2012-03-06 23:29:21 +07:00
|
|
|
|
|
|
|
spin_lock(&wdm_device_list_lock);
|
|
|
|
list_for_each_entry(desc, &wdm_device_list, device_list)
|
|
|
|
if (desc->intf == intf)
|
2012-09-11 03:17:34 +07:00
|
|
|
goto found;
|
|
|
|
desc = NULL;
|
|
|
|
found:
|
2012-03-06 23:29:21 +07:00
|
|
|
spin_unlock(&wdm_device_list_lock);
|
|
|
|
|
|
|
|
return desc;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct wdm_device *wdm_find_device_by_minor(int minor)
|
|
|
|
{
|
2012-09-11 03:17:34 +07:00
|
|
|
struct wdm_device *desc;
|
2012-03-06 23:29:21 +07:00
|
|
|
|
|
|
|
spin_lock(&wdm_device_list_lock);
|
|
|
|
list_for_each_entry(desc, &wdm_device_list, device_list)
|
|
|
|
if (desc->intf->minor == minor)
|
2012-09-11 03:17:34 +07:00
|
|
|
goto found;
|
|
|
|
desc = NULL;
|
|
|
|
found:
|
2012-03-06 23:29:21 +07:00
|
|
|
spin_unlock(&wdm_device_list_lock);
|
|
|
|
|
|
|
|
return desc;
|
|
|
|
}
|
|
|
|
|
2008-05-13 22:01:25 +07:00
|
|
|
/* --- callbacks --- */
|
|
|
|
static void wdm_out_callback(struct urb *urb)
|
|
|
|
{
|
|
|
|
struct wdm_device *desc;
|
2018-06-25 05:08:35 +07:00
|
|
|
unsigned long flags;
|
|
|
|
|
2008-05-13 22:01:25 +07:00
|
|
|
desc = urb->context;
|
2018-06-25 05:08:35 +07:00
|
|
|
spin_lock_irqsave(&desc->iuspin, flags);
|
2008-05-13 22:01:25 +07:00
|
|
|
desc->werr = urb->status;
|
2018-06-25 05:08:35 +07:00
|
|
|
spin_unlock_irqrestore(&desc->iuspin, flags);
|
2008-05-13 22:01:25 +07:00
|
|
|
kfree(desc->outbuf);
|
2012-04-27 02:59:10 +07:00
|
|
|
desc->outbuf = NULL;
|
|
|
|
clear_bit(WDM_IN_USE, &desc->flags);
|
2008-05-13 22:01:25 +07:00
|
|
|
wake_up(&desc->wait);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Completion handler for the response (control IN) urb.
 *
 * Appends the received fragment to ubuf (unless it would overflow
 * wMaxCommand), records errors in desc->rerr, and either wakes a
 * waiting reader or schedules work to drain further queued responses.
 * Runs in interrupt context; all state is updated under iuspin.
 */
static void wdm_in_callback(struct urb *urb)
{
	unsigned long flags;
	struct wdm_device *desc = urb->context;
	int status = urb->status;
	int length = urb->actual_length;

	spin_lock_irqsave(&desc->iuspin, flags);
	clear_bit(WDM_RESPONDING, &desc->flags);

	if (status) {
		switch (status) {
		case -ENOENT:
			dev_dbg(&desc->intf->dev,
				"nonzero urb status received: -ENOENT\n");
			goto skip_error;
		case -ECONNRESET:
			dev_dbg(&desc->intf->dev,
				"nonzero urb status received: -ECONNRESET\n");
			goto skip_error;
		case -ESHUTDOWN:
			dev_dbg(&desc->intf->dev,
				"nonzero urb status received: -ESHUTDOWN\n");
			goto skip_error;
		case -EPIPE:
			dev_err(&desc->intf->dev,
				"nonzero urb status received: -EPIPE\n");
			break;
		default:
			dev_err(&desc->intf->dev,
				"Unexpected error %d\n", status);
			break;
		}
	}

	/*
	 * only set a new error if there is no previous error.
	 * Errors are only cleared during read/open
	 * Avoid propagating -EPIPE (stall) to userspace since it is
	 * better handled as an empty read
	 */
	if (desc->rerr == 0 && status != -EPIPE)
		desc->rerr = status;

	if (length + desc->length > desc->wMaxCommand) {
		/* The buffer would overflow */
		set_bit(WDM_OVERFLOW, &desc->flags);
	} else {
		/* we may already be in overflow */
		if (!test_bit(WDM_OVERFLOW, &desc->flags)) {
			memmove(desc->ubuf + desc->length, desc->inbuf, length);
			desc->length += length;
			desc->reslength = length;
		}
	}
skip_error:

	if (desc->rerr) {
		/*
		 * Since there was an error, userspace may decide to not read
		 * any data after poll'ing.
		 * We should respond to further attempts from the device to send
		 * data, so that we can get unstuck.
		 */
		schedule_work(&desc->service_outs_intr);
	} else {
		/* data is ready; wake any reader blocked in wdm_read() */
		set_bit(WDM_READ, &desc->flags);
		wake_up(&desc->wait);
	}
	spin_unlock_irqrestore(&desc->iuspin, flags);
}
|
|
|
|
|
|
|
|
/*
 * Completion handler for the interrupt (notification) urb.
 *
 * Decodes CDC notifications from sbuf. On RESPONSE_AVAILABLE it submits
 * the response urb (unless one is already in flight, or the device is
 * disconnecting/suspending) and counts the notification in resp_count.
 * The interrupt urb is always resubmitted at the end so polling continues.
 */
static void wdm_int_callback(struct urb *urb)
{
	unsigned long flags;
	int rv = 0;
	int responding;
	int status = urb->status;
	struct wdm_device *desc;
	struct usb_cdc_notification *dr;

	desc = urb->context;
	dr = (struct usb_cdc_notification *)desc->sbuf;

	if (status) {
		switch (status) {
		case -ESHUTDOWN:
		case -ENOENT:
		case -ECONNRESET:
			return; /* unplug */
		case -EPIPE:
			set_bit(WDM_INT_STALL, &desc->flags);
			dev_err(&desc->intf->dev, "Stall on int endpoint\n");
			goto sw; /* halt is cleared in work */
		default:
			dev_err(&desc->intf->dev,
				"nonzero urb status received: %d\n", status);
			break;
		}
	}

	/* a notification shorter than the header cannot be decoded */
	if (urb->actual_length < sizeof(struct usb_cdc_notification)) {
		dev_err(&desc->intf->dev, "wdm_int_callback - %d bytes\n",
			urb->actual_length);
		goto exit;
	}

	switch (dr->bNotificationType) {
	case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
		/* the only notification that triggers a response fetch */
		dev_dbg(&desc->intf->dev,
			"NOTIFY_RESPONSE_AVAILABLE received: index %d len %d\n",
			le16_to_cpu(dr->wIndex), le16_to_cpu(dr->wLength));
		break;

	case USB_CDC_NOTIFY_NETWORK_CONNECTION:

		dev_dbg(&desc->intf->dev,
			"NOTIFY_NETWORK_CONNECTION %s network\n",
			dr->wValue ? "connected to" : "disconnected from");
		goto exit;
	case USB_CDC_NOTIFY_SPEED_CHANGE:
		dev_dbg(&desc->intf->dev, "SPEED_CHANGE received (len %u)\n",
			urb->actual_length);
		goto exit;
	default:
		clear_bit(WDM_POLL_RUNNING, &desc->flags);
		dev_err(&desc->intf->dev,
			"unknown notification %d received: index %d len %d\n",
			dr->bNotificationType,
			le16_to_cpu(dr->wIndex),
			le16_to_cpu(dr->wLength));
		goto exit;
	}

	/* count the notification and submit the response urb if idle */
	spin_lock_irqsave(&desc->iuspin, flags);
	responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
	if (!desc->resp_count++ && !responding
		&& !test_bit(WDM_DISCONNECTING, &desc->flags)
		&& !test_bit(WDM_SUSPENDING, &desc->flags)) {
		rv = usb_submit_urb(desc->response, GFP_ATOMIC);
		dev_dbg(&desc->intf->dev, "submit response URB %d\n", rv);
	}
	spin_unlock_irqrestore(&desc->iuspin, flags);
	if (rv < 0) {
		clear_bit(WDM_RESPONDING, &desc->flags);
		if (rv == -EPERM)
			return;
		if (rv == -ENOMEM) {
sw:
			/* retry / halt clearing is deferred to process context */
			rv = schedule_work(&desc->rxwork);
			if (rv)
				dev_err(&desc->intf->dev,
					"Cannot schedule work\n");
		}
	}
exit:
	/* keep the notification pipe polled */
	rv = usb_submit_urb(urb, GFP_ATOMIC);
	if (rv)
		dev_err(&desc->intf->dev,
			"%s - usb_submit_urb failed with result %d\n",
			__func__, rv);

}
|
|
|
|
|
|
|
|
/* synchronously cancel all in-flight urbs for this device */
static void kill_urbs(struct wdm_device *desc)
{
	/* the order here is essential */
	usb_kill_urb(desc->command);
	usb_kill_urb(desc->validity);
	usb_kill_urb(desc->response);
}
|
|
|
|
|
|
|
|
/* drop the references on the urbs allocated at probe time */
static void free_urbs(struct wdm_device *desc)
{
	usb_free_urb(desc->validity);
	usb_free_urb(desc->response);
	usb_free_urb(desc->command);
}
|
|
|
|
|
|
|
|
/* release all per-device allocations, the urbs, and desc itself */
static void cleanup(struct wdm_device *desc)
{
	kfree(desc->sbuf);
	kfree(desc->inbuf);
	kfree(desc->orq);
	kfree(desc->irq);
	kfree(desc->ubuf);
	free_urbs(desc);
	kfree(desc);
}
|
|
|
|
|
|
|
|
/*
 * write(2) handler: send one encapsulated command to the device.
 *
 * Each write maps to exactly one SEND_ENCAPSULATED_COMMAND control
 * transfer; data beyond wMaxCommand is silently truncated. A pending
 * error from a previous asynchronous write (desc->werr) is reported
 * first. Blocks until the previous command completes unless O_NONBLOCK.
 * Returns the number of bytes submitted or a negative errno.
 */
static ssize_t wdm_write
(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
	u8 *buf;
	int rv = -EMSGSIZE, r, we;
	struct wdm_device *desc = file->private_data;
	struct usb_ctrlrequest *req;

	/* a single message can never exceed wMaxCommand */
	if (count > desc->wMaxCommand)
		count = desc->wMaxCommand;

	/* fetch and clear the error left by the previous async completion */
	spin_lock_irq(&desc->iuspin);
	we = desc->werr;
	desc->werr = 0;
	spin_unlock_irq(&desc->iuspin);
	if (we < 0)
		return usb_translate_errors(we);

	buf = memdup_user(buffer, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* concurrent writes and disconnect */
	r = mutex_lock_interruptible(&desc->wlock);
	rv = -ERESTARTSYS;
	if (r)
		goto out_free_mem;

	if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
		rv = -ENODEV;
		goto out_free_mem_lock;
	}

	/* keep the device awake for the duration of the transfer */
	r = usb_autopm_get_interface(desc->intf);
	if (r < 0) {
		rv = usb_translate_errors(r);
		goto out_free_mem_lock;
	}

	/* wait until the previous command is done, or fail with -EAGAIN */
	if (!(file->f_flags & O_NONBLOCK))
		r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
								&desc->flags));
	else
		if (test_bit(WDM_IN_USE, &desc->flags))
			r = -EAGAIN;

	if (test_bit(WDM_RESETTING, &desc->flags))
		r = -EIO;

	if (r < 0) {
		rv = r;
		goto out_free_mem_pm;
	}

	req = desc->orq;
	usb_fill_control_urb(
		desc->command,
		interface_to_usbdev(desc->intf),
		/* using common endpoint 0 */
		usb_sndctrlpipe(interface_to_usbdev(desc->intf), 0),
		(unsigned char *)req,
		buf,
		count,
		wdm_out_callback,
		desc
	);

	req->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS |
			     USB_RECIP_INTERFACE);
	req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
	req->wValue = 0;
	req->wIndex = desc->inum; /* already converted */
	req->wLength = cpu_to_le16(count);
	set_bit(WDM_IN_USE, &desc->flags);
	/* ownership of buf passes to the completion handler on success */
	desc->outbuf = buf;

	rv = usb_submit_urb(desc->command, GFP_KERNEL);
	if (rv < 0) {
		desc->outbuf = NULL;
		clear_bit(WDM_IN_USE, &desc->flags);
		dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv);
		rv = usb_translate_errors(rv);
		goto out_free_mem_pm;
	} else {
		dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d\n",
			le16_to_cpu(req->wIndex));
	}

	usb_autopm_put_interface(desc->intf);
	mutex_unlock(&desc->wlock);
	return count;

out_free_mem_pm:
	usb_autopm_put_interface(desc->intf);
out_free_mem_lock:
	mutex_unlock(&desc->wlock);
out_free_mem:
	kfree(buf);
	return rv;
}
|
|
|
|
|
2013-12-20 20:07:24 +07:00
|
|
|
/*
 * Submit the read urb if resp_count is non-zero.
 *
 * Called with desc->iuspin locked
 */
static int service_outstanding_interrupt(struct wdm_device *desc)
{
	int rv = 0;

	/* submit read urb only if the device is waiting for it */
	if (!desc->resp_count || !--desc->resp_count)
		goto out;

	set_bit(WDM_RESPONDING, &desc->flags);
	/* drop the spinlock across the sleeping GFP_KERNEL submission */
	spin_unlock_irq(&desc->iuspin);
	rv = usb_submit_urb(desc->response, GFP_KERNEL);
	spin_lock_irq(&desc->iuspin);
	if (rv) {
		dev_err(&desc->intf->dev,
			"usb_submit_urb failed with result %d\n", rv);

		/* make sure the next notification trigger a submit */
		clear_bit(WDM_RESPONDING, &desc->flags);
		desc->resp_count = 0;
	}
out:
	return rv;
}
|
|
|
|
|
2008-05-13 22:01:25 +07:00
|
|
|
/*
 * wdm_read - read one WDM response message into userspace
 *
 * Returns the number of bytes copied, 0 (O_NONBLOCK with no data),
 * or a negative errno. Concurrent readers are serialized by
 * desc->rlock; desc->length/ubuf and the WDM_* flags are protected
 * by desc->iuspin against the URB completion handlers.
 */
static ssize_t wdm_read
(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
	int rv, cntr;
	int i = 0;
	struct wdm_device *desc = file->private_data;


	rv = mutex_lock_interruptible(&desc->rlock); /*concurrent reads */
	if (rv < 0)
		return -ERESTARTSYS;

	/* READ_ONCE: length is updated by the completion handler */
	cntr = READ_ONCE(desc->length);
	if (cntr == 0) {
		/* no buffered data - we must wait for a response */
		desc->read = 0;
retry:
		if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
			rv = -ENODEV;
			goto err;
		}
		if (test_bit(WDM_OVERFLOW, &desc->flags)) {
			/* device sent more than the buffer holds; data was lost */
			clear_bit(WDM_OVERFLOW, &desc->flags);
			rv = -ENOBUFS;
			goto err;
		}
		i++;
		if (file->f_flags & O_NONBLOCK) {
			if (!test_bit(WDM_READ, &desc->flags)) {
				rv = -EAGAIN;
				goto err;
			}
			rv = 0;
		} else {
			rv = wait_event_interruptible(desc->wait,
				test_bit(WDM_READ, &desc->flags));
		}

		/* may have happened while we slept */
		if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
			rv = -ENODEV;
			goto err;
		}
		if (test_bit(WDM_RESETTING, &desc->flags)) {
			rv = -EIO;
			goto err;
		}
		usb_mark_last_busy(interface_to_usbdev(desc->intf));
		if (rv < 0) {
			/* wait was interrupted by a signal */
			rv = -ERESTARTSYS;
			goto err;
		}

		spin_lock_irq(&desc->iuspin);

		if (desc->rerr) { /* read completed, error happened */
			rv = usb_translate_errors(desc->rerr);
			desc->rerr = 0;
			spin_unlock_irq(&desc->iuspin);
			goto err;
		}
		/*
		 * recheck whether we've lost the race
		 * against the completion handler
		 */
		if (!test_bit(WDM_READ, &desc->flags)) { /* lost race */
			spin_unlock_irq(&desc->iuspin);
			goto retry;
		}

		if (!desc->reslength) { /* zero length read */
			dev_dbg(&desc->intf->dev, "zero length - clearing WDM_READ\n");
			clear_bit(WDM_READ, &desc->flags);
			/* kick the queue so a pending response can come in */
			rv = service_outstanding_interrupt(desc);
			spin_unlock_irq(&desc->iuspin);
			if (rv < 0)
				goto err;
			goto retry;
		}
		cntr = desc->length;
		spin_unlock_irq(&desc->iuspin);
	}

	if (cntr > count)
		cntr = count;
	rv = copy_to_user(buffer, desc->ubuf, cntr);
	if (rv > 0) {
		rv = -EFAULT;
		goto err;
	}

	spin_lock_irq(&desc->iuspin);

	/* shift any unread remainder to the front of the buffer */
	for (i = 0; i < desc->length - cntr; i++)
		desc->ubuf[i] = desc->ubuf[i + cntr];

	desc->length -= cntr;
	/* in case we had outstanding data */
	if (!desc->length) {
		clear_bit(WDM_READ, &desc->flags);
		service_outstanding_interrupt(desc);
	}
	spin_unlock_irq(&desc->iuspin);
	rv = cntr;

err:
	mutex_unlock(&desc->rlock);
	return rv;
}
|
|
|
|
|
|
|
|
/*
 * wdm_flush - wait for outstanding writes and report any write error
 *
 * Called on close(); blocks until the pending write completes (or the
 * device disconnects) and returns the translated write error, if any.
 *
 * Fix: desc->werr was previously read twice without desc->iuspin and
 * never reset, so a concurrent completion could be half-observed and a
 * single past error was reported again on every later flush. Snapshot
 * and clear it under the spinlock instead.
 */
static int wdm_flush(struct file *file, fl_owner_t id)
{
	struct wdm_device *desc = file->private_data;
	int rv;

	wait_event(desc->wait,
			/*
			 * needs both flags. We cannot do with one
			 * because resetting it would cause a race
			 * with write() yet we need to signal
			 * a disconnect
			 */
			!test_bit(WDM_IN_USE, &desc->flags) ||
			test_bit(WDM_DISCONNECTING, &desc->flags));

	/* cannot dereference desc->intf if WDM_DISCONNECTING */
	if (test_bit(WDM_DISCONNECTING, &desc->flags))
		return -ENODEV;

	/* read and clear the write error atomically w.r.t. the completion */
	spin_lock_irq(&desc->iuspin);
	rv = desc->werr;
	desc->werr = 0;
	spin_unlock_irq(&desc->iuspin);

	if (rv < 0)
		dev_err(&desc->intf->dev, "Error in flush path: %d\n", rv);

	return usb_translate_errors(rv);
}
|
|
|
|
|
2017-07-03 17:39:46 +07:00
|
|
|
static __poll_t wdm_poll(struct file *file, struct poll_table_struct *wait)
|
2008-05-13 22:01:25 +07:00
|
|
|
{
|
|
|
|
struct wdm_device *desc = file->private_data;
|
|
|
|
unsigned long flags;
|
2017-07-03 17:39:46 +07:00
|
|
|
__poll_t mask = 0;
|
2008-05-13 22:01:25 +07:00
|
|
|
|
|
|
|
spin_lock_irqsave(&desc->iuspin, flags);
|
|
|
|
if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
|
2018-02-12 05:34:03 +07:00
|
|
|
mask = EPOLLHUP | EPOLLERR;
|
2008-05-13 22:01:25 +07:00
|
|
|
spin_unlock_irqrestore(&desc->iuspin, flags);
|
|
|
|
goto desc_out;
|
|
|
|
}
|
|
|
|
if (test_bit(WDM_READ, &desc->flags))
|
2018-02-12 05:34:03 +07:00
|
|
|
mask = EPOLLIN | EPOLLRDNORM;
|
2008-05-13 22:01:25 +07:00
|
|
|
if (desc->rerr || desc->werr)
|
2018-02-12 05:34:03 +07:00
|
|
|
mask |= EPOLLERR;
|
2008-05-13 22:01:25 +07:00
|
|
|
if (!test_bit(WDM_IN_USE, &desc->flags))
|
2018-02-12 05:34:03 +07:00
|
|
|
mask |= EPOLLOUT | EPOLLWRNORM;
|
2008-05-13 22:01:25 +07:00
|
|
|
spin_unlock_irqrestore(&desc->iuspin, flags);
|
|
|
|
|
|
|
|
poll_wait(file, &desc->wait, wait);
|
|
|
|
|
|
|
|
desc_out:
|
|
|
|
return mask;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * wdm_open - open the cdc-wdm character device
 *
 * Looks up the wdm_device by minor under wdm_mutex, takes an autopm
 * reference for the duration of the setup, and on the first opener
 * (desc->count 0 -> 1) clears stale errors and submits the interrupt
 * (validity) URB. desc->count is protected by desc->wlock.
 * Returns 0 or a negative errno.
 */
static int wdm_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int rv = -ENODEV;
	struct usb_interface *intf;
	struct wdm_device *desc;

	mutex_lock(&wdm_mutex);
	desc = wdm_find_device_by_minor(minor);
	if (!desc)
		goto out;

	intf = desc->intf;
	if (test_bit(WDM_DISCONNECTING, &desc->flags))
		goto out;
	file->private_data = desc;

	rv = usb_autopm_get_interface(desc->intf);
	if (rv < 0) {
		dev_err(&desc->intf->dev, "Error autopm - %d\n", rv);
		goto out;
	}

	/* using write lock to protect desc->count */
	mutex_lock(&desc->wlock);
	if (!desc->count++) {
		/* first opener: start with a clean error state */
		desc->werr = 0;
		desc->rerr = 0;
		rv = usb_submit_urb(desc->validity, GFP_KERNEL);
		if (rv < 0) {
			/* roll back the refcount taken above */
			desc->count--;
			dev_err(&desc->intf->dev,
				"Error submitting int urb - %d\n", rv);
			rv = usb_translate_errors(rv);
		}
	} else {
		rv = 0;
	}
	mutex_unlock(&desc->wlock);
	/* NOTE(review): count is re-read outside wlock here - relies on
	 * wdm_mutex serializing open/release; confirm */
	if (desc->count == 1)
		desc->manage_power(intf, 1);
	usb_autopm_put_interface(desc->intf);
out:
	mutex_unlock(&wdm_mutex);
	return rv;
}
|
|
|
|
|
|
|
|
/*
 * wdm_release - release the cdc-wdm character device
 *
 * Drops the open count under desc->wlock; when the last opener goes
 * away, either kills the URBs and drops the power reference (device
 * still present) or frees the whole device state (already
 * disconnected). Always returns 0.
 */
static int wdm_release(struct inode *inode, struct file *file)
{
	struct wdm_device *desc = file->private_data;

	mutex_lock(&wdm_mutex);

	/* using write lock to protect desc->count */
	mutex_lock(&desc->wlock);
	desc->count--;
	mutex_unlock(&desc->wlock);

	if (!desc->count) {
		if (!test_bit(WDM_DISCONNECTING, &desc->flags)) {
			/* last opener, device still present: quiesce it */
			dev_dbg(&desc->intf->dev, "wdm_release: cleanup\n");
			kill_urbs(desc);
			spin_lock_irq(&desc->iuspin);
			desc->resp_count = 0;
			spin_unlock_irq(&desc->iuspin);
			desc->manage_power(desc->intf, 0);
		} else {
			/* must avoid dev_printk here as desc->intf is invalid */
			pr_debug(KBUILD_MODNAME " %s: device gone - cleaning up\n", __func__);
			cleanup(desc);
		}
	}
	mutex_unlock(&wdm_mutex);
	return 0;
}
|
|
|
|
|
USB: cdc-wdm: implement IOCTL_WDM_MAX_COMMAND
Userspace applications need to know the maximum supported message
size.
The cdc-wdm driver translates between a character device stream
and a message based protocol. Each message is transported as a
usb control message with no further encapsulation or syncronization.
Each read or write on the character device should translate to
exactly one usb control message to ensure that message boundaries
are kept intact. That means that the userspace application must
know the maximum message size supported by the device and driver,
making this size a vital part of the cdc-wdm character device API.
CDC WDM and CDC MBIM functions export the maximum supported
message size through CDC functional descriptors. The cdc-wdm and
cdc_mbim drivers will parse these descriptors and use the value
chosen by the device. The only current way for a userspace
application to retrive the value is by duplicating the descriptor
parsing. This is an unnecessary complex task, and application
writers are likely to postpone it, using a fixed value and adding
a "todo" item.
QMI functions have no way to tell the host what message size they
support. The qmi_wwan driver use a fixed value based on protocol
recommendations and observed device behaviour. Userspace
applications must know and hard code the same value. This scheme
will break if we ever encounter a QMI device needing a device
specific message size quirk. We are currently unable to support
such a device because using a non default size would break the
implicit userspace API.
The message size is currently a hidden attribute of the cdc-wdm
userspace API. Retrieving it is unnecessarily complex, increasing
the possibility of drivers and applications using different limits.
The resulting errors are hard to debug, and can only be replicated
on identical hardware.
Exporting the maximum message size from the driver simplifies the
task for the userspace application, and creates a unified
information source independent of device and function class. It also
serves to document that the message size is part of the cdc-wdm
userspace API.
This proposed API extension has been presented for the authors of
userspace applications and libraries using the current API: libmbim,
libqmi, uqmi, oFono and ModemManager. The replies were:
Aleksander Morgado:
"We do really need max message size for MBIM; and as you say, it may be
good to have the max message size info also for QMI, so the new ioctl
seems a good addition. So +1 from my side, for what it's worth."
Dan Williams:
"Yeah, +1 here. I'd prefer the sysfs file, but the fact that that
doesn't work for fd passing pretty much kills it."
No negative replies are so far received.
Cc: Aleksander Morgado <aleksander@lanedo.com>
Cc: Dan Williams <dcbw@redhat.com>
Signed-off-by: Bjørn Mork <bjorn@mork.no>
Acked-by: Oliver Neukum <oliver@neukum.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2013-03-18 03:00:06 +07:00
|
|
|
static long wdm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|
|
|
{
|
|
|
|
struct wdm_device *desc = file->private_data;
|
|
|
|
int rv = 0;
|
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
case IOCTL_WDM_MAX_COMMAND:
|
|
|
|
if (copy_to_user((void __user *)arg, &desc->wMaxCommand, sizeof(desc->wMaxCommand)))
|
|
|
|
rv = -EFAULT;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
rv = -ENOTTY;
|
|
|
|
}
|
|
|
|
return rv;
|
|
|
|
}
|
|
|
|
|
2008-05-13 22:01:25 +07:00
|
|
|
/* file operations for the cdc-wdm character device */
static const struct file_operations wdm_fops = {
	.owner =	THIS_MODULE,
	.read =		wdm_read,
	.write =	wdm_write,
	.open =		wdm_open,
	.flush =	wdm_flush,
	.release =	wdm_release,
	.poll =		wdm_poll,
	.unlocked_ioctl = wdm_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.llseek =	noop_llseek,
};
|
|
|
|
|
|
|
|
/* USB class-device registration data: name template, fops, minor range */
static struct usb_class_driver wdm_class = {
	.name = "cdc-wdm%d",		/* %d is replaced by the allocated minor */
	.fops = &wdm_fops,
	.minor_base = WDM_MINOR_BASE,
};
|
|
|
|
|
|
|
|
/* --- error handling --- */

/*
 * wdm_rxwork - deferred retry of the response (control-IN) URB
 *
 * Scheduled when submitting desc->response failed in completion (atomic)
 * context; here we may sleep and use GFP_KERNEL.  WDM_RESPONDING
 * guarantees at most one response URB is in flight; on a fresh failure
 * the work reschedules itself unless the device is disconnecting.
 */
static void wdm_rxwork(struct work_struct *work)
{
	struct wdm_device *desc = container_of(work, struct wdm_device, rxwork);
	unsigned long flags;
	int rv = 0;
	int responding;

	spin_lock_irqsave(&desc->iuspin, flags);
	if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
		/* device gone - nothing to resubmit */
		spin_unlock_irqrestore(&desc->iuspin, flags);
	} else {
		/* claim the single in-flight slot before submitting */
		responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
		spin_unlock_irqrestore(&desc->iuspin, flags);
		if (!responding)
			rv = usb_submit_urb(desc->response, GFP_KERNEL);
		/* -EPERM means the URB is being killed - do not retry then */
		if (rv < 0 && rv != -EPERM) {
			spin_lock_irqsave(&desc->iuspin, flags);
			clear_bit(WDM_RESPONDING, &desc->flags);
			if (!test_bit(WDM_DISCONNECTING, &desc->flags))
				schedule_work(&desc->rxwork);
			spin_unlock_irqrestore(&desc->iuspin, flags);
		}
	}
}
|
|
|
|
|
2018-06-14 23:36:46 +07:00
|
|
|
/*
 * service_interrupt_work - process-context wrapper for
 * service_outstanding_interrupt()
 *
 * If no responses remain outstanding afterwards, mark WDM_READ and wake
 * any reader blocked in wdm_read() so it can return.
 */
static void service_interrupt_work(struct work_struct *work)
{
	struct wdm_device *desc;

	desc = container_of(work, struct wdm_device, service_outs_intr);

	spin_lock_irq(&desc->iuspin);
	service_outstanding_interrupt(desc);
	if (!desc->resp_count) {
		/* nothing pending - unblock a sleeping reader */
		set_bit(WDM_READ, &desc->flags);
		wake_up(&desc->wait);
	}
	spin_unlock_irq(&desc->iuspin);
}
|
|
|
|
|
2008-05-13 22:01:25 +07:00
|
|
|
/* --- hotplug --- */

/*
 * wdm_create - allocate, initialize and register one wdm_device
 * @intf: interface carrying the device management function
 * @ep: interrupt-IN endpoint delivering notifications
 * @bufsize: maximum encapsulated message size (becomes wMaxCommand)
 * @manage_power: callback used to toggle remote wakeup on @intf
 *
 * Allocates all buffers and URBs, pre-fills the interrupt URB and the
 * GET_ENCAPSULATED_RESPONSE control URB, links the device into
 * wdm_device_list and registers the character device.
 *
 * Return: 0 on success or a negative errno; on error every partial
 * allocation is released through cleanup().
 */
static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor *ep,
		u16 bufsize, int (*manage_power)(struct usb_interface *, int))
{
	int rv = -ENOMEM;
	struct wdm_device *desc;

	desc = kzalloc(sizeof(struct wdm_device), GFP_KERNEL);
	if (!desc)
		goto out;
	/* list head must be valid before any goto err (list_del there) */
	INIT_LIST_HEAD(&desc->device_list);
	mutex_init(&desc->rlock);
	mutex_init(&desc->wlock);
	spin_lock_init(&desc->iuspin);
	init_waitqueue_head(&desc->wait);
	desc->wMaxCommand = bufsize;
	/* this will be expanded and needed in hardware endianness */
	desc->inum = cpu_to_le16((u16)intf->cur_altsetting->desc.bInterfaceNumber);
	desc->intf = intf;
	INIT_WORK(&desc->rxwork, wdm_rxwork);
	INIT_WORK(&desc->service_outs_intr, service_interrupt_work);

	/* reject anything but an interrupt-IN notification endpoint */
	rv = -EINVAL;
	if (!usb_endpoint_is_int_in(ep))
		goto err;

	desc->wMaxPacketSize = usb_endpoint_maxp(ep);

	desc->orq = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
	if (!desc->orq)
		goto err;
	desc->irq = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
	if (!desc->irq)
		goto err;

	desc->validity = usb_alloc_urb(0, GFP_KERNEL);
	if (!desc->validity)
		goto err;

	desc->response = usb_alloc_urb(0, GFP_KERNEL);
	if (!desc->response)
		goto err;

	desc->command = usb_alloc_urb(0, GFP_KERNEL);
	if (!desc->command)
		goto err;

	desc->ubuf = kmalloc(desc->wMaxCommand, GFP_KERNEL);
	if (!desc->ubuf)
		goto err;

	desc->sbuf = kmalloc(desc->wMaxPacketSize, GFP_KERNEL);
	if (!desc->sbuf)
		goto err;

	desc->inbuf = kmalloc(desc->wMaxCommand, GFP_KERNEL);
	if (!desc->inbuf)
		goto err;

	/* interrupt URB: polls the notification endpoint */
	usb_fill_int_urb(
		desc->validity,
		interface_to_usbdev(intf),
		usb_rcvintpipe(interface_to_usbdev(intf), ep->bEndpointAddress),
		desc->sbuf,
		desc->wMaxPacketSize,
		wdm_int_callback,
		desc,
		ep->bInterval
	);

	/* setup packet for reading encapsulated responses from the device */
	desc->irq->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
	desc->irq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE;
	desc->irq->wValue = 0;
	desc->irq->wIndex = desc->inum; /* already converted */
	desc->irq->wLength = cpu_to_le16(desc->wMaxCommand);

	usb_fill_control_urb(
		desc->response,
		interface_to_usbdev(intf),
		/* using common endpoint 0 */
		usb_rcvctrlpipe(interface_to_usbdev(desc->intf), 0),
		(unsigned char *)desc->irq,
		desc->inbuf,
		desc->wMaxCommand,
		wdm_in_callback,
		desc
	);

	desc->manage_power = manage_power;

	spin_lock(&wdm_device_list_lock);
	list_add(&desc->device_list, &wdm_device_list);
	spin_unlock(&wdm_device_list_lock);

	rv = usb_register_dev(intf, &wdm_class);
	if (rv < 0)
		goto err;
	else
		dev_info(&intf->dev, "%s: USB WDM device\n", dev_name(intf->usb_dev));
out:
	return rv;
err:
	/* unlink before freeing - desc->intf is the lookup key */
	spin_lock(&wdm_device_list_lock);
	list_del(&desc->device_list);
	spin_unlock(&wdm_device_list_lock);
	cleanup(desc);
	return rv;
}
|
|
|
|
|
2012-03-06 23:29:22 +07:00
|
|
|
static int wdm_manage_power(struct usb_interface *intf, int on)
|
|
|
|
{
|
|
|
|
/* need autopm_get/put here to ensure the usbcore sees the new value */
|
|
|
|
int rv = usb_autopm_get_interface(intf);
|
|
|
|
|
|
|
|
intf->needs_remote_wakeup = on;
|
2013-11-30 02:17:45 +07:00
|
|
|
if (!rv)
|
|
|
|
usb_autopm_put_interface(intf);
|
|
|
|
return 0;
|
2012-03-06 23:29:22 +07:00
|
|
|
}
|
|
|
|
|
2012-03-06 23:29:20 +07:00
|
|
|
/*
 * wdm_probe - bind the driver to a matched WDM interface
 *
 * Parses the CDC functional descriptors to learn the device's
 * wMaxCommand (falling back to WDM_DEFAULT_BUFSIZE), requires exactly
 * one endpoint on the current altsetting, and hands off to wdm_create().
 */
static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	int rv = -EINVAL;
	struct usb_host_interface *iface;
	struct usb_endpoint_descriptor *ep;
	struct usb_cdc_parsed_header hdr;
	u8 *buffer = intf->altsetting->extra;
	int buflen = intf->altsetting->extralen;
	u16 maxcom = WDM_DEFAULT_BUFSIZE;

	if (!buffer)
		goto err;

	cdc_parse_cdc_header(&hdr, intf, buffer, buflen);

	/* device-announced maximum command size, if a DMM descriptor exists */
	if (hdr.usb_cdc_dmm_desc)
		maxcom = le16_to_cpu(hdr.usb_cdc_dmm_desc->wMaxCommand);

	iface = intf->cur_altsetting;
	if (iface->desc.bNumEndpoints != 1)
		goto err;
	ep = &iface->endpoint[0].desc;

	rv = wdm_create(intf, ep, maxcom, &wdm_manage_power);

err:
	return rv;
}
|
|
|
|
|
2012-03-06 23:29:22 +07:00
|
|
|
/**
 * usb_cdc_wdm_register - register a WDM subdriver
 * @intf: usb interface the subdriver will associate with
 * @ep: interrupt endpoint to monitor for notifications
 * @bufsize: maximum message size to support for read/write
 * @manage_power: callback the subdriver uses to toggle remote wakeup
 *	on @intf
 *
 * Create WDM usb class character device and associate it with intf
 * without binding, allowing another driver to manage the interface.
 *
 * The subdriver will manage the given interrupt endpoint exclusively
 * and will issue control requests referring to the given intf. It
 * will otherwise avoid interferring, and in particular not do
 * usb_set_intfdata/usb_get_intfdata on intf.
 *
 * The return value is a pointer to the subdriver's struct usb_driver.
 * The registering driver is responsible for calling this subdriver's
 * disconnect, suspend, resume, pre_reset and post_reset methods from
 * its own.
 */
struct usb_driver *usb_cdc_wdm_register(struct usb_interface *intf,
					struct usb_endpoint_descriptor *ep,
					int bufsize,
					int (*manage_power)(struct usb_interface *, int))
{
	int rv;

	rv = wdm_create(intf, ep, bufsize, manage_power);
	if (rv < 0)
		goto err;

	return &wdm_driver;
err:
	/* propagate wdm_create()'s errno as an ERR_PTR */
	return ERR_PTR(rv);
}
EXPORT_SYMBOL(usb_cdc_wdm_register);
|
|
|
|
|
2008-05-13 22:01:25 +07:00
|
|
|
/*
 * wdm_disconnect - tear down a departing device
 *
 * Order matters: deregister the chardev, flag DISCONNECTING under the
 * spinlock (stops the callbacks generating new URBs), wake sleepers,
 * then kill URBs and flush work under both I/O mutexes.  If files are
 * still open, cleanup() is deferred to the final release.
 */
static void wdm_disconnect(struct usb_interface *intf)
{
	struct wdm_device *desc;
	unsigned long flags;

	usb_deregister_dev(intf, &wdm_class);
	desc = wdm_find_device(intf);
	mutex_lock(&wdm_mutex);

	/* the spinlock makes sure no new urbs are generated in the callbacks */
	spin_lock_irqsave(&desc->iuspin, flags);
	set_bit(WDM_DISCONNECTING, &desc->flags);
	set_bit(WDM_READ, &desc->flags);	/* let blocked readers return */
	spin_unlock_irqrestore(&desc->iuspin, flags);
	wake_up_all(&desc->wait);
	mutex_lock(&desc->rlock);
	mutex_lock(&desc->wlock);
	kill_urbs(desc);
	cancel_work_sync(&desc->rxwork);
	cancel_work_sync(&desc->service_outs_intr);
	mutex_unlock(&desc->wlock);
	mutex_unlock(&desc->rlock);

	/* the desc->intf pointer used as list key is now invalid */
	spin_lock(&wdm_device_list_lock);
	list_del(&desc->device_list);
	spin_unlock(&wdm_device_list_lock);

	if (!desc->count)
		cleanup(desc);
	else
		dev_dbg(&intf->dev, "%d open files - postponing cleanup\n", desc->count);
	mutex_unlock(&wdm_mutex);
}
|
|
|
|
|
2010-02-28 02:56:47 +07:00
|
|
|
#ifdef CONFIG_PM
|
2008-06-24 20:56:10 +07:00
|
|
|
static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
|
|
|
|
{
|
2012-03-06 23:29:21 +07:00
|
|
|
struct wdm_device *desc = wdm_find_device(intf);
|
2008-06-24 20:56:10 +07:00
|
|
|
int rv = 0;
|
|
|
|
|
|
|
|
dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor);
|
|
|
|
|
2010-02-28 02:56:47 +07:00
|
|
|
/* if this is an autosuspend the caller does the locking */
|
2012-01-16 18:41:48 +07:00
|
|
|
if (!PMSG_IS_AUTO(message)) {
|
|
|
|
mutex_lock(&desc->rlock);
|
|
|
|
mutex_lock(&desc->wlock);
|
|
|
|
}
|
2010-02-28 02:56:22 +07:00
|
|
|
spin_lock_irq(&desc->iuspin);
|
2010-02-28 02:56:47 +07:00
|
|
|
|
2011-08-20 04:49:48 +07:00
|
|
|
if (PMSG_IS_AUTO(message) &&
|
2010-02-28 02:54:59 +07:00
|
|
|
(test_bit(WDM_IN_USE, &desc->flags)
|
|
|
|
|| test_bit(WDM_RESPONDING, &desc->flags))) {
|
2010-02-28 02:56:22 +07:00
|
|
|
spin_unlock_irq(&desc->iuspin);
|
2008-06-24 20:56:10 +07:00
|
|
|
rv = -EBUSY;
|
|
|
|
} else {
|
2010-02-28 02:56:47 +07:00
|
|
|
|
2010-02-28 02:55:52 +07:00
|
|
|
set_bit(WDM_SUSPENDING, &desc->flags);
|
2010-02-28 02:56:22 +07:00
|
|
|
spin_unlock_irq(&desc->iuspin);
|
2010-02-28 02:56:47 +07:00
|
|
|
/* callback submits work - order is essential */
|
2008-06-24 20:56:10 +07:00
|
|
|
kill_urbs(desc);
|
2010-02-28 02:56:47 +07:00
|
|
|
cancel_work_sync(&desc->rxwork);
|
2018-06-14 23:36:46 +07:00
|
|
|
cancel_work_sync(&desc->service_outs_intr);
|
2008-06-24 20:56:10 +07:00
|
|
|
}
|
2012-01-16 18:41:48 +07:00
|
|
|
if (!PMSG_IS_AUTO(message)) {
|
|
|
|
mutex_unlock(&desc->wlock);
|
|
|
|
mutex_unlock(&desc->rlock);
|
|
|
|
}
|
2008-06-24 20:56:10 +07:00
|
|
|
|
|
|
|
return rv;
|
|
|
|
}
|
2010-02-28 02:56:47 +07:00
|
|
|
#endif
|
2008-06-24 20:56:10 +07:00
|
|
|
|
|
|
|
static int recover_from_urb_loss(struct wdm_device *desc)
|
|
|
|
{
|
|
|
|
int rv = 0;
|
|
|
|
|
|
|
|
if (desc->count) {
|
|
|
|
rv = usb_submit_urb(desc->validity, GFP_NOIO);
|
|
|
|
if (rv < 0)
|
2008-08-14 23:37:34 +07:00
|
|
|
dev_err(&desc->intf->dev,
|
|
|
|
"Error resume submitting int urb - %d\n", rv);
|
2008-06-24 20:56:10 +07:00
|
|
|
}
|
|
|
|
return rv;
|
|
|
|
}
|
2010-02-28 02:56:47 +07:00
|
|
|
|
|
|
|
#ifdef CONFIG_PM
/*
 * wdm_resume - resume callback (also used for reset_resume)
 *
 * Clears the suspend flag and resubmits the interrupt URB if the
 * device is open.
 */
static int wdm_resume(struct usb_interface *intf)
{
	struct wdm_device *desc = wdm_find_device(intf);

	dev_dbg(&desc->intf->dev, "wdm%d_resume\n", intf->minor);

	clear_bit(WDM_SUSPENDING, &desc->flags);

	return recover_from_urb_loss(desc);
}
#endif
|
2008-06-24 20:56:10 +07:00
|
|
|
|
|
|
|
/*
 * wdm_pre_reset - quiesce I/O before a device reset
 *
 * Flags the reset and wakes all sleepers before killing traffic so no
 * spontaneous message from the device is lost.  NOTE: rlock and wlock
 * are intentionally left held here; wdm_post_reset() releases them.
 */
static int wdm_pre_reset(struct usb_interface *intf)
{
	struct wdm_device *desc = wdm_find_device(intf);

	/*
	 * we notify everybody using poll of
	 * an exceptional situation
	 * must be done before recovery lest a spontaneous
	 * message from the device is lost
	 */
	spin_lock_irq(&desc->iuspin);
	set_bit(WDM_RESETTING, &desc->flags);	/* inform read/write */
	set_bit(WDM_READ, &desc->flags);	/* unblock read */
	clear_bit(WDM_IN_USE, &desc->flags);	/* unblock write */
	desc->rerr = -EINTR;
	spin_unlock_irq(&desc->iuspin);
	wake_up_all(&desc->wait);
	mutex_lock(&desc->rlock);
	mutex_lock(&desc->wlock);
	kill_urbs(desc);
	cancel_work_sync(&desc->rxwork);
	cancel_work_sync(&desc->service_outs_intr);
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * wdm_post_reset - restart I/O after a device reset
 *
 * Clears the overflow/reset flags, resubmits the interrupt URB and
 * drops the mutexes taken in wdm_pre_reset().
 */
static int wdm_post_reset(struct usb_interface *intf)
{
	struct wdm_device *desc = wdm_find_device(intf);
	int rv;

	clear_bit(WDM_OVERFLOW, &desc->flags);
	clear_bit(WDM_RESETTING, &desc->flags);
	rv = recover_from_urb_loss(desc);
	/* locks were acquired in wdm_pre_reset() */
	mutex_unlock(&desc->wlock);
	mutex_unlock(&desc->rlock);
	return rv;
}
|
|
|
|
|
2008-05-13 22:01:25 +07:00
|
|
|
/* USB driver callbacks; PM entry points only exist with CONFIG_PM */
static struct usb_driver wdm_driver = {
	.name = "cdc_wdm",
	.probe = wdm_probe,
	.disconnect = wdm_disconnect,
#ifdef CONFIG_PM
	.suspend = wdm_suspend,
	.resume = wdm_resume,
	.reset_resume = wdm_resume,	/* same recovery path as plain resume */
#endif
	.pre_reset = wdm_pre_reset,
	.post_reset = wdm_post_reset,
	.id_table = wdm_ids,
	.supports_autosuspend = 1,
	.disable_hub_initiated_lpm = 1,
};
|
|
|
|
|
2011-11-19 00:34:02 +07:00
|
|
|
/* standard module init/exit boilerplate generated by the USB core */
module_usb_driver(wdm_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
|