Diffstat (limited to 'drivers/usb/gadget/function')
56 files changed, 34161 insertions, 0 deletions
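
All of the function drivers added in this series plug into the composite gadget's function framework: each file registers an allocator pair through DECLARE_USB_FUNCTION_INIT(), and a composite driver (a legacy gadget or the configfs interface) instantiates the function by name. As a rough sketch only, not part of the patch, the fragment below shows how a configuration bind routine could pull in the "acm" function introduced here; the example_do_config() name and the surrounding configuration setup are assumptions for illustration.

	/* Hypothetical caller, not from this patch: attaches the "acm"
	 * function to a configuration using the composite framework APIs
	 * that these drivers register themselves with.
	 */
	#include <linux/err.h>
	#include <linux/usb/composite.h>

	static struct usb_function_instance *fi_acm;
	static struct usb_function *f_acm_port;

	static int example_do_config(struct usb_configuration *c)
	{
		int status;

		/* looks up (and modprobes, if needed) the "acm" function driver */
		fi_acm = usb_get_function_instance("acm");
		if (IS_ERR(fi_acm))
			return PTR_ERR(fi_acm);

		/* allocates a struct f_acm via acm_alloc_func() */
		f_acm_port = usb_get_function(fi_acm);
		if (IS_ERR(f_acm_port)) {
			status = PTR_ERR(f_acm_port);
			goto err_put_inst;
		}

		/* adds the function to the configuration and calls acm_bind() */
		status = usb_add_function(c, f_acm_port);
		if (status)
			goto err_put_func;

		return 0;

	err_put_func:
		usb_put_function(f_acm_port);
	err_put_inst:
		usb_put_function_instance(fi_acm);
		return status;
	}

A configfs-based gadget reaches the same acm_alloc_instance()/acm_alloc_func() path when the function directory is created from user space, so the same descriptors and bind code below serve both styles of composite driver.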
diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile new file mode 100644 index 000000000..bd7def576 --- /dev/null +++ b/drivers/usb/gadget/function/Makefile @@ -0,0 +1,46 @@ +# +# USB peripheral controller drivers +# + +ccflags-y := -I$(srctree)/drivers/usb/gadget/ +ccflags-y += -I$(srctree)/drivers/usb/gadget/udc/ + +# USB Functions +usb_f_acm-y := f_acm.o +obj-$(CONFIG_USB_F_ACM) += usb_f_acm.o +usb_f_ss_lb-y := f_loopback.o f_sourcesink.o +obj-$(CONFIG_USB_F_SS_LB) += usb_f_ss_lb.o +obj-$(CONFIG_USB_U_SERIAL) += u_serial.o +usb_f_serial-y := f_serial.o +obj-$(CONFIG_USB_F_SERIAL) += usb_f_serial.o +usb_f_obex-y := f_obex.o +obj-$(CONFIG_USB_F_OBEX) += usb_f_obex.o +obj-$(CONFIG_USB_U_ETHER) += u_ether.o +usb_f_ncm-y := f_ncm.o +obj-$(CONFIG_USB_F_NCM) += usb_f_ncm.o +usb_f_ecm-y := f_ecm.o +obj-$(CONFIG_USB_F_ECM) += usb_f_ecm.o +usb_f_phonet-y := f_phonet.o +obj-$(CONFIG_USB_F_PHONET) += usb_f_phonet.o +usb_f_eem-y := f_eem.o +obj-$(CONFIG_USB_F_EEM) += usb_f_eem.o +usb_f_ecm_subset-y := f_subset.o +obj-$(CONFIG_USB_F_SUBSET) += usb_f_ecm_subset.o +usb_f_rndis-y := f_rndis.o rndis.o +obj-$(CONFIG_USB_F_RNDIS) += usb_f_rndis.o +usb_f_mass_storage-y := f_mass_storage.o storage_common.o +obj-$(CONFIG_USB_F_MASS_STORAGE)+= usb_f_mass_storage.o +usb_f_fs-y := f_fs.o +obj-$(CONFIG_USB_F_FS) += usb_f_fs.o +usb_f_uac1-y := f_uac1.o u_uac1.o +obj-$(CONFIG_USB_F_UAC1) += usb_f_uac1.o +usb_f_uac2-y := f_uac2.o +obj-$(CONFIG_USB_F_UAC2) += usb_f_uac2.o +usb_f_uvc-y := f_uvc.o uvc_queue.o uvc_v4l2.o uvc_video.o uvc_configfs.o +obj-$(CONFIG_USB_F_UVC) += usb_f_uvc.o +usb_f_midi-y := f_midi.o +obj-$(CONFIG_USB_F_MIDI) += usb_f_midi.o +usb_f_hid-y := f_hid.o +obj-$(CONFIG_USB_F_HID) += usb_f_hid.o +usb_f_printer-y := f_printer.o +obj-$(CONFIG_USB_F_PRINTER) += usb_f_printer.o diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c new file mode 100644 index 000000000..aad8165e9 --- /dev/null +++ b/drivers/usb/gadget/function/f_acm.c @@ -0,0 +1,855 @@ +/* + * f_acm.c -- USB CDC serial (ACM) function driver + * + * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com) + * Copyright (C) 2008 by David Brownell + * Copyright (C) 2008 by Nokia Corporation + * Copyright (C) 2009 by Samsung Electronics + * Author: Michal Nazarewicz (mina86@mina86.com) + * + * This software is distributed under the terms of the GNU General + * Public License ("GPL") as published by the Free Software Foundation, + * either version 2 of that License or (at your option) any later version. + */ + +/* #define VERBOSE_DEBUG */ + +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/err.h> + +#include "u_serial.h" +#include "gadget_chips.h" + + +/* + * This CDC ACM function support just wraps control functions and + * notifications around the generic serial-over-usb code. + * + * Because CDC ACM is standardized by the USB-IF, many host operating + * systems have drivers for it. Accordingly, ACM is the preferred + * interop solution for serial-port type connections. The control + * models are often not necessary, and in any case don't do much in + * this bare-bones implementation. + * + * Note that even MS-Windows has some support for ACM. However, that + * support is somewhat broken because when you use ACM in a composite + * device, having multiple interfaces confuses the poor OS. It doesn't + * seem to understand CDC Union descriptors. 
The new "association" + * descriptors (roughly equivalent to CDC Unions) may sometimes help. + */ + +struct f_acm { + struct gserial port; + u8 ctrl_id, data_id; + u8 port_num; + + u8 pending; + + /* lock is mostly for pending and notify_req ... they get accessed + * by callbacks both from tty (open/close/break) under its spinlock, + * and notify_req.complete() which can't use that lock. + */ + spinlock_t lock; + + struct usb_ep *notify; + struct usb_request *notify_req; + + struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */ + + /* SetControlLineState request -- CDC 1.1 section 6.2.14 (INPUT) */ + u16 port_handshake_bits; +#define ACM_CTRL_RTS (1 << 1) /* unused with full duplex */ +#define ACM_CTRL_DTR (1 << 0) /* host is ready for data r/w */ + + /* SerialState notification -- CDC 1.1 section 6.3.5 (OUTPUT) */ + u16 serial_state; +#define ACM_CTRL_OVERRUN (1 << 6) +#define ACM_CTRL_PARITY (1 << 5) +#define ACM_CTRL_FRAMING (1 << 4) +#define ACM_CTRL_RI (1 << 3) +#define ACM_CTRL_BRK (1 << 2) +#define ACM_CTRL_DSR (1 << 1) +#define ACM_CTRL_DCD (1 << 0) +}; + +static inline struct f_acm *func_to_acm(struct usb_function *f) +{ + return container_of(f, struct f_acm, port.func); +} + +static inline struct f_acm *port_to_acm(struct gserial *p) +{ + return container_of(p, struct f_acm, port); +} + +/*-------------------------------------------------------------------------*/ + +/* notification endpoint uses smallish and infrequent fixed-size messages */ + +#define GS_NOTIFY_INTERVAL_MS 32 +#define GS_NOTIFY_MAXPACKET 10 /* notification + 2 bytes */ + +/* interface and class descriptors: */ + +static struct usb_interface_assoc_descriptor +acm_iad_descriptor = { + .bLength = sizeof acm_iad_descriptor, + .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION, + + /* .bFirstInterface = DYNAMIC, */ + .bInterfaceCount = 2, // control + data + .bFunctionClass = USB_CLASS_COMM, + .bFunctionSubClass = USB_CDC_SUBCLASS_ACM, + .bFunctionProtocol = USB_CDC_ACM_PROTO_AT_V25TER, + /* .iFunction = DYNAMIC */ +}; + + +static struct usb_interface_descriptor acm_control_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + /* .bInterfaceNumber = DYNAMIC */ + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_ACM, + .bInterfaceProtocol = USB_CDC_ACM_PROTO_AT_V25TER, + /* .iInterface = DYNAMIC */ +}; + +static struct usb_interface_descriptor acm_data_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + /* .bInterfaceNumber = DYNAMIC */ + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_CDC_DATA, + .bInterfaceSubClass = 0, + .bInterfaceProtocol = 0, + /* .iInterface = DYNAMIC */ +}; + +static struct usb_cdc_header_desc acm_header_desc = { + .bLength = sizeof(acm_header_desc), + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_HEADER_TYPE, + .bcdCDC = cpu_to_le16(0x0110), +}; + +static struct usb_cdc_call_mgmt_descriptor +acm_call_mgmt_descriptor = { + .bLength = sizeof(acm_call_mgmt_descriptor), + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE, + .bmCapabilities = 0, + /* .bDataInterface = DYNAMIC */ +}; + +static struct usb_cdc_acm_descriptor acm_descriptor = { + .bLength = sizeof(acm_descriptor), + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_ACM_TYPE, + .bmCapabilities = USB_CDC_CAP_LINE, +}; + +static struct usb_cdc_union_desc acm_union_desc = { + .bLength = sizeof(acm_union_desc), 
+ .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_UNION_TYPE, + /* .bMasterInterface0 = DYNAMIC */ + /* .bSlaveInterface0 = DYNAMIC */ +}; + +/* full speed support: */ + +static struct usb_endpoint_descriptor acm_fs_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET), + .bInterval = GS_NOTIFY_INTERVAL_MS, +}; + +static struct usb_endpoint_descriptor acm_fs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor acm_fs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_descriptor_header *acm_fs_function[] = { + (struct usb_descriptor_header *) &acm_iad_descriptor, + (struct usb_descriptor_header *) &acm_control_interface_desc, + (struct usb_descriptor_header *) &acm_header_desc, + (struct usb_descriptor_header *) &acm_call_mgmt_descriptor, + (struct usb_descriptor_header *) &acm_descriptor, + (struct usb_descriptor_header *) &acm_union_desc, + (struct usb_descriptor_header *) &acm_fs_notify_desc, + (struct usb_descriptor_header *) &acm_data_interface_desc, + (struct usb_descriptor_header *) &acm_fs_in_desc, + (struct usb_descriptor_header *) &acm_fs_out_desc, + NULL, +}; + +/* high speed support: */ +static struct usb_endpoint_descriptor acm_hs_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = cpu_to_le16(GS_NOTIFY_MAXPACKET), + .bInterval = USB_MS_TO_HS_INTERVAL(GS_NOTIFY_INTERVAL_MS), +}; + +static struct usb_endpoint_descriptor acm_hs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor acm_hs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), +}; + +static struct usb_descriptor_header *acm_hs_function[] = { + (struct usb_descriptor_header *) &acm_iad_descriptor, + (struct usb_descriptor_header *) &acm_control_interface_desc, + (struct usb_descriptor_header *) &acm_header_desc, + (struct usb_descriptor_header *) &acm_call_mgmt_descriptor, + (struct usb_descriptor_header *) &acm_descriptor, + (struct usb_descriptor_header *) &acm_union_desc, + (struct usb_descriptor_header *) &acm_hs_notify_desc, + (struct usb_descriptor_header *) &acm_data_interface_desc, + (struct usb_descriptor_header *) &acm_hs_in_desc, + (struct usb_descriptor_header *) &acm_hs_out_desc, + NULL, +}; + +static struct usb_endpoint_descriptor acm_ss_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(1024), +}; + +static struct usb_endpoint_descriptor acm_ss_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(1024), +}; + +static struct usb_ss_ep_comp_descriptor acm_ss_bulk_comp_desc = { + .bLength = sizeof acm_ss_bulk_comp_desc, + 
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP, +}; + +static struct usb_descriptor_header *acm_ss_function[] = { + (struct usb_descriptor_header *) &acm_iad_descriptor, + (struct usb_descriptor_header *) &acm_control_interface_desc, + (struct usb_descriptor_header *) &acm_header_desc, + (struct usb_descriptor_header *) &acm_call_mgmt_descriptor, + (struct usb_descriptor_header *) &acm_descriptor, + (struct usb_descriptor_header *) &acm_union_desc, + (struct usb_descriptor_header *) &acm_hs_notify_desc, + (struct usb_descriptor_header *) &acm_ss_bulk_comp_desc, + (struct usb_descriptor_header *) &acm_data_interface_desc, + (struct usb_descriptor_header *) &acm_ss_in_desc, + (struct usb_descriptor_header *) &acm_ss_bulk_comp_desc, + (struct usb_descriptor_header *) &acm_ss_out_desc, + (struct usb_descriptor_header *) &acm_ss_bulk_comp_desc, + NULL, +}; + +/* string descriptors: */ + +#define ACM_CTRL_IDX 0 +#define ACM_DATA_IDX 1 +#define ACM_IAD_IDX 2 + +/* static strings, in UTF-8 */ +static struct usb_string acm_string_defs[] = { + [ACM_CTRL_IDX].s = "CDC Abstract Control Model (ACM)", + [ACM_DATA_IDX].s = "CDC ACM Data", + [ACM_IAD_IDX ].s = "CDC Serial", + { } /* end of list */ +}; + +static struct usb_gadget_strings acm_string_table = { + .language = 0x0409, /* en-us */ + .strings = acm_string_defs, +}; + +static struct usb_gadget_strings *acm_strings[] = { + &acm_string_table, + NULL, +}; + +/*-------------------------------------------------------------------------*/ + +/* ACM control ... data handling is delegated to tty library code. + * The main task of this function is to activate and deactivate + * that code based on device state; track parameters like line + * speed, handshake state, and so on; and issue notifications. + */ + +static void acm_complete_set_line_coding(struct usb_ep *ep, + struct usb_request *req) +{ + struct f_acm *acm = ep->driver_data; + struct usb_composite_dev *cdev = acm->port.func.config->cdev; + + if (req->status != 0) { + dev_dbg(&cdev->gadget->dev, "acm ttyGS%d completion, err %d\n", + acm->port_num, req->status); + return; + } + + /* normal completion */ + if (req->actual != sizeof(acm->port_line_coding)) { + dev_dbg(&cdev->gadget->dev, "acm ttyGS%d short resp, len %d\n", + acm->port_num, req->actual); + usb_ep_set_halt(ep); + } else { + struct usb_cdc_line_coding *value = req->buf; + + /* REVISIT: we currently just remember this data. + * If we change that, (a) validate it first, then + * (b) update whatever hardware needs updating, + * (c) worry about locking. This is information on + * the order of 9600-8-N-1 ... most of which means + * nothing unless we control a real RS232 line. + */ + acm->port_line_coding = *value; + } +} + +static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) +{ + struct f_acm *acm = func_to_acm(f); + struct usb_composite_dev *cdev = f->config->cdev; + struct usb_request *req = cdev->req; + int value = -EOPNOTSUPP; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + + /* composite driver infrastructure handles everything except + * CDC class messages; interface activation uses set_alt(). + * + * Note CDC spec table 4 lists the ACM request profile. It requires + * encapsulated command support ... we don't handle any, and respond + * to them by stalling. Options include get/set/clear comm features + * (not that useful) and SEND_BREAK. + */ + switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { + + /* SET_LINE_CODING ... 
just read and save what the host sends */ + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_REQ_SET_LINE_CODING: + if (w_length != sizeof(struct usb_cdc_line_coding) + || w_index != acm->ctrl_id) + goto invalid; + + value = w_length; + cdev->gadget->ep0->driver_data = acm; + req->complete = acm_complete_set_line_coding; + break; + + /* GET_LINE_CODING ... return what host sent, or initial value */ + case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_REQ_GET_LINE_CODING: + if (w_index != acm->ctrl_id) + goto invalid; + + value = min_t(unsigned, w_length, + sizeof(struct usb_cdc_line_coding)); + memcpy(req->buf, &acm->port_line_coding, value); + break; + + /* SET_CONTROL_LINE_STATE ... save what the host sent */ + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_REQ_SET_CONTROL_LINE_STATE: + if (w_index != acm->ctrl_id) + goto invalid; + + value = 0; + + /* FIXME we should not allow data to flow until the + * host sets the ACM_CTRL_DTR bit; and when it clears + * that bit, we should return to that no-flow state. + */ + acm->port_handshake_bits = w_value; + break; + + default: +invalid: + dev_vdbg(&cdev->gadget->dev, + "invalid control req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + } + + /* respond with data transfer or status phase? */ + if (value >= 0) { + dev_dbg(&cdev->gadget->dev, + "acm ttyGS%d req%02x.%02x v%04x i%04x l%d\n", + acm->port_num, ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + req->zero = 0; + req->length = value; + value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + if (value < 0) + ERROR(cdev, "acm response on ttyGS%d, err %d\n", + acm->port_num, value); + } + + /* device either stalls (value < 0) or reports success */ + return value; +} + +static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt) +{ + struct f_acm *acm = func_to_acm(f); + struct usb_composite_dev *cdev = f->config->cdev; + + /* we know alt == 0, so this is an activation or a reset */ + + if (intf == acm->ctrl_id) { + if (acm->notify->driver_data) { + dev_vdbg(&cdev->gadget->dev, + "reset acm control interface %d\n", intf); + usb_ep_disable(acm->notify); + } + + if (!acm->notify->desc) + if (config_ep_by_speed(cdev->gadget, f, acm->notify)) + return -EINVAL; + + usb_ep_enable(acm->notify); + acm->notify->driver_data = acm; + + } else if (intf == acm->data_id) { + if (acm->port.in->driver_data) { + dev_dbg(&cdev->gadget->dev, + "reset acm ttyGS%d\n", acm->port_num); + gserial_disconnect(&acm->port); + } + if (!acm->port.in->desc || !acm->port.out->desc) { + dev_dbg(&cdev->gadget->dev, + "activate acm ttyGS%d\n", acm->port_num); + if (config_ep_by_speed(cdev->gadget, f, + acm->port.in) || + config_ep_by_speed(cdev->gadget, f, + acm->port.out)) { + acm->port.in->desc = NULL; + acm->port.out->desc = NULL; + return -EINVAL; + } + } + gserial_connect(&acm->port, acm->port_num); + + } else + return -EINVAL; + + return 0; +} + +static void acm_disable(struct usb_function *f) +{ + struct f_acm *acm = func_to_acm(f); + struct usb_composite_dev *cdev = f->config->cdev; + + dev_dbg(&cdev->gadget->dev, "acm ttyGS%d deactivated\n", acm->port_num); + gserial_disconnect(&acm->port); + usb_ep_disable(acm->notify); + acm->notify->driver_data = NULL; +} + +/*-------------------------------------------------------------------------*/ + +/** + * acm_cdc_notify - issue CDC notification to host + * @acm: wraps host to be notified + * @type: 
notification type + * @value: Refer to cdc specs, wValue field. + * @data: data to be sent + * @length: size of data + * Context: irqs blocked, acm->lock held, acm_notify_req non-null + * + * Returns zero on success or a negative errno. + * + * See section 6.3.5 of the CDC 1.1 specification for information + * about the only notification we issue: SerialState change. + */ +static int acm_cdc_notify(struct f_acm *acm, u8 type, u16 value, + void *data, unsigned length) +{ + struct usb_ep *ep = acm->notify; + struct usb_request *req; + struct usb_cdc_notification *notify; + const unsigned len = sizeof(*notify) + length; + void *buf; + int status; + + req = acm->notify_req; + acm->notify_req = NULL; + acm->pending = false; + + req->length = len; + notify = req->buf; + buf = notify + 1; + + notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS + | USB_RECIP_INTERFACE; + notify->bNotificationType = type; + notify->wValue = cpu_to_le16(value); + notify->wIndex = cpu_to_le16(acm->ctrl_id); + notify->wLength = cpu_to_le16(length); + memcpy(buf, data, length); + + /* ep_queue() can complete immediately if it fills the fifo... */ + spin_unlock(&acm->lock); + status = usb_ep_queue(ep, req, GFP_ATOMIC); + spin_lock(&acm->lock); + + if (status < 0) { + ERROR(acm->port.func.config->cdev, + "acm ttyGS%d can't notify serial state, %d\n", + acm->port_num, status); + acm->notify_req = req; + } + + return status; +} + +static int acm_notify_serial_state(struct f_acm *acm) +{ + struct usb_composite_dev *cdev = acm->port.func.config->cdev; + int status; + + spin_lock(&acm->lock); + if (acm->notify_req) { + dev_dbg(&cdev->gadget->dev, "acm ttyGS%d serial state %04x\n", + acm->port_num, acm->serial_state); + status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE, + 0, &acm->serial_state, sizeof(acm->serial_state)); + } else { + acm->pending = true; + status = 0; + } + spin_unlock(&acm->lock); + return status; +} + +static void acm_cdc_notify_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct f_acm *acm = req->context; + u8 doit = false; + + /* on this call path we do NOT hold the port spinlock, + * which is why ACM needs its own spinlock + */ + spin_lock(&acm->lock); + if (req->status != -ESHUTDOWN) + doit = acm->pending; + acm->notify_req = req; + spin_unlock(&acm->lock); + + if (doit) + acm_notify_serial_state(acm); +} + +/* connect == the TTY link is open */ + +static void acm_connect(struct gserial *port) +{ + struct f_acm *acm = port_to_acm(port); + + acm->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD; + acm_notify_serial_state(acm); +} + +static void acm_disconnect(struct gserial *port) +{ + struct f_acm *acm = port_to_acm(port); + + acm->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD); + acm_notify_serial_state(acm); +} + +static int acm_send_break(struct gserial *port, int duration) +{ + struct f_acm *acm = port_to_acm(port); + u16 state; + + state = acm->serial_state; + state &= ~ACM_CTRL_BRK; + if (duration) + state |= ACM_CTRL_BRK; + + acm->serial_state = state; + return acm_notify_serial_state(acm); +} + +/*-------------------------------------------------------------------------*/ + +/* ACM function driver setup/binding */ +static int +acm_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct f_acm *acm = func_to_acm(f); + struct usb_string *us; + int status; + struct usb_ep *ep; + + /* REVISIT might want instance-specific strings to help + * distinguish instances ... 
+ */ + + /* maybe allocate device-global string IDs, and patch descriptors */ + us = usb_gstrings_attach(cdev, acm_strings, + ARRAY_SIZE(acm_string_defs)); + if (IS_ERR(us)) + return PTR_ERR(us); + acm_control_interface_desc.iInterface = us[ACM_CTRL_IDX].id; + acm_data_interface_desc.iInterface = us[ACM_DATA_IDX].id; + acm_iad_descriptor.iFunction = us[ACM_IAD_IDX].id; + + /* allocate instance-specific interface IDs, and patch descriptors */ + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + acm->ctrl_id = status; + acm_iad_descriptor.bFirstInterface = status; + + acm_control_interface_desc.bInterfaceNumber = status; + acm_union_desc .bMasterInterface0 = status; + + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + acm->data_id = status; + + acm_data_interface_desc.bInterfaceNumber = status; + acm_union_desc.bSlaveInterface0 = status; + acm_call_mgmt_descriptor.bDataInterface = status; + + status = -ENODEV; + + /* allocate instance-specific endpoints */ + ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_in_desc); + if (!ep) + goto fail; + acm->port.in = ep; + ep->driver_data = cdev; /* claim */ + + ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_out_desc); + if (!ep) + goto fail; + acm->port.out = ep; + ep->driver_data = cdev; /* claim */ + + ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_notify_desc); + if (!ep) + goto fail; + acm->notify = ep; + ep->driver_data = cdev; /* claim */ + + /* allocate notification */ + acm->notify_req = gs_alloc_req(ep, + sizeof(struct usb_cdc_notification) + 2, + GFP_KERNEL); + if (!acm->notify_req) + goto fail; + + acm->notify_req->complete = acm_cdc_notify_complete; + acm->notify_req->context = acm; + + /* support all relevant hardware speeds... we expect that when + * hardware is dual speed, all bulk-capable endpoints work at + * both speeds + */ + acm_hs_in_desc.bEndpointAddress = acm_fs_in_desc.bEndpointAddress; + acm_hs_out_desc.bEndpointAddress = acm_fs_out_desc.bEndpointAddress; + acm_hs_notify_desc.bEndpointAddress = + acm_fs_notify_desc.bEndpointAddress; + + acm_ss_in_desc.bEndpointAddress = acm_fs_in_desc.bEndpointAddress; + acm_ss_out_desc.bEndpointAddress = acm_fs_out_desc.bEndpointAddress; + + status = usb_assign_descriptors(f, acm_fs_function, acm_hs_function, + acm_ss_function); + if (status) + goto fail; + + dev_dbg(&cdev->gadget->dev, + "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n", + acm->port_num, + gadget_is_superspeed(c->cdev->gadget) ? "super" : + gadget_is_dualspeed(c->cdev->gadget) ? 
"dual" : "full", + acm->port.in->name, acm->port.out->name, + acm->notify->name); + return 0; + +fail: + if (acm->notify_req) + gs_free_req(acm->notify, acm->notify_req); + + /* we might as well release our claims on endpoints */ + if (acm->notify) + acm->notify->driver_data = NULL; + if (acm->port.out) + acm->port.out->driver_data = NULL; + if (acm->port.in) + acm->port.in->driver_data = NULL; + + ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status); + + return status; +} + +static void acm_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct f_acm *acm = func_to_acm(f); + + acm_string_defs[0].id = 0; + usb_free_all_descriptors(f); + if (acm->notify_req) + gs_free_req(acm->notify, acm->notify_req); +} + +static void acm_free_func(struct usb_function *f) +{ + struct f_acm *acm = func_to_acm(f); + + kfree(acm); +} + +static struct usb_function *acm_alloc_func(struct usb_function_instance *fi) +{ + struct f_serial_opts *opts; + struct f_acm *acm; + + acm = kzalloc(sizeof(*acm), GFP_KERNEL); + if (!acm) + return ERR_PTR(-ENOMEM); + + spin_lock_init(&acm->lock); + + acm->port.connect = acm_connect; + acm->port.disconnect = acm_disconnect; + acm->port.send_break = acm_send_break; + + acm->port.func.name = "acm"; + acm->port.func.strings = acm_strings; + /* descriptors are per-instance copies */ + acm->port.func.bind = acm_bind; + acm->port.func.set_alt = acm_set_alt; + acm->port.func.setup = acm_setup; + acm->port.func.disable = acm_disable; + + opts = container_of(fi, struct f_serial_opts, func_inst); + acm->port_num = opts->port_num; + acm->port.func.unbind = acm_unbind; + acm->port.func.free_func = acm_free_func; + + return &acm->port.func; +} + +static inline struct f_serial_opts *to_f_serial_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_serial_opts, + func_inst.group); +} + +CONFIGFS_ATTR_STRUCT(f_serial_opts); +static ssize_t f_acm_attr_show(struct config_item *item, + struct configfs_attribute *attr, + char *page) +{ + struct f_serial_opts *opts = to_f_serial_opts(item); + struct f_serial_opts_attribute *f_serial_opts_attr = + container_of(attr, struct f_serial_opts_attribute, attr); + ssize_t ret = 0; + + if (f_serial_opts_attr->show) + ret = f_serial_opts_attr->show(opts, page); + return ret; +} + +static void acm_attr_release(struct config_item *item) +{ + struct f_serial_opts *opts = to_f_serial_opts(item); + + usb_put_function_instance(&opts->func_inst); +} + +static struct configfs_item_operations acm_item_ops = { + .release = acm_attr_release, + .show_attribute = f_acm_attr_show, +}; + +static ssize_t f_acm_port_num_show(struct f_serial_opts *opts, char *page) +{ + return sprintf(page, "%u\n", opts->port_num); +} + +static struct f_serial_opts_attribute f_acm_port_num = + __CONFIGFS_ATTR_RO(port_num, f_acm_port_num_show); + + +static struct configfs_attribute *acm_attrs[] = { + &f_acm_port_num.attr, + NULL, +}; + +static struct config_item_type acm_func_type = { + .ct_item_ops = &acm_item_ops, + .ct_attrs = acm_attrs, + .ct_owner = THIS_MODULE, +}; + +static void acm_free_instance(struct usb_function_instance *fi) +{ + struct f_serial_opts *opts; + + opts = container_of(fi, struct f_serial_opts, func_inst); + gserial_free_line(opts->port_num); + kfree(opts); +} + +static struct usb_function_instance *acm_alloc_instance(void) +{ + struct f_serial_opts *opts; + int ret; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return ERR_PTR(-ENOMEM); + opts->func_inst.free_func_inst = acm_free_instance; + 
ret = gserial_alloc_line(&opts->port_num); + if (ret) { + kfree(opts); + return ERR_PTR(ret); + } + config_group_init_type_name(&opts->func_inst.group, "", + &acm_func_type); + return &opts->func_inst; +} +DECLARE_USB_FUNCTION_INIT(acm, acm_alloc_instance, acm_alloc_func); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/function/f_ecm.c b/drivers/usb/gadget/function/f_ecm.c new file mode 100644 index 000000000..798760fa7 --- /dev/null +++ b/drivers/usb/gadget/function/f_ecm.c @@ -0,0 +1,973 @@ +/* + * f_ecm.c -- USB CDC Ethernet (ECM) link function driver + * + * Copyright (C) 2003-2005,2008 David Brownell + * Copyright (C) 2008 Nokia Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +/* #define VERBOSE_DEBUG */ + +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/etherdevice.h> + +#include "u_ether.h" +#include "u_ether_configfs.h" +#include "u_ecm.h" + + +/* + * This function is a "CDC Ethernet Networking Control Model" (CDC ECM) + * Ethernet link. The data transfer model is simple (packets sent and + * received over bulk endpoints using normal short packet termination), + * and the control model exposes various data and optional notifications. + * + * ECM is well standardized and (except for Microsoft) supported by most + * operating systems with USB host support. It's the preferred interop + * solution for Ethernet over USB, at least for firmware based solutions. + * (Hardware solutions tend to be more minimalist.) A newer and simpler + * "Ethernet Emulation Model" (CDC EEM) hasn't yet caught on. + * + * Note that ECM requires the use of "alternate settings" for its data + * interface. This means that the set_alt() method has real work to do, + * and also means that a get_alt() method is required. + */ + + +enum ecm_notify_state { + ECM_NOTIFY_NONE, /* don't notify */ + ECM_NOTIFY_CONNECT, /* issue CONNECT next */ + ECM_NOTIFY_SPEED, /* issue SPEED_CHANGE next */ +}; + +struct f_ecm { + struct gether port; + u8 ctrl_id, data_id; + + char ethaddr[14]; + + struct usb_ep *notify; + struct usb_request *notify_req; + u8 notify_state; + bool is_open; + + /* FIXME is_open needs some irq-ish locking + * ... possibly the same as port.ioport + */ +}; + +static inline struct f_ecm *func_to_ecm(struct usb_function *f) +{ + return container_of(f, struct f_ecm, port.func); +} + +/* peak (theoretical) bulk transfer rate in bits-per-second */ +static inline unsigned ecm_bitrate(struct usb_gadget *g) +{ + if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER) + return 13 * 1024 * 8 * 1000 * 8; + else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) + return 13 * 512 * 8 * 1000 * 8; + else + return 19 * 64 * 1 * 1000 * 8; +} + +/*-------------------------------------------------------------------------*/ + +/* + * Include the status endpoint if we can, even though it's optional. + * + * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one + * packet, to simplify cancellation; and a big transfer interval, to + * waste less bandwidth. + * + * Some drivers (like Linux 2.4 cdc-ether!) "need" it to exist even + * if they ignore the connect/disconnect notifications that real aether + * can provide. 
More advanced cdc configurations might want to support + * encapsulated commands (vendor-specific, using control-OUT). + */ + +#define ECM_STATUS_INTERVAL_MS 32 +#define ECM_STATUS_BYTECOUNT 16 /* 8 byte header + data */ + + +/* interface descriptor: */ + +static struct usb_interface_assoc_descriptor +ecm_iad_descriptor = { + .bLength = sizeof ecm_iad_descriptor, + .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION, + + /* .bFirstInterface = DYNAMIC, */ + .bInterfaceCount = 2, /* control + data */ + .bFunctionClass = USB_CLASS_COMM, + .bFunctionSubClass = USB_CDC_SUBCLASS_ETHERNET, + .bFunctionProtocol = USB_CDC_PROTO_NONE, + /* .iFunction = DYNAMIC */ +}; + + +static struct usb_interface_descriptor ecm_control_intf = { + .bLength = sizeof ecm_control_intf, + .bDescriptorType = USB_DT_INTERFACE, + + /* .bInterfaceNumber = DYNAMIC */ + /* status endpoint is optional; this could be patched later */ + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + /* .iInterface = DYNAMIC */ +}; + +static struct usb_cdc_header_desc ecm_header_desc = { + .bLength = sizeof ecm_header_desc, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_HEADER_TYPE, + + .bcdCDC = cpu_to_le16(0x0110), +}; + +static struct usb_cdc_union_desc ecm_union_desc = { + .bLength = sizeof(ecm_union_desc), + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_UNION_TYPE, + /* .bMasterInterface0 = DYNAMIC */ + /* .bSlaveInterface0 = DYNAMIC */ +}; + +static struct usb_cdc_ether_desc ecm_desc = { + .bLength = sizeof ecm_desc, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_ETHERNET_TYPE, + + /* this descriptor actually adds value, surprise! */ + /* .iMACAddress = DYNAMIC */ + .bmEthernetStatistics = cpu_to_le32(0), /* no statistics */ + .wMaxSegmentSize = cpu_to_le16(ETH_FRAME_LEN), + .wNumberMCFilters = cpu_to_le16(0), + .bNumberPowerFilters = 0, +}; + +/* the default data interface has no endpoints ... */ + +static struct usb_interface_descriptor ecm_data_nop_intf = { + .bLength = sizeof ecm_data_nop_intf, + .bDescriptorType = USB_DT_INTERFACE, + + .bInterfaceNumber = 1, + .bAlternateSetting = 0, + .bNumEndpoints = 0, + .bInterfaceClass = USB_CLASS_CDC_DATA, + .bInterfaceSubClass = 0, + .bInterfaceProtocol = 0, + /* .iInterface = DYNAMIC */ +}; + +/* ... 
but the "real" data interface has two bulk endpoints */ + +static struct usb_interface_descriptor ecm_data_intf = { + .bLength = sizeof ecm_data_intf, + .bDescriptorType = USB_DT_INTERFACE, + + .bInterfaceNumber = 1, + .bAlternateSetting = 1, + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_CDC_DATA, + .bInterfaceSubClass = 0, + .bInterfaceProtocol = 0, + /* .iInterface = DYNAMIC */ +}; + +/* full speed support: */ + +static struct usb_endpoint_descriptor fs_ecm_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = cpu_to_le16(ECM_STATUS_BYTECOUNT), + .bInterval = ECM_STATUS_INTERVAL_MS, +}; + +static struct usb_endpoint_descriptor fs_ecm_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor fs_ecm_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_descriptor_header *ecm_fs_function[] = { + /* CDC ECM control descriptors */ + (struct usb_descriptor_header *) &ecm_iad_descriptor, + (struct usb_descriptor_header *) &ecm_control_intf, + (struct usb_descriptor_header *) &ecm_header_desc, + (struct usb_descriptor_header *) &ecm_union_desc, + (struct usb_descriptor_header *) &ecm_desc, + + /* NOTE: status endpoint might need to be removed */ + (struct usb_descriptor_header *) &fs_ecm_notify_desc, + + /* data interface, altsettings 0 and 1 */ + (struct usb_descriptor_header *) &ecm_data_nop_intf, + (struct usb_descriptor_header *) &ecm_data_intf, + (struct usb_descriptor_header *) &fs_ecm_in_desc, + (struct usb_descriptor_header *) &fs_ecm_out_desc, + NULL, +}; + +/* high speed support: */ + +static struct usb_endpoint_descriptor hs_ecm_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = cpu_to_le16(ECM_STATUS_BYTECOUNT), + .bInterval = USB_MS_TO_HS_INTERVAL(ECM_STATUS_INTERVAL_MS), +}; + +static struct usb_endpoint_descriptor hs_ecm_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor hs_ecm_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), +}; + +static struct usb_descriptor_header *ecm_hs_function[] = { + /* CDC ECM control descriptors */ + (struct usb_descriptor_header *) &ecm_iad_descriptor, + (struct usb_descriptor_header *) &ecm_control_intf, + (struct usb_descriptor_header *) &ecm_header_desc, + (struct usb_descriptor_header *) &ecm_union_desc, + (struct usb_descriptor_header *) &ecm_desc, + + /* NOTE: status endpoint might need to be removed */ + (struct usb_descriptor_header *) &hs_ecm_notify_desc, + + /* data interface, altsettings 0 and 1 */ + (struct usb_descriptor_header *) &ecm_data_nop_intf, + (struct usb_descriptor_header *) &ecm_data_intf, + (struct usb_descriptor_header *) &hs_ecm_in_desc, + (struct usb_descriptor_header *) &hs_ecm_out_desc, + NULL, +}; + +/* super speed support: */ + 
+static struct usb_endpoint_descriptor ss_ecm_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = cpu_to_le16(ECM_STATUS_BYTECOUNT), + .bInterval = USB_MS_TO_HS_INTERVAL(ECM_STATUS_INTERVAL_MS), +}; + +static struct usb_ss_ep_comp_descriptor ss_ecm_intr_comp_desc = { + .bLength = sizeof ss_ecm_intr_comp_desc, + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + + /* the following 3 values can be tweaked if necessary */ + /* .bMaxBurst = 0, */ + /* .bmAttributes = 0, */ + .wBytesPerInterval = cpu_to_le16(ECM_STATUS_BYTECOUNT), +}; + +static struct usb_endpoint_descriptor ss_ecm_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(1024), +}; + +static struct usb_endpoint_descriptor ss_ecm_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(1024), +}; + +static struct usb_ss_ep_comp_descriptor ss_ecm_bulk_comp_desc = { + .bLength = sizeof ss_ecm_bulk_comp_desc, + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + + /* the following 2 values can be tweaked if necessary */ + /* .bMaxBurst = 0, */ + /* .bmAttributes = 0, */ +}; + +static struct usb_descriptor_header *ecm_ss_function[] = { + /* CDC ECM control descriptors */ + (struct usb_descriptor_header *) &ecm_iad_descriptor, + (struct usb_descriptor_header *) &ecm_control_intf, + (struct usb_descriptor_header *) &ecm_header_desc, + (struct usb_descriptor_header *) &ecm_union_desc, + (struct usb_descriptor_header *) &ecm_desc, + + /* NOTE: status endpoint might need to be removed */ + (struct usb_descriptor_header *) &ss_ecm_notify_desc, + (struct usb_descriptor_header *) &ss_ecm_intr_comp_desc, + + /* data interface, altsettings 0 and 1 */ + (struct usb_descriptor_header *) &ecm_data_nop_intf, + (struct usb_descriptor_header *) &ecm_data_intf, + (struct usb_descriptor_header *) &ss_ecm_in_desc, + (struct usb_descriptor_header *) &ss_ecm_bulk_comp_desc, + (struct usb_descriptor_header *) &ss_ecm_out_desc, + (struct usb_descriptor_header *) &ss_ecm_bulk_comp_desc, + NULL, +}; + +/* string descriptors: */ + +static struct usb_string ecm_string_defs[] = { + [0].s = "CDC Ethernet Control Model (ECM)", + [1].s = "", + [2].s = "CDC Ethernet Data", + [3].s = "CDC ECM", + { } /* end of list */ +}; + +static struct usb_gadget_strings ecm_string_table = { + .language = 0x0409, /* en-us */ + .strings = ecm_string_defs, +}; + +static struct usb_gadget_strings *ecm_strings[] = { + &ecm_string_table, + NULL, +}; + +/*-------------------------------------------------------------------------*/ + +static void ecm_do_notify(struct f_ecm *ecm) +{ + struct usb_request *req = ecm->notify_req; + struct usb_cdc_notification *event; + struct usb_composite_dev *cdev = ecm->port.func.config->cdev; + __le32 *data; + int status; + + /* notification already in flight? */ + if (!req) + return; + + event = req->buf; + switch (ecm->notify_state) { + case ECM_NOTIFY_NONE: + return; + + case ECM_NOTIFY_CONNECT: + event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION; + if (ecm->is_open) + event->wValue = cpu_to_le16(1); + else + event->wValue = cpu_to_le16(0); + event->wLength = 0; + req->length = sizeof *event; + + DBG(cdev, "notify connect %s\n", + ecm->is_open ? 
"true" : "false"); + ecm->notify_state = ECM_NOTIFY_SPEED; + break; + + case ECM_NOTIFY_SPEED: + event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE; + event->wValue = cpu_to_le16(0); + event->wLength = cpu_to_le16(8); + req->length = ECM_STATUS_BYTECOUNT; + + /* SPEED_CHANGE data is up/down speeds in bits/sec */ + data = req->buf + sizeof *event; + data[0] = cpu_to_le32(ecm_bitrate(cdev->gadget)); + data[1] = data[0]; + + DBG(cdev, "notify speed %d\n", ecm_bitrate(cdev->gadget)); + ecm->notify_state = ECM_NOTIFY_NONE; + break; + } + event->bmRequestType = 0xA1; + event->wIndex = cpu_to_le16(ecm->ctrl_id); + + ecm->notify_req = NULL; + status = usb_ep_queue(ecm->notify, req, GFP_ATOMIC); + if (status < 0) { + ecm->notify_req = req; + DBG(cdev, "notify --> %d\n", status); + } +} + +static void ecm_notify(struct f_ecm *ecm) +{ + /* NOTE on most versions of Linux, host side cdc-ethernet + * won't listen for notifications until its netdevice opens. + * The first notification then sits in the FIFO for a long + * time, and the second one is queued. + */ + ecm->notify_state = ECM_NOTIFY_CONNECT; + ecm_do_notify(ecm); +} + +static void ecm_notify_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct f_ecm *ecm = req->context; + struct usb_composite_dev *cdev = ecm->port.func.config->cdev; + struct usb_cdc_notification *event = req->buf; + + switch (req->status) { + case 0: + /* no fault */ + break; + case -ECONNRESET: + case -ESHUTDOWN: + ecm->notify_state = ECM_NOTIFY_NONE; + break; + default: + DBG(cdev, "event %02x --> %d\n", + event->bNotificationType, req->status); + break; + } + ecm->notify_req = req; + ecm_do_notify(ecm); +} + +static int ecm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) +{ + struct f_ecm *ecm = func_to_ecm(f); + struct usb_composite_dev *cdev = f->config->cdev; + struct usb_request *req = cdev->req; + int value = -EOPNOTSUPP; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + + /* composite driver infrastructure handles everything except + * CDC class messages; interface activation uses set_alt(). + */ + switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_SET_ETHERNET_PACKET_FILTER: + /* see 6.2.30: no data, wIndex = interface, + * wValue = packet filter bitmap + */ + if (w_length != 0 || w_index != ecm->ctrl_id) + goto invalid; + DBG(cdev, "packet filter %02x\n", w_value); + /* REVISIT locking of cdc_filter. This assumes the UDC + * driver won't have a concurrent packet TX irq running on + * another CPU; or that if it does, this write is atomic... + */ + ecm->port.cdc_filter = w_value; + value = 0; + break; + + /* and optionally: + * case USB_CDC_SEND_ENCAPSULATED_COMMAND: + * case USB_CDC_GET_ENCAPSULATED_RESPONSE: + * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS: + * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER: + * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER: + * case USB_CDC_GET_ETHERNET_STATISTIC: + */ + + default: +invalid: + DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + } + + /* respond with data transfer or status phase? 
*/ + if (value >= 0) { + DBG(cdev, "ecm req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + req->zero = 0; + req->length = value; + value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + if (value < 0) + ERROR(cdev, "ecm req %02x.%02x response err %d\n", + ctrl->bRequestType, ctrl->bRequest, + value); + } + + /* device either stalls (value < 0) or reports success */ + return value; +} + + +static int ecm_set_alt(struct usb_function *f, unsigned intf, unsigned alt) +{ + struct f_ecm *ecm = func_to_ecm(f); + struct usb_composite_dev *cdev = f->config->cdev; + + /* Control interface has only altsetting 0 */ + if (intf == ecm->ctrl_id) { + if (alt != 0) + goto fail; + + if (ecm->notify->driver_data) { + VDBG(cdev, "reset ecm control %d\n", intf); + usb_ep_disable(ecm->notify); + } + if (!(ecm->notify->desc)) { + VDBG(cdev, "init ecm ctrl %d\n", intf); + if (config_ep_by_speed(cdev->gadget, f, ecm->notify)) + goto fail; + } + usb_ep_enable(ecm->notify); + ecm->notify->driver_data = ecm; + + /* Data interface has two altsettings, 0 and 1 */ + } else if (intf == ecm->data_id) { + if (alt > 1) + goto fail; + + if (ecm->port.in_ep->driver_data) { + DBG(cdev, "reset ecm\n"); + gether_disconnect(&ecm->port); + } + + if (!ecm->port.in_ep->desc || + !ecm->port.out_ep->desc) { + DBG(cdev, "init ecm\n"); + if (config_ep_by_speed(cdev->gadget, f, + ecm->port.in_ep) || + config_ep_by_speed(cdev->gadget, f, + ecm->port.out_ep)) { + ecm->port.in_ep->desc = NULL; + ecm->port.out_ep->desc = NULL; + goto fail; + } + } + + /* CDC Ethernet only sends data in non-default altsettings. + * Changing altsettings resets filters, statistics, etc. + */ + if (alt == 1) { + struct net_device *net; + + /* Enable zlps by default for ECM conformance; + * override for musb_hdrc (avoids txdma ovhead). + */ + ecm->port.is_zlp_ok = !(gadget_is_musbhdrc(cdev->gadget) + ); + ecm->port.cdc_filter = DEFAULT_FILTER; + DBG(cdev, "activate ecm\n"); + net = gether_connect(&ecm->port); + if (IS_ERR(net)) + return PTR_ERR(net); + } + + /* NOTE this can be a minor disagreement with the ECM spec, + * which says speed notifications will "always" follow + * connection notifications. But we allow one connect to + * follow another (if the first is in flight), and instead + * just guarantee that a speed notification is always sent. + */ + ecm_notify(ecm); + } else + goto fail; + + return 0; +fail: + return -EINVAL; +} + +/* Because the data interface supports multiple altsettings, + * this ECM function *MUST* implement a get_alt() method. + */ +static int ecm_get_alt(struct usb_function *f, unsigned intf) +{ + struct f_ecm *ecm = func_to_ecm(f); + + if (intf == ecm->ctrl_id) + return 0; + return ecm->port.in_ep->driver_data ? 1 : 0; +} + +static void ecm_disable(struct usb_function *f) +{ + struct f_ecm *ecm = func_to_ecm(f); + struct usb_composite_dev *cdev = f->config->cdev; + + DBG(cdev, "ecm deactivated\n"); + + if (ecm->port.in_ep->driver_data) + gether_disconnect(&ecm->port); + + if (ecm->notify->driver_data) { + usb_ep_disable(ecm->notify); + ecm->notify->driver_data = NULL; + ecm->notify->desc = NULL; + } +} + +/*-------------------------------------------------------------------------*/ + +/* + * Callbacks let us notify the host about connect/disconnect when the + * net device is opened or closed. 
+ * + * For testing, note that link states on this side include both opened + * and closed variants of: + * + * - disconnected/unconfigured + * - configured but inactive (data alt 0) + * - configured and active (data alt 1) + * + * Each needs to be tested with unplug, rmmod, SET_CONFIGURATION, and + * SET_INTERFACE (altsetting). Remember also that "configured" doesn't + * imply the host is actually polling the notification endpoint, and + * likewise that "active" doesn't imply it's actually using the data + * endpoints for traffic. + */ + +static void ecm_open(struct gether *geth) +{ + struct f_ecm *ecm = func_to_ecm(&geth->func); + + DBG(ecm->port.func.config->cdev, "%s\n", __func__); + + ecm->is_open = true; + ecm_notify(ecm); +} + +static void ecm_close(struct gether *geth) +{ + struct f_ecm *ecm = func_to_ecm(&geth->func); + + DBG(ecm->port.func.config->cdev, "%s\n", __func__); + + ecm->is_open = false; + ecm_notify(ecm); +} + +/*-------------------------------------------------------------------------*/ + +/* ethernet function driver setup/binding */ + +static int +ecm_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct f_ecm *ecm = func_to_ecm(f); + struct usb_string *us; + int status; + struct usb_ep *ep; + + struct f_ecm_opts *ecm_opts; + + if (!can_support_ecm(cdev->gadget)) + return -EINVAL; + + ecm_opts = container_of(f->fi, struct f_ecm_opts, func_inst); + + /* + * in drivers/usb/gadget/configfs.c:configfs_composite_bind() + * configurations are bound in sequence with list_for_each_entry, + * in each configuration its functions are bound in sequence + * with list_for_each_entry, so we assume no race condition + * with regard to ecm_opts->bound access + */ + if (!ecm_opts->bound) { + mutex_lock(&ecm_opts->lock); + gether_set_gadget(ecm_opts->net, cdev->gadget); + status = gether_register_netdev(ecm_opts->net); + mutex_unlock(&ecm_opts->lock); + if (status) + return status; + ecm_opts->bound = true; + } + + us = usb_gstrings_attach(cdev, ecm_strings, + ARRAY_SIZE(ecm_string_defs)); + if (IS_ERR(us)) + return PTR_ERR(us); + ecm_control_intf.iInterface = us[0].id; + ecm_data_intf.iInterface = us[2].id; + ecm_desc.iMACAddress = us[1].id; + ecm_iad_descriptor.iFunction = us[3].id; + + /* allocate instance-specific interface IDs */ + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + ecm->ctrl_id = status; + ecm_iad_descriptor.bFirstInterface = status; + + ecm_control_intf.bInterfaceNumber = status; + ecm_union_desc.bMasterInterface0 = status; + + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + ecm->data_id = status; + + ecm_data_nop_intf.bInterfaceNumber = status; + ecm_data_intf.bInterfaceNumber = status; + ecm_union_desc.bSlaveInterface0 = status; + + status = -ENODEV; + + /* allocate instance-specific endpoints */ + ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_in_desc); + if (!ep) + goto fail; + ecm->port.in_ep = ep; + ep->driver_data = cdev; /* claim */ + + ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_out_desc); + if (!ep) + goto fail; + ecm->port.out_ep = ep; + ep->driver_data = cdev; /* claim */ + + /* NOTE: a status/notification endpoint is *OPTIONAL* but we + * don't treat it that way. It's simpler, and some newer CDC + * profiles (wireless handsets) no longer treat it as optional. 
+ */ + ep = usb_ep_autoconfig(cdev->gadget, &fs_ecm_notify_desc); + if (!ep) + goto fail; + ecm->notify = ep; + ep->driver_data = cdev; /* claim */ + + status = -ENOMEM; + + /* allocate notification request and buffer */ + ecm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL); + if (!ecm->notify_req) + goto fail; + ecm->notify_req->buf = kmalloc(ECM_STATUS_BYTECOUNT, GFP_KERNEL); + if (!ecm->notify_req->buf) + goto fail; + ecm->notify_req->context = ecm; + ecm->notify_req->complete = ecm_notify_complete; + + /* support all relevant hardware speeds... we expect that when + * hardware is dual speed, all bulk-capable endpoints work at + * both speeds + */ + hs_ecm_in_desc.bEndpointAddress = fs_ecm_in_desc.bEndpointAddress; + hs_ecm_out_desc.bEndpointAddress = fs_ecm_out_desc.bEndpointAddress; + hs_ecm_notify_desc.bEndpointAddress = + fs_ecm_notify_desc.bEndpointAddress; + + ss_ecm_in_desc.bEndpointAddress = fs_ecm_in_desc.bEndpointAddress; + ss_ecm_out_desc.bEndpointAddress = fs_ecm_out_desc.bEndpointAddress; + ss_ecm_notify_desc.bEndpointAddress = + fs_ecm_notify_desc.bEndpointAddress; + + status = usb_assign_descriptors(f, ecm_fs_function, ecm_hs_function, + ecm_ss_function); + if (status) + goto fail; + + /* NOTE: all that is done without knowing or caring about + * the network link ... which is unavailable to this code + * until we're activated via set_alt(). + */ + + ecm->port.open = ecm_open; + ecm->port.close = ecm_close; + + DBG(cdev, "CDC Ethernet: %s speed IN/%s OUT/%s NOTIFY/%s\n", + gadget_is_superspeed(c->cdev->gadget) ? "super" : + gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", + ecm->port.in_ep->name, ecm->port.out_ep->name, + ecm->notify->name); + return 0; + +fail: + if (ecm->notify_req) { + kfree(ecm->notify_req->buf); + usb_ep_free_request(ecm->notify, ecm->notify_req); + } + + /* we might as well release our claims on endpoints */ + if (ecm->notify) + ecm->notify->driver_data = NULL; + if (ecm->port.out_ep) + ecm->port.out_ep->driver_data = NULL; + if (ecm->port.in_ep) + ecm->port.in_ep->driver_data = NULL; + + ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); + + return status; +} + +static inline struct f_ecm_opts *to_f_ecm_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_ecm_opts, + func_inst.group); +} + +/* f_ecm_item_ops */ +USB_ETHERNET_CONFIGFS_ITEM(ecm); + +/* f_ecm_opts_dev_addr */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(ecm); + +/* f_ecm_opts_host_addr */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(ecm); + +/* f_ecm_opts_qmult */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(ecm); + +/* f_ecm_opts_ifname */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(ecm); + +static struct configfs_attribute *ecm_attrs[] = { + &f_ecm_opts_dev_addr.attr, + &f_ecm_opts_host_addr.attr, + &f_ecm_opts_qmult.attr, + &f_ecm_opts_ifname.attr, + NULL, +}; + +static struct config_item_type ecm_func_type = { + .ct_item_ops = &ecm_item_ops, + .ct_attrs = ecm_attrs, + .ct_owner = THIS_MODULE, +}; + +static void ecm_free_inst(struct usb_function_instance *f) +{ + struct f_ecm_opts *opts; + + opts = container_of(f, struct f_ecm_opts, func_inst); + if (opts->bound) + gether_cleanup(netdev_priv(opts->net)); + else + free_netdev(opts->net); + kfree(opts); +} + +static struct usb_function_instance *ecm_alloc_inst(void) +{ + struct f_ecm_opts *opts; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return ERR_PTR(-ENOMEM); + mutex_init(&opts->lock); + opts->func_inst.free_func_inst = ecm_free_inst; + opts->net = gether_setup_default(); + if 
(IS_ERR(opts->net)) { + struct net_device *net = opts->net; + kfree(opts); + return ERR_CAST(net); + } + + config_group_init_type_name(&opts->func_inst.group, "", &ecm_func_type); + + return &opts->func_inst; +} + +static void ecm_free(struct usb_function *f) +{ + struct f_ecm *ecm; + struct f_ecm_opts *opts; + + ecm = func_to_ecm(f); + opts = container_of(f->fi, struct f_ecm_opts, func_inst); + kfree(ecm); + mutex_lock(&opts->lock); + opts->refcnt--; + mutex_unlock(&opts->lock); +} + +static void ecm_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct f_ecm *ecm = func_to_ecm(f); + + DBG(c->cdev, "ecm unbind\n"); + + usb_free_all_descriptors(f); + + kfree(ecm->notify_req->buf); + usb_ep_free_request(ecm->notify, ecm->notify_req); +} + +static struct usb_function *ecm_alloc(struct usb_function_instance *fi) +{ + struct f_ecm *ecm; + struct f_ecm_opts *opts; + int status; + + /* allocate and initialize one new instance */ + ecm = kzalloc(sizeof(*ecm), GFP_KERNEL); + if (!ecm) + return ERR_PTR(-ENOMEM); + + opts = container_of(fi, struct f_ecm_opts, func_inst); + mutex_lock(&opts->lock); + opts->refcnt++; + + /* export host's Ethernet address in CDC format */ + status = gether_get_host_addr_cdc(opts->net, ecm->ethaddr, + sizeof(ecm->ethaddr)); + if (status < 12) { + kfree(ecm); + mutex_unlock(&opts->lock); + return ERR_PTR(-EINVAL); + } + ecm_string_defs[1].s = ecm->ethaddr; + + ecm->port.ioport = netdev_priv(opts->net); + mutex_unlock(&opts->lock); + ecm->port.cdc_filter = DEFAULT_FILTER; + + ecm->port.func.name = "cdc_ethernet"; + /* descriptors are per-instance copies */ + ecm->port.func.bind = ecm_bind; + ecm->port.func.unbind = ecm_unbind; + ecm->port.func.set_alt = ecm_set_alt; + ecm->port.func.get_alt = ecm_get_alt; + ecm->port.func.setup = ecm_setup; + ecm->port.func.disable = ecm_disable; + ecm->port.func.free_func = ecm_free; + + return &ecm->port.func; +} + +DECLARE_USB_FUNCTION_INIT(ecm, ecm_alloc_inst, ecm_alloc); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("David Brownell"); diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c new file mode 100644 index 000000000..c9e90de5b --- /dev/null +++ b/drivers/usb/gadget/function/f_eem.c @@ -0,0 +1,659 @@ +/* + * f_eem.c -- USB CDC Ethernet (EEM) link function driver + * + * Copyright (C) 2003-2005,2008 David Brownell + * Copyright (C) 2008 Nokia Corporation + * Copyright (C) 2009 EF Johnson Technologies + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/etherdevice.h> +#include <linux/crc32.h> +#include <linux/slab.h> + +#include "u_ether.h" +#include "u_ether_configfs.h" +#include "u_eem.h" + +#define EEM_HLEN 2 + +/* + * This function is a "CDC Ethernet Emulation Model" (CDC EEM) + * Ethernet link. 
+ */ + +struct f_eem { + struct gether port; + u8 ctrl_id; +}; + +static inline struct f_eem *func_to_eem(struct usb_function *f) +{ + return container_of(f, struct f_eem, port.func); +} + +/*-------------------------------------------------------------------------*/ + +/* interface descriptor: */ + +static struct usb_interface_descriptor eem_intf = { + .bLength = sizeof eem_intf, + .bDescriptorType = USB_DT_INTERFACE, + + /* .bInterfaceNumber = DYNAMIC */ + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_EEM, + .bInterfaceProtocol = USB_CDC_PROTO_EEM, + /* .iInterface = DYNAMIC */ +}; + +/* full speed support: */ + +static struct usb_endpoint_descriptor eem_fs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor eem_fs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_descriptor_header *eem_fs_function[] = { + /* CDC EEM control descriptors */ + (struct usb_descriptor_header *) &eem_intf, + (struct usb_descriptor_header *) &eem_fs_in_desc, + (struct usb_descriptor_header *) &eem_fs_out_desc, + NULL, +}; + +/* high speed support: */ + +static struct usb_endpoint_descriptor eem_hs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor eem_hs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), +}; + +static struct usb_descriptor_header *eem_hs_function[] = { + /* CDC EEM control descriptors */ + (struct usb_descriptor_header *) &eem_intf, + (struct usb_descriptor_header *) &eem_hs_in_desc, + (struct usb_descriptor_header *) &eem_hs_out_desc, + NULL, +}; + +/* super speed support: */ + +static struct usb_endpoint_descriptor eem_ss_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(1024), +}; + +static struct usb_endpoint_descriptor eem_ss_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(1024), +}; + +static struct usb_ss_ep_comp_descriptor eem_ss_bulk_comp_desc = { + .bLength = sizeof eem_ss_bulk_comp_desc, + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + + /* the following 2 values can be tweaked if necessary */ + /* .bMaxBurst = 0, */ + /* .bmAttributes = 0, */ +}; + +static struct usb_descriptor_header *eem_ss_function[] = { + /* CDC EEM control descriptors */ + (struct usb_descriptor_header *) &eem_intf, + (struct usb_descriptor_header *) &eem_ss_in_desc, + (struct usb_descriptor_header *) &eem_ss_bulk_comp_desc, + (struct usb_descriptor_header *) &eem_ss_out_desc, + (struct usb_descriptor_header *) &eem_ss_bulk_comp_desc, + NULL, +}; + +/* string descriptors: */ + +static struct usb_string eem_string_defs[] = { + [0].s = "CDC Ethernet Emulation Model (EEM)", + { } /* end of list */ +}; + +static struct usb_gadget_strings 
eem_string_table = { + .language = 0x0409, /* en-us */ + .strings = eem_string_defs, +}; + +static struct usb_gadget_strings *eem_strings[] = { + &eem_string_table, + NULL, +}; + +/*-------------------------------------------------------------------------*/ + +static int eem_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) +{ + struct usb_composite_dev *cdev = f->config->cdev; + int value = -EOPNOTSUPP; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + + DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + + /* device either stalls (value < 0) or reports success */ + return value; +} + + +static int eem_set_alt(struct usb_function *f, unsigned intf, unsigned alt) +{ + struct f_eem *eem = func_to_eem(f); + struct usb_composite_dev *cdev = f->config->cdev; + struct net_device *net; + + /* we know alt == 0, so this is an activation or a reset */ + if (alt != 0) + goto fail; + + if (intf == eem->ctrl_id) { + + if (eem->port.in_ep->driver_data) { + DBG(cdev, "reset eem\n"); + gether_disconnect(&eem->port); + } + + if (!eem->port.in_ep->desc || !eem->port.out_ep->desc) { + DBG(cdev, "init eem\n"); + if (config_ep_by_speed(cdev->gadget, f, + eem->port.in_ep) || + config_ep_by_speed(cdev->gadget, f, + eem->port.out_ep)) { + eem->port.in_ep->desc = NULL; + eem->port.out_ep->desc = NULL; + goto fail; + } + } + + /* zlps should not occur because zero-length EEM packets + * will be inserted in those cases where they would occur + */ + eem->port.is_zlp_ok = 1; + eem->port.cdc_filter = DEFAULT_FILTER; + DBG(cdev, "activate eem\n"); + net = gether_connect(&eem->port); + if (IS_ERR(net)) + return PTR_ERR(net); + } else + goto fail; + + return 0; +fail: + return -EINVAL; +} + +static void eem_disable(struct usb_function *f) +{ + struct f_eem *eem = func_to_eem(f); + struct usb_composite_dev *cdev = f->config->cdev; + + DBG(cdev, "eem deactivated\n"); + + if (eem->port.in_ep->driver_data) + gether_disconnect(&eem->port); +} + +/*-------------------------------------------------------------------------*/ + +/* EEM function driver setup/binding */ + +static int eem_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct f_eem *eem = func_to_eem(f); + struct usb_string *us; + int status; + struct usb_ep *ep; + + struct f_eem_opts *eem_opts; + + eem_opts = container_of(f->fi, struct f_eem_opts, func_inst); + /* + * in drivers/usb/gadget/configfs.c:configfs_composite_bind() + * configurations are bound in sequence with list_for_each_entry, + * in each configuration its functions are bound in sequence + * with list_for_each_entry, so we assume no race condition + * with regard to eem_opts->bound access + */ + if (!eem_opts->bound) { + mutex_lock(&eem_opts->lock); + gether_set_gadget(eem_opts->net, cdev->gadget); + status = gether_register_netdev(eem_opts->net); + mutex_unlock(&eem_opts->lock); + if (status) + return status; + eem_opts->bound = true; + } + + us = usb_gstrings_attach(cdev, eem_strings, + ARRAY_SIZE(eem_string_defs)); + if (IS_ERR(us)) + return PTR_ERR(us); + eem_intf.iInterface = us[0].id; + + /* allocate instance-specific interface IDs */ + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + eem->ctrl_id = status; + eem_intf.bInterfaceNumber = status; + + status = -ENODEV; + + /* allocate instance-specific endpoints */ + ep = 
usb_ep_autoconfig(cdev->gadget, &eem_fs_in_desc); + if (!ep) + goto fail; + eem->port.in_ep = ep; + ep->driver_data = cdev; /* claim */ + + ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_out_desc); + if (!ep) + goto fail; + eem->port.out_ep = ep; + ep->driver_data = cdev; /* claim */ + + status = -ENOMEM; + + /* support all relevant hardware speeds... we expect that when + * hardware is dual speed, all bulk-capable endpoints work at + * both speeds + */ + eem_hs_in_desc.bEndpointAddress = eem_fs_in_desc.bEndpointAddress; + eem_hs_out_desc.bEndpointAddress = eem_fs_out_desc.bEndpointAddress; + + eem_ss_in_desc.bEndpointAddress = eem_fs_in_desc.bEndpointAddress; + eem_ss_out_desc.bEndpointAddress = eem_fs_out_desc.bEndpointAddress; + + status = usb_assign_descriptors(f, eem_fs_function, eem_hs_function, + eem_ss_function); + if (status) + goto fail; + + DBG(cdev, "CDC Ethernet (EEM): %s speed IN/%s OUT/%s\n", + gadget_is_superspeed(c->cdev->gadget) ? "super" : + gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", + eem->port.in_ep->name, eem->port.out_ep->name); + return 0; + +fail: + if (eem->port.out_ep) + eem->port.out_ep->driver_data = NULL; + if (eem->port.in_ep) + eem->port.in_ep->driver_data = NULL; + + ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); + + return status; +} + +static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct sk_buff *skb = (struct sk_buff *)req->context; + + dev_kfree_skb_any(skb); +} + +/* + * Add the EEM header and ethernet checksum. + * We currently do not attempt to put multiple ethernet frames + * into a single USB transfer + */ +static struct sk_buff *eem_wrap(struct gether *port, struct sk_buff *skb) +{ + struct sk_buff *skb2 = NULL; + struct usb_ep *in = port->in_ep; + int padlen = 0; + u16 len = skb->len; + + int headroom = skb_headroom(skb); + int tailroom = skb_tailroom(skb); + + /* When (len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) is 0, + * stick two bytes of zero-length EEM packet on the end. + */ + if (((len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) == 0) + padlen += 2; + + if ((tailroom >= (ETH_FCS_LEN + padlen)) && + (headroom >= EEM_HLEN) && !skb_cloned(skb)) + goto done; + + skb2 = skb_copy_expand(skb, EEM_HLEN, ETH_FCS_LEN + padlen, GFP_ATOMIC); + dev_kfree_skb_any(skb); + skb = skb2; + if (!skb) + return skb; + +done: + /* use the "no CRC" option */ + put_unaligned_be32(0xdeadbeef, skb_put(skb, 4)); + + /* EEM packet header format: + * b0..13: length of ethernet frame + * b14: bmCRC (0 == sentinel CRC) + * b15: bmType (0 == data) + */ + len = skb->len; + put_unaligned_le16(len & 0x3FFF, skb_push(skb, 2)); + + /* add a zero-length EEM packet, if needed */ + if (padlen) + put_unaligned_le16(0, skb_put(skb, 2)); + + return skb; +} + +/* + * Remove the EEM header. Note that there can be many EEM packets in a single + * USB transfer, so we need to break them out and handle them independently. 
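+ * Each packet starts with a 2-byte little-endian header: bit 15 selects data
+ * (0) versus command (1) packets, and for data packets bits 0..13 give the
+ * length of the Ethernet frame plus its 4-byte CRC. For example, a 96-byte
+ * frame followed by the 0xdeadbeef sentinel CRC is announced by the header
+ * 0x0064 (bmType = 0, bmCRC = 0, length = 100).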
+ */ +static int eem_unwrap(struct gether *port, + struct sk_buff *skb, + struct sk_buff_head *list) +{ + struct usb_composite_dev *cdev = port->func.config->cdev; + int status = 0; + + do { + struct sk_buff *skb2; + u16 header; + u16 len = 0; + + if (skb->len < EEM_HLEN) { + status = -EINVAL; + DBG(cdev, "invalid EEM header\n"); + goto error; + } + + /* remove the EEM header */ + header = get_unaligned_le16(skb->data); + skb_pull(skb, EEM_HLEN); + + /* EEM packet header format: + * b0..14: EEM type dependent (data or command) + * b15: bmType (0 == data, 1 == command) + */ + if (header & BIT(15)) { + struct usb_request *req = cdev->req; + u16 bmEEMCmd; + + /* EEM command packet format: + * b0..10: bmEEMCmdParam + * b11..13: bmEEMCmd + * b14: reserved (must be zero) + * b15: bmType (1 == command) + */ + if (header & BIT(14)) + continue; + + bmEEMCmd = (header >> 11) & 0x7; + switch (bmEEMCmd) { + case 0: /* echo */ + len = header & 0x7FF; + if (skb->len < len) { + status = -EOVERFLOW; + goto error; + } + + skb2 = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!skb2)) { + DBG(cdev, "EEM echo response error\n"); + goto next; + } + skb_trim(skb2, len); + put_unaligned_le16(BIT(15) | BIT(11) | len, + skb_push(skb2, 2)); + skb_copy_bits(skb2, 0, req->buf, skb2->len); + req->length = skb2->len; + req->complete = eem_cmd_complete; + req->zero = 1; + req->context = skb2; + if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC)) + DBG(cdev, "echo response queue fail\n"); + break; + + case 1: /* echo response */ + case 2: /* suspend hint */ + case 3: /* response hint */ + case 4: /* response complete hint */ + case 5: /* tickle */ + default: /* reserved */ + continue; + } + } else { + u32 crc, crc2; + struct sk_buff *skb3; + + /* check for zero-length EEM packet */ + if (header == 0) + continue; + + /* EEM data packet format: + * b0..13: length of ethernet frame + * b14: bmCRC (0 == sentinel, 1 == calculated) + * b15: bmType (0 == data) + */ + len = header & 0x3FFF; + if ((skb->len < len) + || (len < (ETH_HLEN + ETH_FCS_LEN))) { + status = -EINVAL; + goto error; + } + + /* validate CRC */ + if (header & BIT(14)) { + crc = get_unaligned_le32(skb->data + len + - ETH_FCS_LEN); + crc2 = ~crc32_le(~0, + skb->data, len - ETH_FCS_LEN); + } else { + crc = get_unaligned_be32(skb->data + len + - ETH_FCS_LEN); + crc2 = 0xdeadbeef; + } + if (crc != crc2) { + DBG(cdev, "invalid EEM CRC\n"); + goto next; + } + + skb2 = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!skb2)) { + DBG(cdev, "unable to unframe EEM packet\n"); + continue; + } + skb_trim(skb2, len - ETH_FCS_LEN); + + skb3 = skb_copy_expand(skb2, + NET_IP_ALIGN, + 0, + GFP_ATOMIC); + if (unlikely(!skb3)) { + DBG(cdev, "unable to realign EEM packet\n"); + dev_kfree_skb_any(skb2); + continue; + } + dev_kfree_skb_any(skb2); + skb_queue_tail(list, skb3); + } +next: + skb_pull(skb, len); + } while (skb->len); + +error: + dev_kfree_skb_any(skb); + return status; +} + +static inline struct f_eem_opts *to_f_eem_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_eem_opts, + func_inst.group); +} + +/* f_eem_item_ops */ +USB_ETHERNET_CONFIGFS_ITEM(eem); + +/* f_eem_opts_dev_addr */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(eem); + +/* f_eem_opts_host_addr */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(eem); + +/* f_eem_opts_qmult */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(eem); + +/* f_eem_opts_ifname */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(eem); + +static struct configfs_attribute *eem_attrs[] = { + &f_eem_opts_dev_addr.attr, + 
&f_eem_opts_host_addr.attr, + &f_eem_opts_qmult.attr, + &f_eem_opts_ifname.attr, + NULL, +}; + +static struct config_item_type eem_func_type = { + .ct_item_ops = &eem_item_ops, + .ct_attrs = eem_attrs, + .ct_owner = THIS_MODULE, +}; + +static void eem_free_inst(struct usb_function_instance *f) +{ + struct f_eem_opts *opts; + + opts = container_of(f, struct f_eem_opts, func_inst); + if (opts->bound) + gether_cleanup(netdev_priv(opts->net)); + else + free_netdev(opts->net); + kfree(opts); +} + +static struct usb_function_instance *eem_alloc_inst(void) +{ + struct f_eem_opts *opts; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return ERR_PTR(-ENOMEM); + mutex_init(&opts->lock); + opts->func_inst.free_func_inst = eem_free_inst; + opts->net = gether_setup_default(); + if (IS_ERR(opts->net)) { + struct net_device *net = opts->net; + kfree(opts); + return ERR_CAST(net); + } + + config_group_init_type_name(&opts->func_inst.group, "", &eem_func_type); + + return &opts->func_inst; +} + +static void eem_free(struct usb_function *f) +{ + struct f_eem *eem; + struct f_eem_opts *opts; + + eem = func_to_eem(f); + opts = container_of(f->fi, struct f_eem_opts, func_inst); + kfree(eem); + mutex_lock(&opts->lock); + opts->refcnt--; + mutex_unlock(&opts->lock); +} + +static void eem_unbind(struct usb_configuration *c, struct usb_function *f) +{ + DBG(c->cdev, "eem unbind\n"); + + usb_free_all_descriptors(f); +} + +static struct usb_function *eem_alloc(struct usb_function_instance *fi) +{ + struct f_eem *eem; + struct f_eem_opts *opts; + + /* allocate and initialize one new instance */ + eem = kzalloc(sizeof(*eem), GFP_KERNEL); + if (!eem) + return ERR_PTR(-ENOMEM); + + opts = container_of(fi, struct f_eem_opts, func_inst); + mutex_lock(&opts->lock); + opts->refcnt++; + + eem->port.ioport = netdev_priv(opts->net); + mutex_unlock(&opts->lock); + eem->port.cdc_filter = DEFAULT_FILTER; + + eem->port.func.name = "cdc_eem"; + /* descriptors are per-instance copies */ + eem->port.func.bind = eem_bind; + eem->port.func.unbind = eem_unbind; + eem->port.func.set_alt = eem_set_alt; + eem->port.func.setup = eem_setup; + eem->port.func.disable = eem_disable; + eem->port.func.free_func = eem_free; + eem->port.wrap = eem_wrap; + eem->port.unwrap = eem_unwrap; + eem->port.header_len = EEM_HLEN; + + return &eem->port.func; +} + +DECLARE_USB_FUNCTION_INIT(eem, eem_alloc_inst, eem_alloc); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("David Brownell"); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c new file mode 100644 index 000000000..6e7be91e6 --- /dev/null +++ b/drivers/usb/gadget/function/f_fs.c @@ -0,0 +1,3503 @@ +/* + * f_fs.c -- user mode file system API for USB composite function controllers + * + * Copyright (C) 2010 Samsung Electronics + * Author: Michal Nazarewicz <mina86@mina86.com> + * + * Based on inode.c (GadgetFS) which was: + * Copyright (C) 2003-2004 David Brownell + * Copyright (C) 2003 Agilent Technologies + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + + +/* #define DEBUG */ +/* #define VERBOSE_DEBUG */ + +#include <linux/blkdev.h> +#include <linux/pagemap.h> +#include <linux/export.h> +#include <linux/hid.h> +#include <linux/module.h> +#include <linux/uio.h> +#include <asm/unaligned.h> + +#include <linux/usb/composite.h> +#include <linux/usb/functionfs.h> + +#include <linux/aio.h> +#include <linux/mmu_context.h> +#include <linux/poll.h> +#include <linux/eventfd.h> + +#include "u_fs.h" +#include "u_f.h" +#include "u_os_desc.h" +#include "configfs.h" + +#define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by a honest dice roll ;) */ + +/* Reference counter handling */ +static void ffs_data_get(struct ffs_data *ffs); +static void ffs_data_put(struct ffs_data *ffs); +/* Creates new ffs_data object. */ +static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc)); + +/* Opened counter handling. */ +static void ffs_data_opened(struct ffs_data *ffs); +static void ffs_data_closed(struct ffs_data *ffs); + +/* Called with ffs->mutex held; take over ownership of data. */ +static int __must_check +__ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len); +static int __must_check +__ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len); + + +/* The function structure ***************************************************/ + +struct ffs_ep; + +struct ffs_function { + struct usb_configuration *conf; + struct usb_gadget *gadget; + struct ffs_data *ffs; + + struct ffs_ep *eps; + u8 eps_revmap[16]; + short *interfaces_nums; + + struct usb_function function; +}; + + +static struct ffs_function *ffs_func_from_usb(struct usb_function *f) +{ + return container_of(f, struct ffs_function, function); +} + + +static inline enum ffs_setup_state +ffs_setup_state_clear_cancelled(struct ffs_data *ffs) +{ + return (enum ffs_setup_state) + cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP); +} + + +static void ffs_func_eps_disable(struct ffs_function *func); +static int __must_check ffs_func_eps_enable(struct ffs_function *func); + +static int ffs_func_bind(struct usb_configuration *, + struct usb_function *); +static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned); +static void ffs_func_disable(struct usb_function *); +static int ffs_func_setup(struct usb_function *, + const struct usb_ctrlrequest *); +static void ffs_func_suspend(struct usb_function *); +static void ffs_func_resume(struct usb_function *); + + +static int ffs_func_revmap_ep(struct ffs_function *func, u8 num); +static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf); + + +/* The endpoints structures *************************************************/ + +struct ffs_ep { + struct usb_ep *ep; /* P: ffs->eps_lock */ + struct usb_request *req; /* P: epfile->mutex */ + + /* [0]: full speed, [1]: high speed, [2]: super speed */ + struct usb_endpoint_descriptor *descs[3]; + + u8 num; + + int status; /* P: epfile->mutex */ +}; + +struct ffs_epfile { + /* Protects ep->ep and ep->req. 
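+ * Synchronous transfers reuse the endpoint's single ep->req under this
+ * mutex, while AIO allocates a request of its own.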
*/ + struct mutex mutex; + wait_queue_head_t wait; + + struct ffs_data *ffs; + struct ffs_ep *ep; /* P: ffs->eps_lock */ + + struct dentry *dentry; + + char name[5]; + + unsigned char in; /* P: ffs->eps_lock */ + unsigned char isoc; /* P: ffs->eps_lock */ + + unsigned char _pad; +}; + +/* ffs_io_data structure ***************************************************/ + +struct ffs_io_data { + bool aio; + bool read; + + struct kiocb *kiocb; + struct iov_iter data; + const void *to_free; + char *buf; + + struct mm_struct *mm; + struct work_struct work; + + struct usb_ep *ep; + struct usb_request *req; + + struct ffs_data *ffs; +}; + +struct ffs_desc_helper { + struct ffs_data *ffs; + unsigned interfaces_count; + unsigned eps_count; +}; + +static int __must_check ffs_epfiles_create(struct ffs_data *ffs); +static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count); + +static struct dentry * +ffs_sb_create_file(struct super_block *sb, const char *name, void *data, + const struct file_operations *fops); + +/* Devices management *******************************************************/ + +DEFINE_MUTEX(ffs_lock); +EXPORT_SYMBOL_GPL(ffs_lock); + +static struct ffs_dev *_ffs_find_dev(const char *name); +static struct ffs_dev *_ffs_alloc_dev(void); +static int _ffs_name_dev(struct ffs_dev *dev, const char *name); +static void _ffs_free_dev(struct ffs_dev *dev); +static void *ffs_acquire_dev(const char *dev_name); +static void ffs_release_dev(struct ffs_data *ffs_data); +static int ffs_ready(struct ffs_data *ffs); +static void ffs_closed(struct ffs_data *ffs); + +/* Misc helper functions ****************************************************/ + +static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock) + __attribute__((warn_unused_result, nonnull)); +static char *ffs_prepare_buffer(const char __user *buf, size_t len) + __attribute__((warn_unused_result, nonnull)); + + +/* Control file aka ep0 *****************************************************/ + +static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct ffs_data *ffs = req->context; + + complete_all(&ffs->ep0req_completion); +} + +static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len) +{ + struct usb_request *req = ffs->ep0req; + int ret; + + req->zero = len < le16_to_cpu(ffs->ev.setup.wLength); + + spin_unlock_irq(&ffs->ev.waitq.lock); + + req->buf = data; + req->length = len; + + /* + * UDC layer requires to provide a buffer even for ZLP, but should + * not use it at all. Let's provide some poisoned pointer to catch + * possible bug in the driver. + */ + if (req->buf == NULL) + req->buf = (void *)0xDEADBABE; + + reinit_completion(&ffs->ep0req_completion); + + ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC); + if (unlikely(ret < 0)) + return ret; + + ret = wait_for_completion_interruptible(&ffs->ep0req_completion); + if (unlikely(ret)) { + usb_ep_dequeue(ffs->gadget->ep0, req); + return -EINTR; + } + + ffs->setup_state = FFS_NO_SETUP; + return req->status ? 
req->status : req->actual; +} + +static int __ffs_ep0_stall(struct ffs_data *ffs) +{ + if (ffs->ev.can_stall) { + pr_vdebug("ep0 stall\n"); + usb_ep_set_halt(ffs->gadget->ep0); + ffs->setup_state = FFS_NO_SETUP; + return -EL2HLT; + } else { + pr_debug("bogus ep0 stall!\n"); + return -ESRCH; + } +} + +static ssize_t ffs_ep0_write(struct file *file, const char __user *buf, + size_t len, loff_t *ptr) +{ + struct ffs_data *ffs = file->private_data; + ssize_t ret; + char *data; + + ENTER(); + + /* Fast check if setup was canceled */ + if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED) + return -EIDRM; + + /* Acquire mutex */ + ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK); + if (unlikely(ret < 0)) + return ret; + + /* Check state */ + switch (ffs->state) { + case FFS_READ_DESCRIPTORS: + case FFS_READ_STRINGS: + /* Copy data */ + if (unlikely(len < 16)) { + ret = -EINVAL; + break; + } + + data = ffs_prepare_buffer(buf, len); + if (IS_ERR(data)) { + ret = PTR_ERR(data); + break; + } + + /* Handle data */ + if (ffs->state == FFS_READ_DESCRIPTORS) { + pr_info("read descriptors\n"); + ret = __ffs_data_got_descs(ffs, data, len); + if (unlikely(ret < 0)) + break; + + ffs->state = FFS_READ_STRINGS; + ret = len; + } else { + pr_info("read strings\n"); + ret = __ffs_data_got_strings(ffs, data, len); + if (unlikely(ret < 0)) + break; + + ret = ffs_epfiles_create(ffs); + if (unlikely(ret)) { + ffs->state = FFS_CLOSING; + break; + } + + ffs->state = FFS_ACTIVE; + mutex_unlock(&ffs->mutex); + + ret = ffs_ready(ffs); + if (unlikely(ret < 0)) { + ffs->state = FFS_CLOSING; + return ret; + } + + return len; + } + break; + + case FFS_ACTIVE: + data = NULL; + /* + * We're called from user space, we can use _irq + * rather then _irqsave + */ + spin_lock_irq(&ffs->ev.waitq.lock); + switch (ffs_setup_state_clear_cancelled(ffs)) { + case FFS_SETUP_CANCELLED: + ret = -EIDRM; + goto done_spin; + + case FFS_NO_SETUP: + ret = -ESRCH; + goto done_spin; + + case FFS_SETUP_PENDING: + break; + } + + /* FFS_SETUP_PENDING */ + if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) { + spin_unlock_irq(&ffs->ev.waitq.lock); + ret = __ffs_ep0_stall(ffs); + break; + } + + /* FFS_SETUP_PENDING and not stall */ + len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength)); + + spin_unlock_irq(&ffs->ev.waitq.lock); + + data = ffs_prepare_buffer(buf, len); + if (IS_ERR(data)) { + ret = PTR_ERR(data); + break; + } + + spin_lock_irq(&ffs->ev.waitq.lock); + + /* + * We are guaranteed to be still in FFS_ACTIVE state + * but the state of setup could have changed from + * FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need + * to check for that. If that happened we copied data + * from user space in vain but it's unlikely. + * + * For sure we are not in FFS_NO_SETUP since this is + * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP + * transition can be performed and it's protected by + * mutex. + */ + if (ffs_setup_state_clear_cancelled(ffs) == + FFS_SETUP_CANCELLED) { + ret = -EIDRM; +done_spin: + spin_unlock_irq(&ffs->ev.waitq.lock); + } else { + /* unlocks spinlock */ + ret = __ffs_ep0_queue_wait(ffs, data, len); + } + kfree(data); + break; + + default: + ret = -EBADFD; + break; + } + + mutex_unlock(&ffs->mutex); + return ret; +} + +/* Called with ffs->ev.waitq.lock and ffs->mutex held, both released on exit. 
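+ * Copies up to n pending events to user space; delivering a FUNCTIONFS_SETUP
+ * event also moves the setup state machine to FFS_SETUP_PENDING.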
*/ +static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf, + size_t n) +{ + /* + * n cannot be bigger than ffs->ev.count, which cannot be bigger than + * size of ffs->ev.types array (which is four) so that's how much space + * we reserve. + */ + struct usb_functionfs_event events[ARRAY_SIZE(ffs->ev.types)]; + const size_t size = n * sizeof *events; + unsigned i = 0; + + memset(events, 0, size); + + do { + events[i].type = ffs->ev.types[i]; + if (events[i].type == FUNCTIONFS_SETUP) { + events[i].u.setup = ffs->ev.setup; + ffs->setup_state = FFS_SETUP_PENDING; + } + } while (++i < n); + + ffs->ev.count -= n; + if (ffs->ev.count) + memmove(ffs->ev.types, ffs->ev.types + n, + ffs->ev.count * sizeof *ffs->ev.types); + + spin_unlock_irq(&ffs->ev.waitq.lock); + mutex_unlock(&ffs->mutex); + + return unlikely(__copy_to_user(buf, events, size)) ? -EFAULT : size; +} + +static ssize_t ffs_ep0_read(struct file *file, char __user *buf, + size_t len, loff_t *ptr) +{ + struct ffs_data *ffs = file->private_data; + char *data = NULL; + size_t n; + int ret; + + ENTER(); + + /* Fast check if setup was canceled */ + if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED) + return -EIDRM; + + /* Acquire mutex */ + ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK); + if (unlikely(ret < 0)) + return ret; + + /* Check state */ + if (ffs->state != FFS_ACTIVE) { + ret = -EBADFD; + goto done_mutex; + } + + /* + * We're called from user space, we can use _irq rather then + * _irqsave + */ + spin_lock_irq(&ffs->ev.waitq.lock); + + switch (ffs_setup_state_clear_cancelled(ffs)) { + case FFS_SETUP_CANCELLED: + ret = -EIDRM; + break; + + case FFS_NO_SETUP: + n = len / sizeof(struct usb_functionfs_event); + if (unlikely(!n)) { + ret = -EINVAL; + break; + } + + if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) { + ret = -EAGAIN; + break; + } + + if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq, + ffs->ev.count)) { + ret = -EINTR; + break; + } + + return __ffs_ep0_read_events(ffs, buf, + min(n, (size_t)ffs->ev.count)); + + case FFS_SETUP_PENDING: + if (ffs->ev.setup.bRequestType & USB_DIR_IN) { + spin_unlock_irq(&ffs->ev.waitq.lock); + ret = __ffs_ep0_stall(ffs); + goto done_mutex; + } + + len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength)); + + spin_unlock_irq(&ffs->ev.waitq.lock); + + if (likely(len)) { + data = kmalloc(len, GFP_KERNEL); + if (unlikely(!data)) { + ret = -ENOMEM; + goto done_mutex; + } + } + + spin_lock_irq(&ffs->ev.waitq.lock); + + /* See ffs_ep0_write() */ + if (ffs_setup_state_clear_cancelled(ffs) == + FFS_SETUP_CANCELLED) { + ret = -EIDRM; + break; + } + + /* unlocks spinlock */ + ret = __ffs_ep0_queue_wait(ffs, data, len); + if (likely(ret > 0) && unlikely(__copy_to_user(buf, data, len))) + ret = -EFAULT; + goto done_mutex; + + default: + ret = -EBADFD; + break; + } + + spin_unlock_irq(&ffs->ev.waitq.lock); +done_mutex: + mutex_unlock(&ffs->mutex); + kfree(data); + return ret; +} + +static int ffs_ep0_open(struct inode *inode, struct file *file) +{ + struct ffs_data *ffs = inode->i_private; + + ENTER(); + + if (unlikely(ffs->state == FFS_CLOSING)) + return -EBUSY; + + file->private_data = ffs; + ffs_data_opened(ffs); + + return 0; +} + +static int ffs_ep0_release(struct inode *inode, struct file *file) +{ + struct ffs_data *ffs = file->private_data; + + ENTER(); + + ffs_data_closed(ffs); + + return 0; +} + +static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value) +{ + struct ffs_data *ffs = file->private_data; + 
struct usb_gadget *gadget = ffs->gadget; + long ret; + + ENTER(); + + if (code == FUNCTIONFS_INTERFACE_REVMAP) { + struct ffs_function *func = ffs->func; + ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV; + } else if (gadget && gadget->ops->ioctl) { + ret = gadget->ops->ioctl(gadget, code, value); + } else { + ret = -ENOTTY; + } + + return ret; +} + +static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait) +{ + struct ffs_data *ffs = file->private_data; + unsigned int mask = POLLWRNORM; + int ret; + + poll_wait(file, &ffs->ev.waitq, wait); + + ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK); + if (unlikely(ret < 0)) + return mask; + + switch (ffs->state) { + case FFS_READ_DESCRIPTORS: + case FFS_READ_STRINGS: + mask |= POLLOUT; + break; + + case FFS_ACTIVE: + switch (ffs->setup_state) { + case FFS_NO_SETUP: + if (ffs->ev.count) + mask |= POLLIN; + break; + + case FFS_SETUP_PENDING: + case FFS_SETUP_CANCELLED: + mask |= (POLLIN | POLLOUT); + break; + } + case FFS_CLOSING: + break; + case FFS_DEACTIVATED: + break; + } + + mutex_unlock(&ffs->mutex); + + return mask; +} + +static const struct file_operations ffs_ep0_operations = { + .llseek = no_llseek, + + .open = ffs_ep0_open, + .write = ffs_ep0_write, + .read = ffs_ep0_read, + .release = ffs_ep0_release, + .unlocked_ioctl = ffs_ep0_ioctl, + .poll = ffs_ep0_poll, +}; + + +/* "Normal" endpoints operations ********************************************/ + +static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req) +{ + ENTER(); + if (likely(req->context)) { + struct ffs_ep *ep = _ep->driver_data; + ep->status = req->status ? req->status : req->actual; + complete(req->context); + } +} + +static void ffs_user_copy_worker(struct work_struct *work) +{ + struct ffs_io_data *io_data = container_of(work, struct ffs_io_data, + work); + int ret = io_data->req->status ? io_data->req->status : + io_data->req->actual; + + if (io_data->read && ret > 0) { + use_mm(io_data->mm); + ret = copy_to_iter(io_data->buf, ret, &io_data->data); + if (iov_iter_count(&io_data->data)) + ret = -EFAULT; + unuse_mm(io_data->mm); + } + + io_data->kiocb->ki_complete(io_data->kiocb, ret, ret); + + if (io_data->ffs->ffs_eventfd && + !(io_data->kiocb->ki_flags & IOCB_EVENTFD)) + eventfd_signal(io_data->ffs->ffs_eventfd, 1); + + usb_ep_free_request(io_data->ep, io_data->req); + + io_data->kiocb->private = NULL; + if (io_data->read) + kfree(io_data->to_free); + kfree(io_data->buf); + kfree(io_data); +} + +static void ffs_epfile_async_io_complete(struct usb_ep *_ep, + struct usb_request *req) +{ + struct ffs_io_data *io_data = req->context; + + ENTER(); + + INIT_WORK(&io_data->work, ffs_user_copy_worker); + schedule_work(&io_data->work); +} + +static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) +{ + struct ffs_epfile *epfile = file->private_data; + struct ffs_ep *ep; + char *data = NULL; + ssize_t ret, data_len = -EINVAL; + int halt; + + /* Are we still active? */ + if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) { + ret = -ENODEV; + goto error; + } + + /* Wait for endpoint to be enabled */ + ep = epfile->ep; + if (!ep) { + if (file->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + goto error; + } + + ret = wait_event_interruptible(epfile->wait, (ep = epfile->ep)); + if (ret) { + ret = -EINTR; + goto error; + } + } + + /* Do we halt? 
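+ * A request whose direction does not match the endpoint (a read on an IN
+ * endpoint or a write on an OUT endpoint) halts the endpoint instead of
+ * transferring data.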
*/ + halt = (!io_data->read == !epfile->in); + if (halt && epfile->isoc) { + ret = -EINVAL; + goto error; + } + + /* Allocate & copy */ + if (!halt) { + /* + * if we _do_ wait above, the epfile->ffs->gadget might be NULL + * before the waiting completes, so do not assign to 'gadget' earlier + */ + struct usb_gadget *gadget = epfile->ffs->gadget; + size_t copied; + + spin_lock_irq(&epfile->ffs->eps_lock); + /* In the meantime, endpoint got disabled or changed. */ + if (epfile->ep != ep) { + spin_unlock_irq(&epfile->ffs->eps_lock); + return -ESHUTDOWN; + } + data_len = iov_iter_count(&io_data->data); + /* + * Controller may require buffer size to be aligned to + * maxpacketsize of an out endpoint. + */ + if (io_data->read) + data_len = usb_ep_align_maybe(gadget, ep->ep, data_len); + spin_unlock_irq(&epfile->ffs->eps_lock); + + data = kmalloc(data_len, GFP_KERNEL); + if (unlikely(!data)) + return -ENOMEM; + if (!io_data->read) { + copied = copy_from_iter(data, data_len, &io_data->data); + if (copied != data_len) { + ret = -EFAULT; + goto error; + } + } + } + + /* We will be using request */ + ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK); + if (unlikely(ret)) + goto error; + + spin_lock_irq(&epfile->ffs->eps_lock); + + if (epfile->ep != ep) { + /* In the meantime, endpoint got disabled or changed. */ + ret = -ESHUTDOWN; + spin_unlock_irq(&epfile->ffs->eps_lock); + } else if (halt) { + /* Halt */ + if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep)) + usb_ep_set_halt(ep->ep); + spin_unlock_irq(&epfile->ffs->eps_lock); + ret = -EBADMSG; + } else { + /* Fire the request */ + struct usb_request *req; + + /* + * Sanity Check: even though data_len can't be used + * uninitialized at the time I write this comment, some + * compilers complain about this situation. + * In order to keep the code clean from warnings, data_len is + * being initialized to -EINVAL during its declaration, which + * means we can't rely on compiler anymore to warn no future + * changes won't result in data_len being used uninitialized. + * For such reason, we're adding this redundant sanity check + * here. + */ + if (unlikely(data_len == -EINVAL)) { + WARN(1, "%s: data_len == -EINVAL\n", __func__); + ret = -EINVAL; + goto error_lock; + } + + if (io_data->aio) { + req = usb_ep_alloc_request(ep->ep, GFP_KERNEL); + if (unlikely(!req)) + goto error_lock; + + req->buf = data; + req->length = data_len; + + io_data->buf = data; + io_data->ep = ep->ep; + io_data->req = req; + io_data->ffs = epfile->ffs; + + req->context = io_data; + req->complete = ffs_epfile_async_io_complete; + + ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); + if (unlikely(ret)) { + usb_ep_free_request(ep->ep, req); + goto error_lock; + } + ret = -EIOCBQUEUED; + + spin_unlock_irq(&epfile->ffs->eps_lock); + } else { + DECLARE_COMPLETION_ONSTACK(done); + + req = ep->req; + req->buf = data; + req->length = data_len; + + req->context = &done; + req->complete = ffs_epfile_io_complete; + + ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); + + spin_unlock_irq(&epfile->ffs->eps_lock); + + if (unlikely(ret < 0)) { + /* nop */ + } else if (unlikely( + wait_for_completion_interruptible(&done))) { + ret = -EINTR; + usb_ep_dequeue(ep->ep, req); + } else { + /* + * XXX We may end up silently droping data + * here. Since data_len (i.e. req->length) may + * be bigger than len (after being rounded up + * to maxpacketsize), we may end up with more + * data then user space has space for. 
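+ * copy_to_iter() below copies only as much as the iterator has room for,
+ * and whatever is left of the request is discarded.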
+ */ + ret = ep->status; + if (io_data->read && ret > 0) { + ret = copy_to_iter(data, ret, &io_data->data); + if (!ret) + ret = -EFAULT; + } + } + kfree(data); + } + } + + mutex_unlock(&epfile->mutex); + return ret; + +error_lock: + spin_unlock_irq(&epfile->ffs->eps_lock); + mutex_unlock(&epfile->mutex); +error: + kfree(data); + return ret; +} + +static int +ffs_epfile_open(struct inode *inode, struct file *file) +{ + struct ffs_epfile *epfile = inode->i_private; + + ENTER(); + + if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) + return -ENODEV; + + file->private_data = epfile; + ffs_data_opened(epfile->ffs); + + return 0; +} + +static int ffs_aio_cancel(struct kiocb *kiocb) +{ + struct ffs_io_data *io_data = kiocb->private; + struct ffs_epfile *epfile = kiocb->ki_filp->private_data; + int value; + + ENTER(); + + spin_lock_irq(&epfile->ffs->eps_lock); + + if (likely(io_data && io_data->ep && io_data->req)) + value = usb_ep_dequeue(io_data->ep, io_data->req); + else + value = -EINVAL; + + spin_unlock_irq(&epfile->ffs->eps_lock); + + return value; +} + +static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from) +{ + struct ffs_io_data io_data, *p = &io_data; + ssize_t res; + + ENTER(); + + if (!is_sync_kiocb(kiocb)) { + p = kmalloc(sizeof(io_data), GFP_KERNEL); + if (unlikely(!p)) + return -ENOMEM; + p->aio = true; + } else { + p->aio = false; + } + + p->read = false; + p->kiocb = kiocb; + p->data = *from; + p->mm = current->mm; + + kiocb->private = p; + + if (p->aio) + kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); + + res = ffs_epfile_io(kiocb->ki_filp, p); + if (res == -EIOCBQUEUED) + return res; + if (p->aio) + kfree(p); + else + *from = p->data; + return res; +} + +static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to) +{ + struct ffs_io_data io_data, *p = &io_data; + ssize_t res; + + ENTER(); + + if (!is_sync_kiocb(kiocb)) { + p = kmalloc(sizeof(io_data), GFP_KERNEL); + if (unlikely(!p)) + return -ENOMEM; + p->aio = true; + } else { + p->aio = false; + } + + p->read = true; + p->kiocb = kiocb; + if (p->aio) { + p->to_free = dup_iter(&p->data, to, GFP_KERNEL); + if (!p->to_free) { + kfree(p); + return -ENOMEM; + } + } else { + p->data = *to; + p->to_free = NULL; + } + p->mm = current->mm; + + kiocb->private = p; + + if (p->aio) + kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); + + res = ffs_epfile_io(kiocb->ki_filp, p); + if (res == -EIOCBQUEUED) + return res; + + if (p->aio) { + kfree(p->to_free); + kfree(p); + } else { + *to = p->data; + } + return res; +} + +static int +ffs_epfile_release(struct inode *inode, struct file *file) +{ + struct ffs_epfile *epfile = inode->i_private; + + ENTER(); + + ffs_data_closed(epfile->ffs); + + return 0; +} + +static long ffs_epfile_ioctl(struct file *file, unsigned code, + unsigned long value) +{ + struct ffs_epfile *epfile = file->private_data; + int ret; + + ENTER(); + + if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) + return -ENODEV; + + spin_lock_irq(&epfile->ffs->eps_lock); + if (likely(epfile->ep)) { + switch (code) { + case FUNCTIONFS_FIFO_STATUS: + ret = usb_ep_fifo_status(epfile->ep->ep); + break; + case FUNCTIONFS_FIFO_FLUSH: + usb_ep_fifo_flush(epfile->ep->ep); + ret = 0; + break; + case FUNCTIONFS_CLEAR_HALT: + ret = usb_ep_clear_halt(epfile->ep->ep); + break; + case FUNCTIONFS_ENDPOINT_REVMAP: + ret = epfile->ep->num; + break; + case FUNCTIONFS_ENDPOINT_DESC: + { + int desc_idx; + struct usb_endpoint_descriptor *desc; + + switch (epfile->ffs->gadget->speed) { + case USB_SPEED_SUPER: + desc_idx = 2; + 
break; + case USB_SPEED_HIGH: + desc_idx = 1; + break; + default: + desc_idx = 0; + } + desc = epfile->ep->descs[desc_idx]; + + spin_unlock_irq(&epfile->ffs->eps_lock); + ret = copy_to_user((void *)value, desc, sizeof(*desc)); + if (ret) + ret = -EFAULT; + return ret; + } + default: + ret = -ENOTTY; + } + } else { + ret = -ENODEV; + } + spin_unlock_irq(&epfile->ffs->eps_lock); + + return ret; +} + +static const struct file_operations ffs_epfile_operations = { + .llseek = no_llseek, + + .open = ffs_epfile_open, + .write_iter = ffs_epfile_write_iter, + .read_iter = ffs_epfile_read_iter, + .release = ffs_epfile_release, + .unlocked_ioctl = ffs_epfile_ioctl, +}; + + +/* File system and super block operations ***********************************/ + +/* + * Mounting the file system creates a controller file, used first for + * function configuration then later for event monitoring. + */ + +static struct inode *__must_check +ffs_sb_make_inode(struct super_block *sb, void *data, + const struct file_operations *fops, + const struct inode_operations *iops, + struct ffs_file_perms *perms) +{ + struct inode *inode; + + ENTER(); + + inode = new_inode(sb); + + if (likely(inode)) { + struct timespec current_time = CURRENT_TIME; + + inode->i_ino = get_next_ino(); + inode->i_mode = perms->mode; + inode->i_uid = perms->uid; + inode->i_gid = perms->gid; + inode->i_atime = current_time; + inode->i_mtime = current_time; + inode->i_ctime = current_time; + inode->i_private = data; + if (fops) + inode->i_fop = fops; + if (iops) + inode->i_op = iops; + } + + return inode; +} + +/* Create "regular" file */ +static struct dentry *ffs_sb_create_file(struct super_block *sb, + const char *name, void *data, + const struct file_operations *fops) +{ + struct ffs_data *ffs = sb->s_fs_info; + struct dentry *dentry; + struct inode *inode; + + ENTER(); + + dentry = d_alloc_name(sb->s_root, name); + if (unlikely(!dentry)) + return NULL; + + inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms); + if (unlikely(!inode)) { + dput(dentry); + return NULL; + } + + d_add(dentry, inode); + return dentry; +} + +/* Super block */ +static const struct super_operations ffs_sb_operations = { + .statfs = simple_statfs, + .drop_inode = generic_delete_inode, +}; + +struct ffs_sb_fill_data { + struct ffs_file_perms perms; + umode_t root_mode; + const char *dev_name; + bool no_disconnect; + struct ffs_data *ffs_data; +}; + +static int ffs_sb_fill(struct super_block *sb, void *_data, int silent) +{ + struct ffs_sb_fill_data *data = _data; + struct inode *inode; + struct ffs_data *ffs = data->ffs_data; + + ENTER(); + + ffs->sb = sb; + data->ffs_data = NULL; + sb->s_fs_info = ffs; + sb->s_blocksize = PAGE_CACHE_SIZE; + sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_magic = FUNCTIONFS_MAGIC; + sb->s_op = &ffs_sb_operations; + sb->s_time_gran = 1; + + /* Root inode */ + data->perms.mode = data->root_mode; + inode = ffs_sb_make_inode(sb, NULL, + &simple_dir_operations, + &simple_dir_inode_operations, + &data->perms); + sb->s_root = d_make_root(inode); + if (unlikely(!sb->s_root)) + return -ENOMEM; + + /* EP0 file */ + if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs, + &ffs_ep0_operations))) + return -ENOMEM; + + return 0; +} + +static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts) +{ + ENTER(); + + if (!opts || !*opts) + return 0; + + for (;;) { + unsigned long value; + char *eq, *comma; + + /* Option limit */ + comma = strchr(opts, ','); + if (comma) + *comma = 0; + + /* Value limit */ + eq = strchr(opts, '='); + if 
(unlikely(!eq)) { + pr_err("'=' missing in %s\n", opts); + return -EINVAL; + } + *eq = 0; + + /* Parse value */ + if (kstrtoul(eq + 1, 0, &value)) { + pr_err("%s: invalid value: %s\n", opts, eq + 1); + return -EINVAL; + } + + /* Interpret option */ + switch (eq - opts) { + case 13: + if (!memcmp(opts, "no_disconnect", 13)) + data->no_disconnect = !!value; + else + goto invalid; + break; + case 5: + if (!memcmp(opts, "rmode", 5)) + data->root_mode = (value & 0555) | S_IFDIR; + else if (!memcmp(opts, "fmode", 5)) + data->perms.mode = (value & 0666) | S_IFREG; + else + goto invalid; + break; + + case 4: + if (!memcmp(opts, "mode", 4)) { + data->root_mode = (value & 0555) | S_IFDIR; + data->perms.mode = (value & 0666) | S_IFREG; + } else { + goto invalid; + } + break; + + case 3: + if (!memcmp(opts, "uid", 3)) { + data->perms.uid = make_kuid(current_user_ns(), value); + if (!uid_valid(data->perms.uid)) { + pr_err("%s: unmapped value: %lu\n", opts, value); + return -EINVAL; + } + } else if (!memcmp(opts, "gid", 3)) { + data->perms.gid = make_kgid(current_user_ns(), value); + if (!gid_valid(data->perms.gid)) { + pr_err("%s: unmapped value: %lu\n", opts, value); + return -EINVAL; + } + } else { + goto invalid; + } + break; + + default: +invalid: + pr_err("%s: invalid option\n", opts); + return -EINVAL; + } + + /* Next iteration */ + if (!comma) + break; + opts = comma + 1; + } + + return 0; +} + +/* "mount -t functionfs dev_name /dev/function" ends up here */ + +static struct dentry * +ffs_fs_mount(struct file_system_type *t, int flags, + const char *dev_name, void *opts) +{ + struct ffs_sb_fill_data data = { + .perms = { + .mode = S_IFREG | 0600, + .uid = GLOBAL_ROOT_UID, + .gid = GLOBAL_ROOT_GID, + }, + .root_mode = S_IFDIR | 0500, + .no_disconnect = false, + }; + struct dentry *rv; + int ret; + void *ffs_dev; + struct ffs_data *ffs; + + ENTER(); + + ret = ffs_fs_parse_opts(&data, opts); + if (unlikely(ret < 0)) + return ERR_PTR(ret); + + ffs = ffs_data_new(); + if (unlikely(!ffs)) + return ERR_PTR(-ENOMEM); + ffs->file_perms = data.perms; + ffs->no_disconnect = data.no_disconnect; + + ffs->dev_name = kstrdup(dev_name, GFP_KERNEL); + if (unlikely(!ffs->dev_name)) { + ffs_data_put(ffs); + return ERR_PTR(-ENOMEM); + } + + ffs_dev = ffs_acquire_dev(dev_name); + if (IS_ERR(ffs_dev)) { + ffs_data_put(ffs); + return ERR_CAST(ffs_dev); + } + ffs->private_data = ffs_dev; + data.ffs_data = ffs; + + rv = mount_nodev(t, flags, &data, ffs_sb_fill); + if (IS_ERR(rv) && data.ffs_data) { + ffs_release_dev(data.ffs_data); + ffs_data_put(data.ffs_data); + } + return rv; +} + +static void +ffs_fs_kill_sb(struct super_block *sb) +{ + ENTER(); + + kill_litter_super(sb); + if (sb->s_fs_info) { + ffs_release_dev(sb->s_fs_info); + ffs_data_closed(sb->s_fs_info); + ffs_data_put(sb->s_fs_info); + } +} + +static struct file_system_type ffs_fs_type = { + .owner = THIS_MODULE, + .name = "functionfs", + .mount = ffs_fs_mount, + .kill_sb = ffs_fs_kill_sb, +}; +MODULE_ALIAS_FS("functionfs"); + + +/* Driver's main init/cleanup functions *************************************/ + +static int functionfs_init(void) +{ + int ret; + + ENTER(); + + ret = register_filesystem(&ffs_fs_type); + if (likely(!ret)) + pr_info("file system registered\n"); + else + pr_err("failed registering file system (%d)\n", ret); + + return ret; +} + +static void functionfs_cleanup(void) +{ + ENTER(); + + pr_info("unloading\n"); + unregister_filesystem(&ffs_fs_type); +} + + +/* ffs_data and ffs_function construction and destruction code **************/ + 
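+/*
+ * ffs_data is reference counted: ffs_data_get()/ffs_data_put() manage
+ * ffs->ref, while ffs_data_opened()/ffs_data_closed() also track the number
+ * of open files, so that the instance is reset when the last file is closed
+ * (or merely deactivated when the no_disconnect mount option is set).
+ */
+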
+static void ffs_data_clear(struct ffs_data *ffs); +static void ffs_data_reset(struct ffs_data *ffs); + +static void ffs_data_get(struct ffs_data *ffs) +{ + ENTER(); + + atomic_inc(&ffs->ref); +} + +static void ffs_data_opened(struct ffs_data *ffs) +{ + ENTER(); + + atomic_inc(&ffs->ref); + if (atomic_add_return(1, &ffs->opened) == 1 && + ffs->state == FFS_DEACTIVATED) { + ffs->state = FFS_CLOSING; + ffs_data_reset(ffs); + } +} + +static void ffs_data_put(struct ffs_data *ffs) +{ + ENTER(); + + if (unlikely(atomic_dec_and_test(&ffs->ref))) { + pr_info("%s(): freeing\n", __func__); + ffs_data_clear(ffs); + BUG_ON(waitqueue_active(&ffs->ev.waitq) || + waitqueue_active(&ffs->ep0req_completion.wait)); + kfree(ffs->dev_name); + kfree(ffs); + } +} + +static void ffs_data_closed(struct ffs_data *ffs) +{ + ENTER(); + + if (atomic_dec_and_test(&ffs->opened)) { + if (ffs->no_disconnect) { + ffs->state = FFS_DEACTIVATED; + if (ffs->epfiles) { + ffs_epfiles_destroy(ffs->epfiles, + ffs->eps_count); + ffs->epfiles = NULL; + } + if (ffs->setup_state == FFS_SETUP_PENDING) + __ffs_ep0_stall(ffs); + } else { + ffs->state = FFS_CLOSING; + ffs_data_reset(ffs); + } + } + if (atomic_read(&ffs->opened) < 0) { + ffs->state = FFS_CLOSING; + ffs_data_reset(ffs); + } + + ffs_data_put(ffs); +} + +static struct ffs_data *ffs_data_new(void) +{ + struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL); + if (unlikely(!ffs)) + return NULL; + + ENTER(); + + atomic_set(&ffs->ref, 1); + atomic_set(&ffs->opened, 0); + ffs->state = FFS_READ_DESCRIPTORS; + mutex_init(&ffs->mutex); + spin_lock_init(&ffs->eps_lock); + init_waitqueue_head(&ffs->ev.waitq); + init_completion(&ffs->ep0req_completion); + + /* XXX REVISIT need to update it in some places, or do we? */ + ffs->ev.can_stall = 1; + + return ffs; +} + +static void ffs_data_clear(struct ffs_data *ffs) +{ + ENTER(); + + ffs_closed(ffs); + + BUG_ON(ffs->gadget); + + if (ffs->epfiles) + ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count); + + if (ffs->ffs_eventfd) + eventfd_ctx_put(ffs->ffs_eventfd); + + kfree(ffs->raw_descs_data); + kfree(ffs->raw_strings); + kfree(ffs->stringtabs); +} + +static void ffs_data_reset(struct ffs_data *ffs) +{ + ENTER(); + + ffs_data_clear(ffs); + + ffs->epfiles = NULL; + ffs->raw_descs_data = NULL; + ffs->raw_descs = NULL; + ffs->raw_strings = NULL; + ffs->stringtabs = NULL; + + ffs->raw_descs_length = 0; + ffs->fs_descs_count = 0; + ffs->hs_descs_count = 0; + ffs->ss_descs_count = 0; + + ffs->strings_count = 0; + ffs->interfaces_count = 0; + ffs->eps_count = 0; + + ffs->ev.count = 0; + + ffs->state = FFS_READ_DESCRIPTORS; + ffs->setup_state = FFS_NO_SETUP; + ffs->flags = 0; +} + + +static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev) +{ + struct usb_gadget_strings **lang; + int first_id; + + ENTER(); + + if (WARN_ON(ffs->state != FFS_ACTIVE + || test_and_set_bit(FFS_FL_BOUND, &ffs->flags))) + return -EBADFD; + + first_id = usb_string_ids_n(cdev, ffs->strings_count); + if (unlikely(first_id < 0)) + return first_id; + + ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL); + if (unlikely(!ffs->ep0req)) + return -ENOMEM; + ffs->ep0req->complete = ffs_ep0_complete; + ffs->ep0req->context = ffs; + + lang = ffs->stringtabs; + if (lang) { + for (; *lang; ++lang) { + struct usb_string *str = (*lang)->strings; + int id = first_id; + for (; str->s; ++id, ++str) + str->id = id; + } + } + + ffs->gadget = cdev->gadget; + ffs_data_get(ffs); + return 0; +} + +static void functionfs_unbind(struct ffs_data *ffs) +{ + 
ENTER(); + + if (!WARN_ON(!ffs->gadget)) { + usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req); + ffs->ep0req = NULL; + ffs->gadget = NULL; + clear_bit(FFS_FL_BOUND, &ffs->flags); + ffs_data_put(ffs); + } +} + +static int ffs_epfiles_create(struct ffs_data *ffs) +{ + struct ffs_epfile *epfile, *epfiles; + unsigned i, count; + + ENTER(); + + count = ffs->eps_count; + epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL); + if (!epfiles) + return -ENOMEM; + + epfile = epfiles; + for (i = 1; i <= count; ++i, ++epfile) { + epfile->ffs = ffs; + mutex_init(&epfile->mutex); + init_waitqueue_head(&epfile->wait); + if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR) + sprintf(epfile->name, "ep%02x", ffs->eps_addrmap[i]); + else + sprintf(epfile->name, "ep%u", i); + epfile->dentry = ffs_sb_create_file(ffs->sb, epfile->name, + epfile, + &ffs_epfile_operations); + if (unlikely(!epfile->dentry)) { + ffs_epfiles_destroy(epfiles, i - 1); + return -ENOMEM; + } + } + + ffs->epfiles = epfiles; + return 0; +} + +static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count) +{ + struct ffs_epfile *epfile = epfiles; + + ENTER(); + + for (; count; --count, ++epfile) { + BUG_ON(mutex_is_locked(&epfile->mutex) || + waitqueue_active(&epfile->wait)); + if (epfile->dentry) { + d_delete(epfile->dentry); + dput(epfile->dentry); + epfile->dentry = NULL; + } + } + + kfree(epfiles); +} + +static void ffs_func_eps_disable(struct ffs_function *func) +{ + struct ffs_ep *ep = func->eps; + struct ffs_epfile *epfile = func->ffs->epfiles; + unsigned count = func->ffs->eps_count; + unsigned long flags; + + spin_lock_irqsave(&func->ffs->eps_lock, flags); + do { + /* pending requests get nuked */ + if (likely(ep->ep)) + usb_ep_disable(ep->ep); + ++ep; + + if (epfile) { + epfile->ep = NULL; + ++epfile; + } + } while (--count); + spin_unlock_irqrestore(&func->ffs->eps_lock, flags); +} + +static int ffs_func_eps_enable(struct ffs_function *func) +{ + struct ffs_data *ffs = func->ffs; + struct ffs_ep *ep = func->eps; + struct ffs_epfile *epfile = ffs->epfiles; + unsigned count = ffs->eps_count; + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&func->ffs->eps_lock, flags); + do { + struct usb_endpoint_descriptor *ds; + int desc_idx; + + if (ffs->gadget->speed == USB_SPEED_SUPER) + desc_idx = 2; + else if (ffs->gadget->speed == USB_SPEED_HIGH) + desc_idx = 1; + else + desc_idx = 0; + + /* fall-back to lower speed if desc missing for current speed */ + do { + ds = ep->descs[desc_idx]; + } while (!ds && --desc_idx >= 0); + + if (!ds) { + ret = -EINVAL; + break; + } + + ep->ep->driver_data = ep; + ep->ep->desc = ds; + ret = usb_ep_enable(ep->ep); + if (likely(!ret)) { + epfile->ep = ep; + epfile->in = usb_endpoint_dir_in(ds); + epfile->isoc = usb_endpoint_xfer_isoc(ds); + } else { + break; + } + + wake_up(&epfile->wait); + + ++ep; + ++epfile; + } while (--count); + spin_unlock_irqrestore(&func->ffs->eps_lock, flags); + + return ret; +} + + +/* Parsing and building descriptors and strings *****************************/ + +/* + * This validates if data pointed by data is a valid USB descriptor as + * well as record how many interfaces, endpoints and strings are + * required by given configuration. Returns address after the + * descriptor or NULL if data is invalid. 
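+ * (In this implementation the parsing helpers return the number of bytes
+ * consumed on success and a negative errno when the data is invalid.)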
+ */ + +enum ffs_entity_type { + FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT +}; + +enum ffs_os_desc_type { + FFS_OS_DESC, FFS_OS_DESC_EXT_COMPAT, FFS_OS_DESC_EXT_PROP +}; + +typedef int (*ffs_entity_callback)(enum ffs_entity_type entity, + u8 *valuep, + struct usb_descriptor_header *desc, + void *priv); + +typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type entity, + struct usb_os_desc_header *h, void *data, + unsigned len, void *priv); + +static int __must_check ffs_do_single_desc(char *data, unsigned len, + ffs_entity_callback entity, + void *priv) +{ + struct usb_descriptor_header *_ds = (void *)data; + u8 length; + int ret; + + ENTER(); + + /* At least two bytes are required: length and type */ + if (len < 2) { + pr_vdebug("descriptor too short\n"); + return -EINVAL; + } + + /* If we have at least as many bytes as the descriptor takes? */ + length = _ds->bLength; + if (len < length) { + pr_vdebug("descriptor longer then available data\n"); + return -EINVAL; + } + +#define __entity_check_INTERFACE(val) 1 +#define __entity_check_STRING(val) (val) +#define __entity_check_ENDPOINT(val) ((val) & USB_ENDPOINT_NUMBER_MASK) +#define __entity(type, val) do { \ + pr_vdebug("entity " #type "(%02x)\n", (val)); \ + if (unlikely(!__entity_check_ ##type(val))) { \ + pr_vdebug("invalid entity's value\n"); \ + return -EINVAL; \ + } \ + ret = entity(FFS_ ##type, &val, _ds, priv); \ + if (unlikely(ret < 0)) { \ + pr_debug("entity " #type "(%02x); ret = %d\n", \ + (val), ret); \ + return ret; \ + } \ + } while (0) + + /* Parse descriptor depending on type. */ + switch (_ds->bDescriptorType) { + case USB_DT_DEVICE: + case USB_DT_CONFIG: + case USB_DT_STRING: + case USB_DT_DEVICE_QUALIFIER: + /* function can't have any of those */ + pr_vdebug("descriptor reserved for gadget: %d\n", + _ds->bDescriptorType); + return -EINVAL; + + case USB_DT_INTERFACE: { + struct usb_interface_descriptor *ds = (void *)_ds; + pr_vdebug("interface descriptor\n"); + if (length != sizeof *ds) + goto inv_length; + + __entity(INTERFACE, ds->bInterfaceNumber); + if (ds->iInterface) + __entity(STRING, ds->iInterface); + } + break; + + case USB_DT_ENDPOINT: { + struct usb_endpoint_descriptor *ds = (void *)_ds; + pr_vdebug("endpoint descriptor\n"); + if (length != USB_DT_ENDPOINT_SIZE && + length != USB_DT_ENDPOINT_AUDIO_SIZE) + goto inv_length; + __entity(ENDPOINT, ds->bEndpointAddress); + } + break; + + case HID_DT_HID: + pr_vdebug("hid descriptor\n"); + if (length != sizeof(struct hid_descriptor)) + goto inv_length; + break; + + case USB_DT_OTG: + if (length != sizeof(struct usb_otg_descriptor)) + goto inv_length; + break; + + case USB_DT_INTERFACE_ASSOCIATION: { + struct usb_interface_assoc_descriptor *ds = (void *)_ds; + pr_vdebug("interface association descriptor\n"); + if (length != sizeof *ds) + goto inv_length; + if (ds->iFunction) + __entity(STRING, ds->iFunction); + } + break; + + case USB_DT_SS_ENDPOINT_COMP: + pr_vdebug("EP SS companion descriptor\n"); + if (length != sizeof(struct usb_ss_ep_comp_descriptor)) + goto inv_length; + break; + + case USB_DT_OTHER_SPEED_CONFIG: + case USB_DT_INTERFACE_POWER: + case USB_DT_DEBUG: + case USB_DT_SECURITY: + case USB_DT_CS_RADIO_CONTROL: + /* TODO */ + pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType); + return -EINVAL; + + default: + /* We should never be here */ + pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType); + return -EINVAL; + +inv_length: + pr_vdebug("invalid length: %d (descriptor %d)\n", + _ds->bLength, _ds->bDescriptorType); 
+ return -EINVAL; + } + +#undef __entity +#undef __entity_check_DESCRIPTOR +#undef __entity_check_INTERFACE +#undef __entity_check_STRING +#undef __entity_check_ENDPOINT + + return length; +} + +static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len, + ffs_entity_callback entity, void *priv) +{ + const unsigned _len = len; + unsigned long num = 0; + + ENTER(); + + for (;;) { + int ret; + + if (num == count) + data = NULL; + + /* Record "descriptor" entity */ + ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv); + if (unlikely(ret < 0)) { + pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n", + num, ret); + return ret; + } + + if (!data) + return _len - len; + + ret = ffs_do_single_desc(data, len, entity, priv); + if (unlikely(ret < 0)) { + pr_debug("%s returns %d\n", __func__, ret); + return ret; + } + + len -= ret; + data += ret; + ++num; + } +} + +static int __ffs_data_do_entity(enum ffs_entity_type type, + u8 *valuep, struct usb_descriptor_header *desc, + void *priv) +{ + struct ffs_desc_helper *helper = priv; + struct usb_endpoint_descriptor *d; + + ENTER(); + + switch (type) { + case FFS_DESCRIPTOR: + break; + + case FFS_INTERFACE: + /* + * Interfaces are indexed from zero so if we + * encountered interface "n" then there are at least + * "n+1" interfaces. + */ + if (*valuep >= helper->interfaces_count) + helper->interfaces_count = *valuep + 1; + break; + + case FFS_STRING: + /* + * Strings are indexed from 1 (0 is magic ;) reserved + * for languages list or some such) + */ + if (*valuep > helper->ffs->strings_count) + helper->ffs->strings_count = *valuep; + break; + + case FFS_ENDPOINT: + d = (void *)desc; + helper->eps_count++; + if (helper->eps_count >= 15) + return -EINVAL; + /* Check if descriptors for any speed were already parsed */ + if (!helper->ffs->eps_count && !helper->ffs->interfaces_count) + helper->ffs->eps_addrmap[helper->eps_count] = + d->bEndpointAddress; + else if (helper->ffs->eps_addrmap[helper->eps_count] != + d->bEndpointAddress) + return -EINVAL; + break; + } + + return 0; +} + +static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type, + struct usb_os_desc_header *desc) +{ + u16 bcd_version = le16_to_cpu(desc->bcdVersion); + u16 w_index = le16_to_cpu(desc->wIndex); + + if (bcd_version != 1) { + pr_vdebug("unsupported os descriptors version: %d", + bcd_version); + return -EINVAL; + } + switch (w_index) { + case 0x4: + *next_type = FFS_OS_DESC_EXT_COMPAT; + break; + case 0x5: + *next_type = FFS_OS_DESC_EXT_PROP; + break; + default: + pr_vdebug("unsupported os descriptor type: %d", w_index); + return -EINVAL; + } + + return sizeof(*desc); +} + +/* + * Process all extended compatibility/extended property descriptors + * of a feature descriptor + */ +static int __must_check ffs_do_single_os_desc(char *data, unsigned len, + enum ffs_os_desc_type type, + u16 feature_count, + ffs_os_desc_callback entity, + void *priv, + struct usb_os_desc_header *h) +{ + int ret; + const unsigned _len = len; + + ENTER(); + + /* loop over all ext compat/ext prop descriptors */ + while (feature_count--) { + ret = entity(type, h, data, len, priv); + if (unlikely(ret < 0)) { + pr_debug("bad OS descriptor, type: %d\n", type); + return ret; + } + data += ret; + len -= ret; + } + return _len - len; +} + +/* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */ +static int __must_check ffs_do_os_descs(unsigned count, + char *data, unsigned len, + ffs_os_desc_callback entity, void *priv) +{ + const unsigned _len = len; + unsigned 
long num = 0; + + ENTER(); + + for (num = 0; num < count; ++num) { + int ret; + enum ffs_os_desc_type type; + u16 feature_count; + struct usb_os_desc_header *desc = (void *)data; + + if (len < sizeof(*desc)) + return -EINVAL; + + /* + * Record "descriptor" entity. + * Process dwLength, bcdVersion, wIndex, get b/wCount. + * Move the data pointer to the beginning of extended + * compatibilities proper or extended properties proper + * portions of the data + */ + if (le32_to_cpu(desc->dwLength) > len) + return -EINVAL; + + ret = __ffs_do_os_desc_header(&type, desc); + if (unlikely(ret < 0)) { + pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n", + num, ret); + return ret; + } + /* + * 16-bit hex "?? 00" Little Endian looks like 8-bit hex "??" + */ + feature_count = le16_to_cpu(desc->wCount); + if (type == FFS_OS_DESC_EXT_COMPAT && + (feature_count > 255 || desc->Reserved)) + return -EINVAL; + len -= ret; + data += ret; + + /* + * Process all function/property descriptors + * of this Feature Descriptor + */ + ret = ffs_do_single_os_desc(data, len, type, + feature_count, entity, priv, desc); + if (unlikely(ret < 0)) { + pr_debug("%s returns %d\n", __func__, ret); + return ret; + } + + len -= ret; + data += ret; + } + return _len - len; +} + +/** + * Validate contents of the buffer from userspace related to OS descriptors. + */ +static int __ffs_data_do_os_desc(enum ffs_os_desc_type type, + struct usb_os_desc_header *h, void *data, + unsigned len, void *priv) +{ + struct ffs_data *ffs = priv; + u8 length; + + ENTER(); + + switch (type) { + case FFS_OS_DESC_EXT_COMPAT: { + struct usb_ext_compat_desc *d = data; + int i; + + if (len < sizeof(*d) || + d->bFirstInterfaceNumber >= ffs->interfaces_count || + d->Reserved1) + return -EINVAL; + for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i) + if (d->Reserved2[i]) + return -EINVAL; + + length = sizeof(struct usb_ext_compat_desc); + } + break; + case FFS_OS_DESC_EXT_PROP: { + struct usb_ext_prop_desc *d = data; + u32 type, pdl; + u16 pnl; + + if (len < sizeof(*d) || h->interface >= ffs->interfaces_count) + return -EINVAL; + length = le32_to_cpu(d->dwSize); + type = le32_to_cpu(d->dwPropertyDataType); + if (type < USB_EXT_PROP_UNICODE || + type > USB_EXT_PROP_UNICODE_MULTI) { + pr_vdebug("unsupported os descriptor property type: %d", + type); + return -EINVAL; + } + pnl = le16_to_cpu(d->wPropertyNameLength); + pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl)); + if (length != 14 + pnl + pdl) { + pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n", + length, pnl, pdl, type); + return -EINVAL; + } + ++ffs->ms_os_descs_ext_prop_count; + /* property name reported to the host as "WCHAR"s */ + ffs->ms_os_descs_ext_prop_name_len += pnl * 2; + ffs->ms_os_descs_ext_prop_data_len += pdl; + } + break; + default: + pr_vdebug("unknown descriptor: %d\n", type); + return -EINVAL; + } + return length; +} + +static int __ffs_data_got_descs(struct ffs_data *ffs, + char *const _data, size_t len) +{ + char *data = _data, *raw_descs; + unsigned os_descs_count = 0, counts[3], flags; + int ret = -EINVAL, i; + struct ffs_desc_helper helper; + + ENTER(); + + if (get_unaligned_le32(data + 4) != len) + goto error; + + switch (get_unaligned_le32(data)) { + case FUNCTIONFS_DESCRIPTORS_MAGIC: + flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC; + data += 8; + len -= 8; + break; + case FUNCTIONFS_DESCRIPTORS_MAGIC_V2: + flags = get_unaligned_le32(data + 8); + ffs->user_flags = flags; + if (flags & ~(FUNCTIONFS_HAS_FS_DESC | + FUNCTIONFS_HAS_HS_DESC | + 
FUNCTIONFS_HAS_SS_DESC | + FUNCTIONFS_HAS_MS_OS_DESC | + FUNCTIONFS_VIRTUAL_ADDR | + FUNCTIONFS_EVENTFD)) { + ret = -ENOSYS; + goto error; + } + data += 12; + len -= 12; + break; + default: + goto error; + } + + if (flags & FUNCTIONFS_EVENTFD) { + if (len < 4) + goto error; + ffs->ffs_eventfd = + eventfd_ctx_fdget((int)get_unaligned_le32(data)); + if (IS_ERR(ffs->ffs_eventfd)) { + ret = PTR_ERR(ffs->ffs_eventfd); + ffs->ffs_eventfd = NULL; + goto error; + } + data += 4; + len -= 4; + } + + /* Read fs_count, hs_count and ss_count (if present) */ + for (i = 0; i < 3; ++i) { + if (!(flags & (1 << i))) { + counts[i] = 0; + } else if (len < 4) { + goto error; + } else { + counts[i] = get_unaligned_le32(data); + data += 4; + len -= 4; + } + } + if (flags & (1 << i)) { + os_descs_count = get_unaligned_le32(data); + data += 4; + len -= 4; + }; + + /* Read descriptors */ + raw_descs = data; + helper.ffs = ffs; + for (i = 0; i < 3; ++i) { + if (!counts[i]) + continue; + helper.interfaces_count = 0; + helper.eps_count = 0; + ret = ffs_do_descs(counts[i], data, len, + __ffs_data_do_entity, &helper); + if (ret < 0) + goto error; + if (!ffs->eps_count && !ffs->interfaces_count) { + ffs->eps_count = helper.eps_count; + ffs->interfaces_count = helper.interfaces_count; + } else { + if (ffs->eps_count != helper.eps_count) { + ret = -EINVAL; + goto error; + } + if (ffs->interfaces_count != helper.interfaces_count) { + ret = -EINVAL; + goto error; + } + } + data += ret; + len -= ret; + } + if (os_descs_count) { + ret = ffs_do_os_descs(os_descs_count, data, len, + __ffs_data_do_os_desc, ffs); + if (ret < 0) + goto error; + data += ret; + len -= ret; + } + + if (raw_descs == data || len) { + ret = -EINVAL; + goto error; + } + + ffs->raw_descs_data = _data; + ffs->raw_descs = raw_descs; + ffs->raw_descs_length = data - raw_descs; + ffs->fs_descs_count = counts[0]; + ffs->hs_descs_count = counts[1]; + ffs->ss_descs_count = counts[2]; + ffs->ms_os_descs_count = os_descs_count; + + return 0; + +error: + kfree(_data); + return ret; +} + +static int __ffs_data_got_strings(struct ffs_data *ffs, + char *const _data, size_t len) +{ + u32 str_count, needed_count, lang_count; + struct usb_gadget_strings **stringtabs, *t; + struct usb_string *strings, *s; + const char *data = _data; + + ENTER(); + + if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC || + get_unaligned_le32(data + 4) != len)) + goto error; + str_count = get_unaligned_le32(data + 8); + lang_count = get_unaligned_le32(data + 12); + + /* if one is zero the other must be zero */ + if (unlikely(!str_count != !lang_count)) + goto error; + + /* Do we have at least as many strings as descriptors need? */ + needed_count = ffs->strings_count; + if (unlikely(str_count < needed_count)) + goto error; + + /* + * If we don't need any strings just return and free all + * memory. + */ + if (!needed_count) { + kfree(_data); + return 0; + } + + /* Allocate everything in one chunk so there's less maintenance. 
*/ + { + unsigned i = 0; + vla_group(d); + vla_item(d, struct usb_gadget_strings *, stringtabs, + lang_count + 1); + vla_item(d, struct usb_gadget_strings, stringtab, lang_count); + vla_item(d, struct usb_string, strings, + lang_count*(needed_count+1)); + + char *vlabuf = kmalloc(vla_group_size(d), GFP_KERNEL); + + if (unlikely(!vlabuf)) { + kfree(_data); + return -ENOMEM; + } + + /* Initialize the VLA pointers */ + stringtabs = vla_ptr(vlabuf, d, stringtabs); + t = vla_ptr(vlabuf, d, stringtab); + i = lang_count; + do { + *stringtabs++ = t++; + } while (--i); + *stringtabs = NULL; + + /* stringtabs = vlabuf = d_stringtabs for later kfree */ + stringtabs = vla_ptr(vlabuf, d, stringtabs); + t = vla_ptr(vlabuf, d, stringtab); + s = vla_ptr(vlabuf, d, strings); + strings = s; + } + + /* For each language */ + data += 16; + len -= 16; + + do { /* lang_count > 0 so we can use do-while */ + unsigned needed = needed_count; + + if (unlikely(len < 3)) + goto error_free; + t->language = get_unaligned_le16(data); + t->strings = s; + ++t; + + data += 2; + len -= 2; + + /* For each string */ + do { /* str_count > 0 so we can use do-while */ + size_t length = strnlen(data, len); + + if (unlikely(length == len)) + goto error_free; + + /* + * User may provide more strings then we need, + * if that's the case we simply ignore the + * rest + */ + if (likely(needed)) { + /* + * s->id will be set while adding + * function to configuration so for + * now just leave garbage here. + */ + s->s = data; + --needed; + ++s; + } + + data += length + 1; + len -= length + 1; + } while (--str_count); + + s->id = 0; /* terminator */ + s->s = NULL; + ++s; + + } while (--lang_count); + + /* Some garbage left? */ + if (unlikely(len)) + goto error_free; + + /* Done! */ + ffs->stringtabs = stringtabs; + ffs->raw_strings = _data; + + return 0; + +error_free: + kfree(stringtabs); +error: + kfree(_data); + return -EINVAL; +} + + +/* Events handling and management *******************************************/ + +static void __ffs_event_add(struct ffs_data *ffs, + enum usb_functionfs_event_type type) +{ + enum usb_functionfs_event_type rem_type1, rem_type2 = type; + int neg = 0; + + /* + * Abort any unhandled setup + * + * We do not need to worry about some cmpxchg() changing value + * of ffs->setup_state without holding the lock because when + * state is FFS_SETUP_PENDING cmpxchg() in several places in + * the source does nothing. + */ + if (ffs->setup_state == FFS_SETUP_PENDING) + ffs->setup_state = FFS_SETUP_CANCELLED; + + /* + * Logic of this function guarantees that there are at most four pending + * evens on ffs->ev.types queue. This is important because the queue + * has space for four elements only and __ffs_ep0_read_events function + * depends on that limit as well. If more event types are added, those + * limits have to be revisited or guaranteed to still hold. + */ + switch (type) { + case FUNCTIONFS_RESUME: + rem_type2 = FUNCTIONFS_SUSPEND; + /* FALL THROUGH */ + case FUNCTIONFS_SUSPEND: + case FUNCTIONFS_SETUP: + rem_type1 = type; + /* Discard all similar events */ + break; + + case FUNCTIONFS_BIND: + case FUNCTIONFS_UNBIND: + case FUNCTIONFS_DISABLE: + case FUNCTIONFS_ENABLE: + /* Discard everything other then power management. 
*/ + rem_type1 = FUNCTIONFS_SUSPEND; + rem_type2 = FUNCTIONFS_RESUME; + neg = 1; + break; + + default: + WARN(1, "%d: unknown event, this should not happen\n", type); + return; + } + + { + u8 *ev = ffs->ev.types, *out = ev; + unsigned n = ffs->ev.count; + for (; n; --n, ++ev) + if ((*ev == rem_type1 || *ev == rem_type2) == neg) + *out++ = *ev; + else + pr_vdebug("purging event %d\n", *ev); + ffs->ev.count = out - ffs->ev.types; + } + + pr_vdebug("adding event %d\n", type); + ffs->ev.types[ffs->ev.count++] = type; + wake_up_locked(&ffs->ev.waitq); + if (ffs->ffs_eventfd) + eventfd_signal(ffs->ffs_eventfd, 1); +} + +static void ffs_event_add(struct ffs_data *ffs, + enum usb_functionfs_event_type type) +{ + unsigned long flags; + spin_lock_irqsave(&ffs->ev.waitq.lock, flags); + __ffs_event_add(ffs, type); + spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); +} + +/* Bind/unbind USB function hooks *******************************************/ + +static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address) +{ + int i; + + for (i = 1; i < ARRAY_SIZE(ffs->eps_addrmap); ++i) + if (ffs->eps_addrmap[i] == endpoint_address) + return i; + return -ENOENT; +} + +static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep, + struct usb_descriptor_header *desc, + void *priv) +{ + struct usb_endpoint_descriptor *ds = (void *)desc; + struct ffs_function *func = priv; + struct ffs_ep *ffs_ep; + unsigned ep_desc_id; + int idx; + static const char *speed_names[] = { "full", "high", "super" }; + + if (type != FFS_DESCRIPTOR) + return 0; + + /* + * If ss_descriptors is not NULL, we are reading super speed + * descriptors; if hs_descriptors is not NULL, we are reading high + * speed descriptors; otherwise, we are reading full speed + * descriptors. + */ + if (func->function.ss_descriptors) { + ep_desc_id = 2; + func->function.ss_descriptors[(long)valuep] = desc; + } else if (func->function.hs_descriptors) { + ep_desc_id = 1; + func->function.hs_descriptors[(long)valuep] = desc; + } else { + ep_desc_id = 0; + func->function.fs_descriptors[(long)valuep] = desc; + } + + if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT) + return 0; + + idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1; + if (idx < 0) + return idx; + + ffs_ep = func->eps + idx; + + if (unlikely(ffs_ep->descs[ep_desc_id])) { + pr_err("two %sspeed descriptors for EP %d\n", + speed_names[ep_desc_id], + ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); + return -EINVAL; + } + ffs_ep->descs[ep_desc_id] = ds; + + ffs_dump_mem(": Original ep desc", ds, ds->bLength); + if (ffs_ep->ep) { + ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress; + if (!ds->wMaxPacketSize) + ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize; + } else { + struct usb_request *req; + struct usb_ep *ep; + u8 bEndpointAddress; + + /* + * We back up bEndpointAddress because autoconfig overwrites + * it with physical endpoint address. + */ + bEndpointAddress = ds->bEndpointAddress; + pr_vdebug("autoconfig\n"); + ep = usb_ep_autoconfig(func->gadget, ds); + if (unlikely(!ep)) + return -ENOTSUPP; + ep->driver_data = func->eps + idx; + + req = usb_ep_alloc_request(ep, GFP_KERNEL); + if (unlikely(!req)) + return -ENOMEM; + + ffs_ep->ep = ep; + ffs_ep->req = req; + func->eps_revmap[ds->bEndpointAddress & + USB_ENDPOINT_NUMBER_MASK] = idx + 1; + /* + * If we use virtual address mapping, we restore + * original bEndpointAddress value. 
+ */ + if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR) + ds->bEndpointAddress = bEndpointAddress; + } + ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength); + + return 0; +} + +static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep, + struct usb_descriptor_header *desc, + void *priv) +{ + struct ffs_function *func = priv; + unsigned idx; + u8 newValue; + + switch (type) { + default: + case FFS_DESCRIPTOR: + /* Handled in previous pass by __ffs_func_bind_do_descs() */ + return 0; + + case FFS_INTERFACE: + idx = *valuep; + if (func->interfaces_nums[idx] < 0) { + int id = usb_interface_id(func->conf, &func->function); + if (unlikely(id < 0)) + return id; + func->interfaces_nums[idx] = id; + } + newValue = func->interfaces_nums[idx]; + break; + + case FFS_STRING: + /* String' IDs are allocated when fsf_data is bound to cdev */ + newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id; + break; + + case FFS_ENDPOINT: + /* + * USB_DT_ENDPOINT are handled in + * __ffs_func_bind_do_descs(). + */ + if (desc->bDescriptorType == USB_DT_ENDPOINT) + return 0; + + idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1; + if (unlikely(!func->eps[idx].ep)) + return -EINVAL; + + { + struct usb_endpoint_descriptor **descs; + descs = func->eps[idx].descs; + newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress; + } + break; + } + + pr_vdebug("%02x -> %02x\n", *valuep, newValue); + *valuep = newValue; + return 0; +} + +static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type, + struct usb_os_desc_header *h, void *data, + unsigned len, void *priv) +{ + struct ffs_function *func = priv; + u8 length = 0; + + switch (type) { + case FFS_OS_DESC_EXT_COMPAT: { + struct usb_ext_compat_desc *desc = data; + struct usb_os_desc_table *t; + + t = &func->function.os_desc_table[desc->bFirstInterfaceNumber]; + t->if_id = func->interfaces_nums[desc->bFirstInterfaceNumber]; + memcpy(t->os_desc->ext_compat_id, &desc->CompatibleID, + ARRAY_SIZE(desc->CompatibleID) + + ARRAY_SIZE(desc->SubCompatibleID)); + length = sizeof(*desc); + } + break; + case FFS_OS_DESC_EXT_PROP: { + struct usb_ext_prop_desc *desc = data; + struct usb_os_desc_table *t; + struct usb_os_desc_ext_prop *ext_prop; + char *ext_prop_name; + char *ext_prop_data; + + t = &func->function.os_desc_table[h->interface]; + t->if_id = func->interfaces_nums[h->interface]; + + ext_prop = func->ffs->ms_os_descs_ext_prop_avail; + func->ffs->ms_os_descs_ext_prop_avail += sizeof(*ext_prop); + + ext_prop->type = le32_to_cpu(desc->dwPropertyDataType); + ext_prop->name_len = le16_to_cpu(desc->wPropertyNameLength); + ext_prop->data_len = le32_to_cpu(*(u32 *) + usb_ext_prop_data_len_ptr(data, ext_prop->name_len)); + length = ext_prop->name_len + ext_prop->data_len + 14; + + ext_prop_name = func->ffs->ms_os_descs_ext_prop_name_avail; + func->ffs->ms_os_descs_ext_prop_name_avail += + ext_prop->name_len; + + ext_prop_data = func->ffs->ms_os_descs_ext_prop_data_avail; + func->ffs->ms_os_descs_ext_prop_data_avail += + ext_prop->data_len; + memcpy(ext_prop_data, + usb_ext_prop_data_ptr(data, ext_prop->name_len), + ext_prop->data_len); + /* unicode data reported to the host as "WCHAR"s */ + switch (ext_prop->type) { + case USB_EXT_PROP_UNICODE: + case USB_EXT_PROP_UNICODE_ENV: + case USB_EXT_PROP_UNICODE_LINK: + case USB_EXT_PROP_UNICODE_MULTI: + ext_prop->data_len *= 2; + break; + } + ext_prop->data = ext_prop_data; + + memcpy(ext_prop_name, usb_ext_prop_name_ptr(data), + ext_prop->name_len); + /* property name reported to the host as "WCHAR"s */ + 
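/*
 * Editor's note (illustrative, not part of the patch): the property name
 * copied just above and the data copied before it live in one packed,
 * little-endian Extended Properties record.  The 10- and 14-byte offsets
 * used here and in __ffs_data_do_os_desc() fall out of its fixed fields:
 *
 *   offset 0        __le32 dwSize                 total entry length
 *   offset 4        __le32 dwPropertyDataType
 *   offset 8        __le16 wPropertyNameLength    (= pnl)
 *   offset 10       name[pnl]
 *   offset 10+pnl   __le32 dwPropertyDataLength   (= pdl)
 *   offset 14+pnl   data[pdl]
 *
 * so dwSize must equal 14 + pnl + pdl.  The helpers below are a
 * hypothetical restatement of that arithmetic, not the driver's API.
 */
#if 0	/* example only, kept out of the build */
static inline u8 *ext_prop_name_ptr_example(u8 *entry)
{
	return entry + 10;		/* name follows the three fixed fields */
}

static inline u8 *ext_prop_data_len_ptr_example(u8 *entry, unsigned pnl)
{
	return entry + 10 + pnl;	/* dwPropertyDataLength follows the name */
}

static inline u8 *ext_prop_data_ptr_example(u8 *entry, unsigned pnl)
{
	return entry + 14 + pnl;	/* property data follows its length field */
}
#endif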
ext_prop->name_len *= 2; + ext_prop->name = ext_prop_name; + + t->os_desc->ext_prop_len += + ext_prop->name_len + ext_prop->data_len + 14; + ++t->os_desc->ext_prop_count; + list_add_tail(&ext_prop->entry, &t->os_desc->ext_prop); + } + break; + default: + pr_vdebug("unknown descriptor: %d\n", type); + } + + return length; +} + +static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f, + struct usb_configuration *c) +{ + struct ffs_function *func = ffs_func_from_usb(f); + struct f_fs_opts *ffs_opts = + container_of(f->fi, struct f_fs_opts, func_inst); + int ret; + + ENTER(); + + /* + * Legacy gadget triggers binding in functionfs_ready_callback, + * which already uses locking; taking the same lock here would + * cause a deadlock. + * + * Configfs-enabled gadgets however do need ffs_dev_lock. + */ + if (!ffs_opts->no_configfs) + ffs_dev_lock(); + ret = ffs_opts->dev->desc_ready ? 0 : -ENODEV; + func->ffs = ffs_opts->dev->ffs_data; + if (!ffs_opts->no_configfs) + ffs_dev_unlock(); + if (ret) + return ERR_PTR(ret); + + func->conf = c; + func->gadget = c->cdev->gadget; + + /* + * in drivers/usb/gadget/configfs.c:configfs_composite_bind() + * configurations are bound in sequence with list_for_each_entry, + * in each configuration its functions are bound in sequence + * with list_for_each_entry, so we assume no race condition + * with regard to ffs_opts->bound access + */ + if (!ffs_opts->refcnt) { + ret = functionfs_bind(func->ffs, c->cdev); + if (ret) + return ERR_PTR(ret); + } + ffs_opts->refcnt++; + func->function.strings = func->ffs->stringtabs; + + return ffs_opts; +} + +static int _ffs_func_bind(struct usb_configuration *c, + struct usb_function *f) +{ + struct ffs_function *func = ffs_func_from_usb(f); + struct ffs_data *ffs = func->ffs; + + const int full = !!func->ffs->fs_descs_count; + const int high = gadget_is_dualspeed(func->gadget) && + func->ffs->hs_descs_count; + const int super = gadget_is_superspeed(func->gadget) && + func->ffs->ss_descs_count; + + int fs_len, hs_len, ss_len, ret, i; + + /* Make it a single chunk, less management later on */ + vla_group(d); + vla_item_with_sz(d, struct ffs_ep, eps, ffs->eps_count); + vla_item_with_sz(d, struct usb_descriptor_header *, fs_descs, + full ? ffs->fs_descs_count + 1 : 0); + vla_item_with_sz(d, struct usb_descriptor_header *, hs_descs, + high ? ffs->hs_descs_count + 1 : 0); + vla_item_with_sz(d, struct usb_descriptor_header *, ss_descs, + super ? ffs->ss_descs_count + 1 : 0); + vla_item_with_sz(d, short, inums, ffs->interfaces_count); + vla_item_with_sz(d, struct usb_os_desc_table, os_desc_table, + c->cdev->use_os_string ? ffs->interfaces_count : 0); + vla_item_with_sz(d, char[16], ext_compat, + c->cdev->use_os_string ? ffs->interfaces_count : 0); + vla_item_with_sz(d, struct usb_os_desc, os_desc, + c->cdev->use_os_string ? 
ffs->interfaces_count : 0); + vla_item_with_sz(d, struct usb_os_desc_ext_prop, ext_prop, + ffs->ms_os_descs_ext_prop_count); + vla_item_with_sz(d, char, ext_prop_name, + ffs->ms_os_descs_ext_prop_name_len); + vla_item_with_sz(d, char, ext_prop_data, + ffs->ms_os_descs_ext_prop_data_len); + vla_item_with_sz(d, char, raw_descs, ffs->raw_descs_length); + char *vlabuf; + + ENTER(); + + /* Has descriptors only for speeds gadget does not support */ + if (unlikely(!(full | high | super))) + return -ENOTSUPP; + + /* Allocate a single chunk, less management later on */ + vlabuf = kzalloc(vla_group_size(d), GFP_KERNEL); + if (unlikely(!vlabuf)) + return -ENOMEM; + + ffs->ms_os_descs_ext_prop_avail = vla_ptr(vlabuf, d, ext_prop); + ffs->ms_os_descs_ext_prop_name_avail = + vla_ptr(vlabuf, d, ext_prop_name); + ffs->ms_os_descs_ext_prop_data_avail = + vla_ptr(vlabuf, d, ext_prop_data); + + /* Copy descriptors */ + memcpy(vla_ptr(vlabuf, d, raw_descs), ffs->raw_descs, + ffs->raw_descs_length); + + memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz); + for (ret = ffs->eps_count; ret; --ret) { + struct ffs_ep *ptr; + + ptr = vla_ptr(vlabuf, d, eps); + ptr[ret].num = -1; + } + + /* Save pointers + * d_eps == vlabuf, func->eps used to kfree vlabuf later + */ + func->eps = vla_ptr(vlabuf, d, eps); + func->interfaces_nums = vla_ptr(vlabuf, d, inums); + + /* + * Go through all the endpoint descriptors and allocate + * endpoints first, so that later we can rewrite the endpoint + * numbers without worrying that it may be described later on. + */ + if (likely(full)) { + func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs); + fs_len = ffs_do_descs(ffs->fs_descs_count, + vla_ptr(vlabuf, d, raw_descs), + d_raw_descs__sz, + __ffs_func_bind_do_descs, func); + if (unlikely(fs_len < 0)) { + ret = fs_len; + goto error; + } + } else { + fs_len = 0; + } + + if (likely(high)) { + func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs); + hs_len = ffs_do_descs(ffs->hs_descs_count, + vla_ptr(vlabuf, d, raw_descs) + fs_len, + d_raw_descs__sz - fs_len, + __ffs_func_bind_do_descs, func); + if (unlikely(hs_len < 0)) { + ret = hs_len; + goto error; + } + } else { + hs_len = 0; + } + + if (likely(super)) { + func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs); + ss_len = ffs_do_descs(ffs->ss_descs_count, + vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len, + d_raw_descs__sz - fs_len - hs_len, + __ffs_func_bind_do_descs, func); + if (unlikely(ss_len < 0)) { + ret = ss_len; + goto error; + } + } else { + ss_len = 0; + } + + /* + * Now handle interface numbers allocation and interface and + * endpoint numbers rewriting. We can do that in one go + * now. + */ + ret = ffs_do_descs(ffs->fs_descs_count + + (high ? ffs->hs_descs_count : 0) + + (super ? 
ffs->ss_descs_count : 0), + vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz, + __ffs_func_bind_do_nums, func); + if (unlikely(ret < 0)) + goto error; + + func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table); + if (c->cdev->use_os_string) + for (i = 0; i < ffs->interfaces_count; ++i) { + struct usb_os_desc *desc; + + desc = func->function.os_desc_table[i].os_desc = + vla_ptr(vlabuf, d, os_desc) + + i * sizeof(struct usb_os_desc); + desc->ext_compat_id = + vla_ptr(vlabuf, d, ext_compat) + i * 16; + INIT_LIST_HEAD(&desc->ext_prop); + } + ret = ffs_do_os_descs(ffs->ms_os_descs_count, + vla_ptr(vlabuf, d, raw_descs) + + fs_len + hs_len + ss_len, + d_raw_descs__sz - fs_len - hs_len - ss_len, + __ffs_func_bind_do_os_desc, func); + if (unlikely(ret < 0)) + goto error; + func->function.os_desc_n = + c->cdev->use_os_string ? ffs->interfaces_count : 0; + + /* And we're done */ + ffs_event_add(ffs, FUNCTIONFS_BIND); + return 0; + +error: + /* XXX Do we need to release all claimed endpoints here? */ + return ret; +} + +static int ffs_func_bind(struct usb_configuration *c, + struct usb_function *f) +{ + struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c); + + if (IS_ERR(ffs_opts)) + return PTR_ERR(ffs_opts); + + return _ffs_func_bind(c, f); +} + + +/* Other USB function hooks *************************************************/ + +static void ffs_reset_work(struct work_struct *work) +{ + struct ffs_data *ffs = container_of(work, + struct ffs_data, reset_work); + ffs_data_reset(ffs); +} + +static int ffs_func_set_alt(struct usb_function *f, + unsigned interface, unsigned alt) +{ + struct ffs_function *func = ffs_func_from_usb(f); + struct ffs_data *ffs = func->ffs; + int ret = 0, intf; + + if (alt != (unsigned)-1) { + intf = ffs_func_revmap_intf(func, interface); + if (unlikely(intf < 0)) + return intf; + } + + if (ffs->func) + ffs_func_eps_disable(ffs->func); + + if (ffs->state == FFS_DEACTIVATED) { + ffs->state = FFS_CLOSING; + INIT_WORK(&ffs->reset_work, ffs_reset_work); + schedule_work(&ffs->reset_work); + return -ENODEV; + } + + if (ffs->state != FFS_ACTIVE) + return -ENODEV; + + if (alt == (unsigned)-1) { + ffs->func = NULL; + ffs_event_add(ffs, FUNCTIONFS_DISABLE); + return 0; + } + + ffs->func = func; + ret = ffs_func_eps_enable(func); + if (likely(ret >= 0)) + ffs_event_add(ffs, FUNCTIONFS_ENABLE); + return ret; +} + +static void ffs_func_disable(struct usb_function *f) +{ + ffs_func_set_alt(f, 0, (unsigned)-1); +} + +static int ffs_func_setup(struct usb_function *f, + const struct usb_ctrlrequest *creq) +{ + struct ffs_function *func = ffs_func_from_usb(f); + struct ffs_data *ffs = func->ffs; + unsigned long flags; + int ret; + + ENTER(); + + pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType); + pr_vdebug("creq->bRequest = %02x\n", creq->bRequest); + pr_vdebug("creq->wValue = %04x\n", le16_to_cpu(creq->wValue)); + pr_vdebug("creq->wIndex = %04x\n", le16_to_cpu(creq->wIndex)); + pr_vdebug("creq->wLength = %04x\n", le16_to_cpu(creq->wLength)); + + /* + * Most requests directed to interface go through here + * (notable exceptions are set/get interface) so we need to + * handle them. All other either handled by composite or + * passed to usb_configuration->setup() (if one is set). No + * matter, we will handle requests directed to endpoint here + * as well (as it's straightforward) but what to do with any + * other request? 
+ */ + if (ffs->state != FFS_ACTIVE) + return -ENODEV; + + switch (creq->bRequestType & USB_RECIP_MASK) { + case USB_RECIP_INTERFACE: + ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex)); + if (unlikely(ret < 0)) + return ret; + break; + + case USB_RECIP_ENDPOINT: + ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex)); + if (unlikely(ret < 0)) + return ret; + if (func->ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR) + ret = func->ffs->eps_addrmap[ret]; + break; + + default: + return -EOPNOTSUPP; + } + + spin_lock_irqsave(&ffs->ev.waitq.lock, flags); + ffs->ev.setup = *creq; + ffs->ev.setup.wIndex = cpu_to_le16(ret); + __ffs_event_add(ffs, FUNCTIONFS_SETUP); + spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); + + return 0; +} + +static void ffs_func_suspend(struct usb_function *f) +{ + ENTER(); + ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND); +} + +static void ffs_func_resume(struct usb_function *f) +{ + ENTER(); + ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME); +} + + +/* Endpoint and interface numbers reverse mapping ***************************/ + +static int ffs_func_revmap_ep(struct ffs_function *func, u8 num) +{ + num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK]; + return num ? num : -EDOM; +} + +static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf) +{ + short *nums = func->interfaces_nums; + unsigned count = func->ffs->interfaces_count; + + for (; count; --count, ++nums) { + if (*nums >= 0 && *nums == intf) + return nums - func->interfaces_nums; + } + + return -EDOM; +} + + +/* Devices management *******************************************************/ + +static LIST_HEAD(ffs_devices); + +static struct ffs_dev *_ffs_do_find_dev(const char *name) +{ + struct ffs_dev *dev; + + list_for_each_entry(dev, &ffs_devices, entry) { + if (!dev->name || !name) + continue; + if (strcmp(dev->name, name) == 0) + return dev; + } + + return NULL; +} + +/* + * ffs_lock must be taken by the caller of this function + */ +static struct ffs_dev *_ffs_get_single_dev(void) +{ + struct ffs_dev *dev; + + if (list_is_singular(&ffs_devices)) { + dev = list_first_entry(&ffs_devices, struct ffs_dev, entry); + if (dev->single) + return dev; + } + + return NULL; +} + +/* + * ffs_lock must be taken by the caller of this function + */ +static struct ffs_dev *_ffs_find_dev(const char *name) +{ + struct ffs_dev *dev; + + dev = _ffs_get_single_dev(); + if (dev) + return dev; + + return _ffs_do_find_dev(name); +} + +/* Configfs support *********************************************************/ + +static inline struct f_fs_opts *to_ffs_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_fs_opts, + func_inst.group); +} + +static void ffs_attr_release(struct config_item *item) +{ + struct f_fs_opts *opts = to_ffs_opts(item); + + usb_put_function_instance(&opts->func_inst); +} + +static struct configfs_item_operations ffs_item_ops = { + .release = ffs_attr_release, +}; + +static struct config_item_type ffs_func_type = { + .ct_item_ops = &ffs_item_ops, + .ct_owner = THIS_MODULE, +}; + + +/* Function registration interface ******************************************/ + +static void ffs_free_inst(struct usb_function_instance *f) +{ + struct f_fs_opts *opts; + + opts = to_f_fs_opts(f); + ffs_dev_lock(); + _ffs_free_dev(opts->dev); + ffs_dev_unlock(); + kfree(opts); +} + +#define MAX_INST_NAME_LEN 40 + +static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name) +{ + struct f_fs_opts *opts; + char *ptr; + const char 
*tmp; + int name_len, ret; + + name_len = strlen(name) + 1; + if (name_len > MAX_INST_NAME_LEN) + return -ENAMETOOLONG; + + ptr = kstrndup(name, name_len, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + opts = to_f_fs_opts(fi); + tmp = NULL; + + ffs_dev_lock(); + + tmp = opts->dev->name_allocated ? opts->dev->name : NULL; + ret = _ffs_name_dev(opts->dev, ptr); + if (ret) { + kfree(ptr); + ffs_dev_unlock(); + return ret; + } + opts->dev->name_allocated = true; + + ffs_dev_unlock(); + + kfree(tmp); + + return 0; +} + +static struct usb_function_instance *ffs_alloc_inst(void) +{ + struct f_fs_opts *opts; + struct ffs_dev *dev; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return ERR_PTR(-ENOMEM); + + opts->func_inst.set_inst_name = ffs_set_inst_name; + opts->func_inst.free_func_inst = ffs_free_inst; + ffs_dev_lock(); + dev = _ffs_alloc_dev(); + ffs_dev_unlock(); + if (IS_ERR(dev)) { + kfree(opts); + return ERR_CAST(dev); + } + opts->dev = dev; + dev->opts = opts; + + config_group_init_type_name(&opts->func_inst.group, "", + &ffs_func_type); + return &opts->func_inst; +} + +static void ffs_free(struct usb_function *f) +{ + kfree(ffs_func_from_usb(f)); +} + +static void ffs_func_unbind(struct usb_configuration *c, + struct usb_function *f) +{ + struct ffs_function *func = ffs_func_from_usb(f); + struct ffs_data *ffs = func->ffs; + struct f_fs_opts *opts = + container_of(f->fi, struct f_fs_opts, func_inst); + struct ffs_ep *ep = func->eps; + unsigned count = ffs->eps_count; + unsigned long flags; + + ENTER(); + if (ffs->func == func) { + ffs_func_eps_disable(func); + ffs->func = NULL; + } + + if (!--opts->refcnt) + functionfs_unbind(ffs); + + /* cleanup after autoconfig */ + spin_lock_irqsave(&func->ffs->eps_lock, flags); + do { + if (ep->ep && ep->req) + usb_ep_free_request(ep->ep, ep->req); + ep->req = NULL; + ++ep; + } while (--count); + spin_unlock_irqrestore(&func->ffs->eps_lock, flags); + kfree(func->eps); + func->eps = NULL; + /* + * eps, descriptors and interfaces_nums are allocated in the + * same chunk so only one free is required. 
+ */ + func->function.fs_descriptors = NULL; + func->function.hs_descriptors = NULL; + func->function.ss_descriptors = NULL; + func->interfaces_nums = NULL; + + ffs_event_add(ffs, FUNCTIONFS_UNBIND); +} + +static struct usb_function *ffs_alloc(struct usb_function_instance *fi) +{ + struct ffs_function *func; + + ENTER(); + + func = kzalloc(sizeof(*func), GFP_KERNEL); + if (unlikely(!func)) + return ERR_PTR(-ENOMEM); + + func->function.name = "Function FS Gadget"; + + func->function.bind = ffs_func_bind; + func->function.unbind = ffs_func_unbind; + func->function.set_alt = ffs_func_set_alt; + func->function.disable = ffs_func_disable; + func->function.setup = ffs_func_setup; + func->function.suspend = ffs_func_suspend; + func->function.resume = ffs_func_resume; + func->function.free_func = ffs_free; + + return &func->function; +} + +/* + * ffs_lock must be taken by the caller of this function + */ +static struct ffs_dev *_ffs_alloc_dev(void) +{ + struct ffs_dev *dev; + int ret; + + if (_ffs_get_single_dev()) + return ERR_PTR(-EBUSY); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return ERR_PTR(-ENOMEM); + + if (list_empty(&ffs_devices)) { + ret = functionfs_init(); + if (ret) { + kfree(dev); + return ERR_PTR(ret); + } + } + + list_add(&dev->entry, &ffs_devices); + + return dev; +} + +/* + * ffs_lock must be taken by the caller of this function + * The caller is responsible for "name" being available whenever f_fs needs it + */ +static int _ffs_name_dev(struct ffs_dev *dev, const char *name) +{ + struct ffs_dev *existing; + + existing = _ffs_do_find_dev(name); + if (existing) + return -EBUSY; + + dev->name = name; + + return 0; +} + +/* + * The caller is responsible for "name" being available whenever f_fs needs it + */ +int ffs_name_dev(struct ffs_dev *dev, const char *name) +{ + int ret; + + ffs_dev_lock(); + ret = _ffs_name_dev(dev, name); + ffs_dev_unlock(); + + return ret; +} +EXPORT_SYMBOL_GPL(ffs_name_dev); + +int ffs_single_dev(struct ffs_dev *dev) +{ + int ret; + + ret = 0; + ffs_dev_lock(); + + if (!list_is_singular(&ffs_devices)) + ret = -EBUSY; + else + dev->single = true; + + ffs_dev_unlock(); + return ret; +} +EXPORT_SYMBOL_GPL(ffs_single_dev); + +/* + * ffs_lock must be taken by the caller of this function + */ +static void _ffs_free_dev(struct ffs_dev *dev) +{ + list_del(&dev->entry); + if (dev->name_allocated) + kfree(dev->name); + kfree(dev); + if (list_empty(&ffs_devices)) + functionfs_cleanup(); +} + +static void *ffs_acquire_dev(const char *dev_name) +{ + struct ffs_dev *ffs_dev; + + ENTER(); + ffs_dev_lock(); + + ffs_dev = _ffs_find_dev(dev_name); + if (!ffs_dev) + ffs_dev = ERR_PTR(-ENOENT); + else if (ffs_dev->mounted) + ffs_dev = ERR_PTR(-EBUSY); + else if (ffs_dev->ffs_acquire_dev_callback && + ffs_dev->ffs_acquire_dev_callback(ffs_dev)) + ffs_dev = ERR_PTR(-ENOENT); + else + ffs_dev->mounted = true; + + ffs_dev_unlock(); + return ffs_dev; +} + +static void ffs_release_dev(struct ffs_data *ffs_data) +{ + struct ffs_dev *ffs_dev; + + ENTER(); + ffs_dev_lock(); + + ffs_dev = ffs_data->private_data; + if (ffs_dev) { + ffs_dev->mounted = false; + + if (ffs_dev->ffs_release_dev_callback) + ffs_dev->ffs_release_dev_callback(ffs_dev); + } + + ffs_dev_unlock(); +} + +static int ffs_ready(struct ffs_data *ffs) +{ + struct ffs_dev *ffs_obj; + int ret = 0; + + ENTER(); + ffs_dev_lock(); + + ffs_obj = ffs->private_data; + if (!ffs_obj) { + ret = -EINVAL; + goto done; + } + if (WARN_ON(ffs_obj->desc_ready)) { + ret = -EBUSY; + goto done; + } + + ffs_obj->desc_ready 
= true; + ffs_obj->ffs_data = ffs; + + if (ffs_obj->ffs_ready_callback) { + ret = ffs_obj->ffs_ready_callback(ffs); + if (ret) + goto done; + } + + set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags); +done: + ffs_dev_unlock(); + return ret; +} + +static void ffs_closed(struct ffs_data *ffs) +{ + struct ffs_dev *ffs_obj; + struct f_fs_opts *opts; + + ENTER(); + ffs_dev_lock(); + + ffs_obj = ffs->private_data; + if (!ffs_obj) + goto done; + + ffs_obj->desc_ready = false; + + if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) && + ffs_obj->ffs_closed_callback) + ffs_obj->ffs_closed_callback(ffs); + + if (ffs_obj->opts) + opts = ffs_obj->opts; + else + goto done; + + if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent + || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount)) + goto done; + + unregister_gadget_item(ffs_obj->opts-> + func_inst.group.cg_item.ci_parent->ci_parent); +done: + ffs_dev_unlock(); +} + +/* Misc helper functions ****************************************************/ + +static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock) +{ + return nonblock + ? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN + : mutex_lock_interruptible(mutex); +} + +static char *ffs_prepare_buffer(const char __user *buf, size_t len) +{ + char *data; + + if (unlikely(!len)) + return NULL; + + data = kmalloc(len, GFP_KERNEL); + if (unlikely(!data)) + return ERR_PTR(-ENOMEM); + + if (unlikely(__copy_from_user(data, buf, len))) { + kfree(data); + return ERR_PTR(-EFAULT); + } + + pr_vdebug("Buffer from user space:\n"); + ffs_dump_mem("", data, len); + + return data; +} + +DECLARE_USB_FUNCTION_INIT(ffs, ffs_alloc_inst, ffs_alloc); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michal Nazarewicz"); diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c new file mode 100644 index 000000000..f7f35a36c --- /dev/null +++ b/drivers/usb/gadget/function/f_hid.c @@ -0,0 +1,1014 @@ +/* + * f_hid.c -- USB HID function driver + * + * Copyright (C) 2010 Fabien Chouteau <fabien.chouteau@barco.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/hid.h> +#include <linux/idr.h> +#include <linux/cdev.h> +#include <linux/mutex.h> +#include <linux/poll.h> +#include <linux/uaccess.h> +#include <linux/wait.h> +#include <linux/sched.h> +#include <linux/usb/g_hid.h> + +#include "u_f.h" +#include "u_hid.h" + +#define HIDG_MINORS 4 + +static int major, minors; +static struct class *hidg_class; +static DEFINE_IDA(hidg_ida); +static DEFINE_MUTEX(hidg_ida_lock); /* protects access to hidg_ida */ + +/*-------------------------------------------------------------------------*/ +/* HID gadget struct */ + +struct f_hidg_req_list { + struct usb_request *req; + unsigned int pos; + struct list_head list; +}; + +struct f_hidg { + /* configuration */ + unsigned char bInterfaceSubClass; + unsigned char bInterfaceProtocol; + unsigned short report_desc_length; + char *report_desc; + unsigned short report_length; + + /* recv report */ + struct list_head completed_out_req; + spinlock_t spinlock; + wait_queue_head_t read_queue; + unsigned int qlen; + + /* send report */ + struct mutex lock; + bool write_pending; + wait_queue_head_t write_queue; + struct usb_request *req; + + int minor; + struct cdev cdev; + struct usb_function func; + + struct usb_ep *in_ep; + struct usb_ep *out_ep; +}; + +static inline struct f_hidg *func_to_hidg(struct usb_function *f) +{ + return container_of(f, struct f_hidg, func); +} + +/*-------------------------------------------------------------------------*/ +/* Static descriptors */ + +static struct usb_interface_descriptor hidg_interface_desc = { + .bLength = sizeof hidg_interface_desc, + .bDescriptorType = USB_DT_INTERFACE, + /* .bInterfaceNumber = DYNAMIC */ + .bAlternateSetting = 0, + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_HID, + /* .bInterfaceSubClass = DYNAMIC */ + /* .bInterfaceProtocol = DYNAMIC */ + /* .iInterface = DYNAMIC */ +}; + +static struct hid_descriptor hidg_desc = { + .bLength = sizeof hidg_desc, + .bDescriptorType = HID_DT_HID, + .bcdHID = 0x0101, + .bCountryCode = 0x00, + .bNumDescriptors = 0x1, + /*.desc[0].bDescriptorType = DYNAMIC */ + /*.desc[0].wDescriptorLenght = DYNAMIC */ +}; + +/* High-Speed Support */ + +static struct usb_endpoint_descriptor hidg_hs_in_ep_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + /*.wMaxPacketSize = DYNAMIC */ + .bInterval = 4, /* FIXME: Add this field in the + * HID gadget configuration? + * (struct hidg_func_descriptor) + */ +}; + +static struct usb_endpoint_descriptor hidg_hs_out_ep_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_INT, + /*.wMaxPacketSize = DYNAMIC */ + .bInterval = 4, /* FIXME: Add this field in the + * HID gadget configuration? 
+ * (struct hidg_func_descriptor) + */ +}; + +static struct usb_descriptor_header *hidg_hs_descriptors[] = { + (struct usb_descriptor_header *)&hidg_interface_desc, + (struct usb_descriptor_header *)&hidg_desc, + (struct usb_descriptor_header *)&hidg_hs_in_ep_desc, + (struct usb_descriptor_header *)&hidg_hs_out_ep_desc, + NULL, +}; + +/* Full-Speed Support */ + +static struct usb_endpoint_descriptor hidg_fs_in_ep_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + /*.wMaxPacketSize = DYNAMIC */ + .bInterval = 10, /* FIXME: Add this field in the + * HID gadget configuration? + * (struct hidg_func_descriptor) + */ +}; + +static struct usb_endpoint_descriptor hidg_fs_out_ep_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_INT, + /*.wMaxPacketSize = DYNAMIC */ + .bInterval = 10, /* FIXME: Add this field in the + * HID gadget configuration? + * (struct hidg_func_descriptor) + */ +}; + +static struct usb_descriptor_header *hidg_fs_descriptors[] = { + (struct usb_descriptor_header *)&hidg_interface_desc, + (struct usb_descriptor_header *)&hidg_desc, + (struct usb_descriptor_header *)&hidg_fs_in_ep_desc, + (struct usb_descriptor_header *)&hidg_fs_out_ep_desc, + NULL, +}; + +/*-------------------------------------------------------------------------*/ +/* Strings */ + +#define CT_FUNC_HID_IDX 0 + +static struct usb_string ct_func_string_defs[] = { + [CT_FUNC_HID_IDX].s = "HID Interface", + {}, /* end of list */ +}; + +static struct usb_gadget_strings ct_func_string_table = { + .language = 0x0409, /* en-US */ + .strings = ct_func_string_defs, +}; + +static struct usb_gadget_strings *ct_func_strings[] = { + &ct_func_string_table, + NULL, +}; + +/*-------------------------------------------------------------------------*/ +/* Char Device */ + +static ssize_t f_hidg_read(struct file *file, char __user *buffer, + size_t count, loff_t *ptr) +{ + struct f_hidg *hidg = file->private_data; + struct f_hidg_req_list *list; + struct usb_request *req; + unsigned long flags; + int ret; + + if (!count) + return 0; + + if (!access_ok(VERIFY_WRITE, buffer, count)) + return -EFAULT; + + spin_lock_irqsave(&hidg->spinlock, flags); + +#define READ_COND (!list_empty(&hidg->completed_out_req)) + + /* wait for at least one buffer to complete */ + while (!READ_COND) { + spin_unlock_irqrestore(&hidg->spinlock, flags); + if (file->f_flags & O_NONBLOCK) + return -EAGAIN; + + if (wait_event_interruptible(hidg->read_queue, READ_COND)) + return -ERESTARTSYS; + + spin_lock_irqsave(&hidg->spinlock, flags); + } + + /* pick the first one */ + list = list_first_entry(&hidg->completed_out_req, + struct f_hidg_req_list, list); + req = list->req; + count = min_t(unsigned int, count, req->actual - list->pos); + spin_unlock_irqrestore(&hidg->spinlock, flags); + + /* copy to user outside spinlock */ + count -= copy_to_user(buffer, req->buf + list->pos, count); + list->pos += count; + + /* + * if this request is completely handled and transfered to + * userspace, remove its entry from the list and requeue it + * again. Otherwise, we will revisit it again upon the next + * call, taking into account its current read position. 
+ */ + if (list->pos == req->actual) { + spin_lock_irqsave(&hidg->spinlock, flags); + list_del(&list->list); + kfree(list); + spin_unlock_irqrestore(&hidg->spinlock, flags); + + req->length = hidg->report_length; + ret = usb_ep_queue(hidg->out_ep, req, GFP_KERNEL); + if (ret < 0) + return ret; + } + + return count; +} + +static void f_hidg_req_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct f_hidg *hidg = (struct f_hidg *)ep->driver_data; + + if (req->status != 0) { + ERROR(hidg->func.config->cdev, + "End Point Request ERROR: %d\n", req->status); + } + + hidg->write_pending = 0; + wake_up(&hidg->write_queue); +} + +static ssize_t f_hidg_write(struct file *file, const char __user *buffer, + size_t count, loff_t *offp) +{ + struct f_hidg *hidg = file->private_data; + ssize_t status = -ENOMEM; + + if (!access_ok(VERIFY_READ, buffer, count)) + return -EFAULT; + + mutex_lock(&hidg->lock); + +#define WRITE_COND (!hidg->write_pending) + + /* write queue */ + while (!WRITE_COND) { + mutex_unlock(&hidg->lock); + if (file->f_flags & O_NONBLOCK) + return -EAGAIN; + + if (wait_event_interruptible_exclusive( + hidg->write_queue, WRITE_COND)) + return -ERESTARTSYS; + + mutex_lock(&hidg->lock); + } + + count = min_t(unsigned, count, hidg->report_length); + status = copy_from_user(hidg->req->buf, buffer, count); + + if (status != 0) { + ERROR(hidg->func.config->cdev, + "copy_from_user error\n"); + mutex_unlock(&hidg->lock); + return -EINVAL; + } + + hidg->req->status = 0; + hidg->req->zero = 0; + hidg->req->length = count; + hidg->req->complete = f_hidg_req_complete; + hidg->req->context = hidg; + hidg->write_pending = 1; + + status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC); + if (status < 0) { + ERROR(hidg->func.config->cdev, + "usb_ep_queue error on int endpoint %zd\n", status); + hidg->write_pending = 0; + wake_up(&hidg->write_queue); + } else { + status = count; + } + + mutex_unlock(&hidg->lock); + + return status; +} + +static unsigned int f_hidg_poll(struct file *file, poll_table *wait) +{ + struct f_hidg *hidg = file->private_data; + unsigned int ret = 0; + + poll_wait(file, &hidg->read_queue, wait); + poll_wait(file, &hidg->write_queue, wait); + + if (WRITE_COND) + ret |= POLLOUT | POLLWRNORM; + + if (READ_COND) + ret |= POLLIN | POLLRDNORM; + + return ret; +} + +#undef WRITE_COND +#undef READ_COND + +static int f_hidg_release(struct inode *inode, struct file *fd) +{ + fd->private_data = NULL; + return 0; +} + +static int f_hidg_open(struct inode *inode, struct file *fd) +{ + struct f_hidg *hidg = + container_of(inode->i_cdev, struct f_hidg, cdev); + + fd->private_data = hidg; + + return 0; +} + +/*-------------------------------------------------------------------------*/ +/* usb_function */ + +static inline struct usb_request *hidg_alloc_ep_req(struct usb_ep *ep, + unsigned length) +{ + return alloc_ep_req(ep, length, length); +} + +static void hidg_set_report_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct f_hidg *hidg = (struct f_hidg *) req->context; + struct f_hidg_req_list *req_list; + unsigned long flags; + + req_list = kzalloc(sizeof(*req_list), GFP_ATOMIC); + if (!req_list) + return; + + req_list->req = req; + + spin_lock_irqsave(&hidg->spinlock, flags); + list_add_tail(&req_list->list, &hidg->completed_out_req); + spin_unlock_irqrestore(&hidg->spinlock, flags); + + wake_up(&hidg->read_queue); +} + +static int hidg_setup(struct usb_function *f, + const struct usb_ctrlrequest *ctrl) +{ + struct f_hidg *hidg = func_to_hidg(f); + struct 
usb_composite_dev *cdev = f->config->cdev; + struct usb_request *req = cdev->req; + int status = 0; + __u16 value, length; + + value = __le16_to_cpu(ctrl->wValue); + length = __le16_to_cpu(ctrl->wLength); + + VDBG(cdev, + "%s crtl_request : bRequestType:0x%x bRequest:0x%x Value:0x%x\n", + __func__, ctrl->bRequestType, ctrl->bRequest, value); + + switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { + case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 + | HID_REQ_GET_REPORT): + VDBG(cdev, "get_report\n"); + + /* send an empty report */ + length = min_t(unsigned, length, hidg->report_length); + memset(req->buf, 0x0, length); + + goto respond; + break; + + case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 + | HID_REQ_GET_PROTOCOL): + VDBG(cdev, "get_protocol\n"); + goto stall; + break; + + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 + | HID_REQ_SET_REPORT): + VDBG(cdev, "set_report | wLength=%d\n", ctrl->wLength); + goto stall; + break; + + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 + | HID_REQ_SET_PROTOCOL): + VDBG(cdev, "set_protocol\n"); + goto stall; + break; + + case ((USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_INTERFACE) << 8 + | USB_REQ_GET_DESCRIPTOR): + switch (value >> 8) { + case HID_DT_HID: + { + struct hid_descriptor hidg_desc_copy = hidg_desc; + + VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n"); + hidg_desc_copy.desc[0].bDescriptorType = HID_DT_REPORT; + hidg_desc_copy.desc[0].wDescriptorLength = + cpu_to_le16(hidg->report_desc_length); + + length = min_t(unsigned short, length, + hidg_desc_copy.bLength); + memcpy(req->buf, &hidg_desc_copy, length); + goto respond; + break; + } + case HID_DT_REPORT: + VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n"); + length = min_t(unsigned short, length, + hidg->report_desc_length); + memcpy(req->buf, hidg->report_desc, length); + goto respond; + break; + + default: + VDBG(cdev, "Unknown descriptor request 0x%x\n", + value >> 8); + goto stall; + break; + } + break; + + default: + VDBG(cdev, "Unknown request 0x%x\n", + ctrl->bRequest); + goto stall; + break; + } + +stall: + return -EOPNOTSUPP; + +respond: + req->zero = 0; + req->length = length; + status = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + if (status < 0) + ERROR(cdev, "usb_ep_queue error on ep0 %d\n", value); + return status; +} + +static void hidg_disable(struct usb_function *f) +{ + struct f_hidg *hidg = func_to_hidg(f); + struct f_hidg_req_list *list, *next; + + usb_ep_disable(hidg->in_ep); + hidg->in_ep->driver_data = NULL; + + usb_ep_disable(hidg->out_ep); + hidg->out_ep->driver_data = NULL; + + list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) { + list_del(&list->list); + kfree(list); + } +} + +static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) +{ + struct usb_composite_dev *cdev = f->config->cdev; + struct f_hidg *hidg = func_to_hidg(f); + int i, status = 0; + + VDBG(cdev, "hidg_set_alt intf:%d alt:%d\n", intf, alt); + + if (hidg->in_ep != NULL) { + /* restart endpoint */ + if (hidg->in_ep->driver_data != NULL) + usb_ep_disable(hidg->in_ep); + + status = config_ep_by_speed(f->config->cdev->gadget, f, + hidg->in_ep); + if (status) { + ERROR(cdev, "config_ep_by_speed FAILED!\n"); + goto fail; + } + status = usb_ep_enable(hidg->in_ep); + if (status < 0) { + ERROR(cdev, "Enable IN endpoint FAILED!\n"); + goto fail; + } + hidg->in_ep->driver_data = hidg; + } + + + if (hidg->out_ep != NULL) { + /* restart endpoint */ + if (hidg->out_ep->driver_data != NULL) + 
usb_ep_disable(hidg->out_ep); + + status = config_ep_by_speed(f->config->cdev->gadget, f, + hidg->out_ep); + if (status) { + ERROR(cdev, "config_ep_by_speed FAILED!\n"); + goto fail; + } + status = usb_ep_enable(hidg->out_ep); + if (status < 0) { + ERROR(cdev, "Enable IN endpoint FAILED!\n"); + goto fail; + } + hidg->out_ep->driver_data = hidg; + + /* + * allocate a bunch of read buffers and queue them all at once. + */ + for (i = 0; i < hidg->qlen && status == 0; i++) { + struct usb_request *req = + hidg_alloc_ep_req(hidg->out_ep, + hidg->report_length); + if (req) { + req->complete = hidg_set_report_complete; + req->context = hidg; + status = usb_ep_queue(hidg->out_ep, req, + GFP_ATOMIC); + if (status) + ERROR(cdev, "%s queue req --> %d\n", + hidg->out_ep->name, status); + } else { + usb_ep_disable(hidg->out_ep); + hidg->out_ep->driver_data = NULL; + status = -ENOMEM; + goto fail; + } + } + } + +fail: + return status; +} + +static const struct file_operations f_hidg_fops = { + .owner = THIS_MODULE, + .open = f_hidg_open, + .release = f_hidg_release, + .write = f_hidg_write, + .read = f_hidg_read, + .poll = f_hidg_poll, + .llseek = noop_llseek, +}; + +static int hidg_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_ep *ep; + struct f_hidg *hidg = func_to_hidg(f); + struct usb_string *us; + struct device *device; + int status; + dev_t dev; + + /* maybe allocate device-global string IDs, and patch descriptors */ + us = usb_gstrings_attach(c->cdev, ct_func_strings, + ARRAY_SIZE(ct_func_string_defs)); + if (IS_ERR(us)) + return PTR_ERR(us); + hidg_interface_desc.iInterface = us[CT_FUNC_HID_IDX].id; + + /* allocate instance-specific interface IDs, and patch descriptors */ + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + hidg_interface_desc.bInterfaceNumber = status; + + /* allocate instance-specific endpoints */ + status = -ENODEV; + ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_in_ep_desc); + if (!ep) + goto fail; + ep->driver_data = c->cdev; /* claim */ + hidg->in_ep = ep; + + ep = usb_ep_autoconfig(c->cdev->gadget, &hidg_fs_out_ep_desc); + if (!ep) + goto fail; + ep->driver_data = c->cdev; /* claim */ + hidg->out_ep = ep; + + /* preallocate request and buffer */ + status = -ENOMEM; + hidg->req = usb_ep_alloc_request(hidg->in_ep, GFP_KERNEL); + if (!hidg->req) + goto fail; + + hidg->req->buf = kmalloc(hidg->report_length, GFP_KERNEL); + if (!hidg->req->buf) + goto fail; + + /* set descriptor dynamic values */ + hidg_interface_desc.bInterfaceSubClass = hidg->bInterfaceSubClass; + hidg_interface_desc.bInterfaceProtocol = hidg->bInterfaceProtocol; + hidg_hs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); + hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); + hidg_hs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); + hidg_fs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length); + /* + * We can use hidg_desc struct here but we should not relay + * that its content won't change after returning from this function. 
+ */ + hidg_desc.desc[0].bDescriptorType = HID_DT_REPORT; + hidg_desc.desc[0].wDescriptorLength = + cpu_to_le16(hidg->report_desc_length); + + hidg_hs_in_ep_desc.bEndpointAddress = + hidg_fs_in_ep_desc.bEndpointAddress; + hidg_hs_out_ep_desc.bEndpointAddress = + hidg_fs_out_ep_desc.bEndpointAddress; + + status = usb_assign_descriptors(f, hidg_fs_descriptors, + hidg_hs_descriptors, NULL); + if (status) + goto fail; + + mutex_init(&hidg->lock); + spin_lock_init(&hidg->spinlock); + init_waitqueue_head(&hidg->write_queue); + init_waitqueue_head(&hidg->read_queue); + INIT_LIST_HEAD(&hidg->completed_out_req); + + /* create char device */ + cdev_init(&hidg->cdev, &f_hidg_fops); + dev = MKDEV(major, hidg->minor); + status = cdev_add(&hidg->cdev, dev, 1); + if (status) + goto fail_free_descs; + + device = device_create(hidg_class, NULL, dev, NULL, + "%s%d", "hidg", hidg->minor); + if (IS_ERR(device)) { + status = PTR_ERR(device); + goto del; + } + + return 0; +del: + cdev_del(&hidg->cdev); +fail_free_descs: + usb_free_all_descriptors(f); +fail: + ERROR(f->config->cdev, "hidg_bind FAILED\n"); + if (hidg->req != NULL) { + kfree(hidg->req->buf); + if (hidg->in_ep != NULL) + usb_ep_free_request(hidg->in_ep, hidg->req); + } + + return status; +} + +static inline int hidg_get_minor(void) +{ + int ret; + + ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL); + + return ret; +} + +static inline struct f_hid_opts *to_f_hid_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_hid_opts, + func_inst.group); +} + +CONFIGFS_ATTR_STRUCT(f_hid_opts); +CONFIGFS_ATTR_OPS(f_hid_opts); + +static void hid_attr_release(struct config_item *item) +{ + struct f_hid_opts *opts = to_f_hid_opts(item); + + usb_put_function_instance(&opts->func_inst); +} + +static struct configfs_item_operations hidg_item_ops = { + .release = hid_attr_release, + .show_attribute = f_hid_opts_attr_show, + .store_attribute = f_hid_opts_attr_store, +}; + +#define F_HID_OPT(name, prec, limit) \ +static ssize_t f_hid_opts_##name##_show(struct f_hid_opts *opts, char *page)\ +{ \ + int result; \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%d\n", opts->name); \ + mutex_unlock(&opts->lock); \ + \ + return result; \ +} \ + \ +static ssize_t f_hid_opts_##name##_store(struct f_hid_opts *opts, \ + const char *page, size_t len) \ +{ \ + int ret; \ + u##prec num; \ + \ + mutex_lock(&opts->lock); \ + if (opts->refcnt) { \ + ret = -EBUSY; \ + goto end; \ + } \ + \ + ret = kstrtou##prec(page, 0, &num); \ + if (ret) \ + goto end; \ + \ + if (num > limit) { \ + ret = -EINVAL; \ + goto end; \ + } \ + opts->name = num; \ + ret = len; \ + \ +end: \ + mutex_unlock(&opts->lock); \ + return ret; \ +} \ + \ +static struct f_hid_opts_attribute f_hid_opts_##name = \ + __CONFIGFS_ATTR(name, S_IRUGO | S_IWUSR, f_hid_opts_##name##_show,\ + f_hid_opts_##name##_store) + +F_HID_OPT(subclass, 8, 255); +F_HID_OPT(protocol, 8, 255); +F_HID_OPT(report_length, 16, 65535); + +static ssize_t f_hid_opts_report_desc_show(struct f_hid_opts *opts, char *page) +{ + int result; + + mutex_lock(&opts->lock); + result = opts->report_desc_length; + memcpy(page, opts->report_desc, opts->report_desc_length); + mutex_unlock(&opts->lock); + + return result; +} + +static ssize_t f_hid_opts_report_desc_store(struct f_hid_opts *opts, + const char *page, size_t len) +{ + int ret = -EBUSY; + char *d; + + mutex_lock(&opts->lock); + + if (opts->refcnt) + goto end; + if (len > PAGE_SIZE) { + ret = -ENOSPC; + goto end; + } + d = kmemdup(page, len, GFP_KERNEL); + 
if (!d) { + ret = -ENOMEM; + goto end; + } + kfree(opts->report_desc); + opts->report_desc = d; + opts->report_desc_length = len; + opts->report_desc_alloc = true; + ret = len; +end: + mutex_unlock(&opts->lock); + return ret; +} + +static struct f_hid_opts_attribute f_hid_opts_report_desc = + __CONFIGFS_ATTR(report_desc, S_IRUGO | S_IWUSR, + f_hid_opts_report_desc_show, + f_hid_opts_report_desc_store); + +static struct configfs_attribute *hid_attrs[] = { + &f_hid_opts_subclass.attr, + &f_hid_opts_protocol.attr, + &f_hid_opts_report_length.attr, + &f_hid_opts_report_desc.attr, + NULL, +}; + +static struct config_item_type hid_func_type = { + .ct_item_ops = &hidg_item_ops, + .ct_attrs = hid_attrs, + .ct_owner = THIS_MODULE, +}; + +static inline void hidg_put_minor(int minor) +{ + ida_simple_remove(&hidg_ida, minor); +} + +static void hidg_free_inst(struct usb_function_instance *f) +{ + struct f_hid_opts *opts; + + opts = container_of(f, struct f_hid_opts, func_inst); + + mutex_lock(&hidg_ida_lock); + + hidg_put_minor(opts->minor); + if (idr_is_empty(&hidg_ida.idr)) + ghid_cleanup(); + + mutex_unlock(&hidg_ida_lock); + + if (opts->report_desc_alloc) + kfree(opts->report_desc); + + kfree(opts); +} + +static struct usb_function_instance *hidg_alloc_inst(void) +{ + struct f_hid_opts *opts; + struct usb_function_instance *ret; + int status = 0; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return ERR_PTR(-ENOMEM); + mutex_init(&opts->lock); + opts->func_inst.free_func_inst = hidg_free_inst; + ret = &opts->func_inst; + + mutex_lock(&hidg_ida_lock); + + if (idr_is_empty(&hidg_ida.idr)) { + status = ghid_setup(NULL, HIDG_MINORS); + if (status) { + ret = ERR_PTR(status); + kfree(opts); + goto unlock; + } + } + + opts->minor = hidg_get_minor(); + if (opts->minor < 0) { + ret = ERR_PTR(opts->minor); + kfree(opts); + if (idr_is_empty(&hidg_ida.idr)) + ghid_cleanup(); + goto unlock; + } + config_group_init_type_name(&opts->func_inst.group, "", &hid_func_type); + +unlock: + mutex_unlock(&hidg_ida_lock); + return ret; +} + +static void hidg_free(struct usb_function *f) +{ + struct f_hidg *hidg; + struct f_hid_opts *opts; + + hidg = func_to_hidg(f); + opts = container_of(f->fi, struct f_hid_opts, func_inst); + kfree(hidg->report_desc); + kfree(hidg); + mutex_lock(&opts->lock); + --opts->refcnt; + mutex_unlock(&opts->lock); +} + +static void hidg_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct f_hidg *hidg = func_to_hidg(f); + + device_destroy(hidg_class, MKDEV(major, hidg->minor)); + cdev_del(&hidg->cdev); + + /* disable/free request and end point */ + usb_ep_disable(hidg->in_ep); + kfree(hidg->req->buf); + usb_ep_free_request(hidg->in_ep, hidg->req); + + usb_free_all_descriptors(f); +} + +static struct usb_function *hidg_alloc(struct usb_function_instance *fi) +{ + struct f_hidg *hidg; + struct f_hid_opts *opts; + + /* allocate and initialize one new instance */ + hidg = kzalloc(sizeof(*hidg), GFP_KERNEL); + if (!hidg) + return ERR_PTR(-ENOMEM); + + opts = container_of(fi, struct f_hid_opts, func_inst); + + mutex_lock(&opts->lock); + ++opts->refcnt; + + hidg->minor = opts->minor; + hidg->bInterfaceSubClass = opts->subclass; + hidg->bInterfaceProtocol = opts->protocol; + hidg->report_length = opts->report_length; + hidg->report_desc_length = opts->report_desc_length; + if (opts->report_desc) { + hidg->report_desc = kmemdup(opts->report_desc, + opts->report_desc_length, + GFP_KERNEL); + if (!hidg->report_desc) { + kfree(hidg); + mutex_unlock(&opts->lock); + return 
ERR_PTR(-ENOMEM); + } + } + + mutex_unlock(&opts->lock); + + hidg->func.name = "hid"; + hidg->func.bind = hidg_bind; + hidg->func.unbind = hidg_unbind; + hidg->func.set_alt = hidg_set_alt; + hidg->func.disable = hidg_disable; + hidg->func.setup = hidg_setup; + hidg->func.free_func = hidg_free; + + /* this could me made configurable at some point */ + hidg->qlen = 4; + + return &hidg->func; +} + +DECLARE_USB_FUNCTION_INIT(hid, hidg_alloc_inst, hidg_alloc); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Fabien Chouteau"); + +int ghid_setup(struct usb_gadget *g, int count) +{ + int status; + dev_t dev; + + hidg_class = class_create(THIS_MODULE, "hidg"); + if (IS_ERR(hidg_class)) { + status = PTR_ERR(hidg_class); + hidg_class = NULL; + return status; + } + + status = alloc_chrdev_region(&dev, 0, count, "hidg"); + if (status) { + class_destroy(hidg_class); + hidg_class = NULL; + return status; + } + + major = MAJOR(dev); + minors = count; + + return 0; +} + +void ghid_cleanup(void) +{ + if (major) { + unregister_chrdev_region(MKDEV(major, 0), minors); + major = minors = 0; + } + + class_destroy(hidg_class); + hidg_class = NULL; +} diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c new file mode 100644 index 000000000..39f49f1ad --- /dev/null +++ b/drivers/usb/gadget/function/f_loopback.c @@ -0,0 +1,568 @@ +/* + * f_loopback.c - USB peripheral loopback configuration driver + * + * Copyright (C) 2003-2008 David Brownell + * Copyright (C) 2008 by Nokia Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +/* #define VERBOSE_DEBUG */ + +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/err.h> +#include <linux/usb/composite.h> + +#include "g_zero.h" +#include "u_f.h" + +/* + * LOOPBACK FUNCTION ... a testing vehicle for USB peripherals, + * + * This takes messages of various sizes written OUT to a device, and loops + * them back so they can be read IN from it. It has been used by certain + * test applications. It supports limited testing of data queueing logic. + * + * + * This is currently packaged as a configuration driver, which can't be + * combined with other functions to make composite devices. However, it + * can be combined with other independent configurations. 
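 *
 * As a purely illustrative host-side sketch (not part of this driver,
 * and with error checking omitted), the OUT-then-IN echo described
 * above can be exercised with libusb-1.0 along these lines; the
 * 0x0525/0xa4a0 vendor/product IDs and the 0x01/0x81 endpoint
 * addresses are placeholders that depend on the gadget configuration
 * and UDC actually in use:
 *
 *	#include <libusb-1.0/libusb.h>
 *
 *	int main(void)
 *	{
 *		unsigned char buf[512] = "ping";
 *		int moved = 0;
 *		libusb_context *ctx;
 *		libusb_device_handle *h;
 *
 *		libusb_init(&ctx);
 *		h = libusb_open_device_with_vid_pid(ctx, 0x0525, 0xa4a0);
 *		libusb_claim_interface(h, 0);
 *		// write a buffer OUT, then read the looped-back copy IN
 *		libusb_bulk_transfer(h, 0x01, buf, sizeof(buf), &moved, 1000);
 *		libusb_bulk_transfer(h, 0x81, buf, sizeof(buf), &moved, 1000);
 *		libusb_release_interface(h, 0);
 *		libusb_close(h);
 *		libusb_exit(ctx);
 *		return 0;
 *	}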
+ */ +struct f_loopback { + struct usb_function function; + + struct usb_ep *in_ep; + struct usb_ep *out_ep; +}; + +static inline struct f_loopback *func_to_loop(struct usb_function *f) +{ + return container_of(f, struct f_loopback, function); +} + +static unsigned qlen; +static unsigned buflen; + +/*-------------------------------------------------------------------------*/ + +static struct usb_interface_descriptor loopback_intf = { + .bLength = sizeof loopback_intf, + .bDescriptorType = USB_DT_INTERFACE, + + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, + /* .iInterface = DYNAMIC */ +}; + +/* full speed support: */ + +static struct usb_endpoint_descriptor fs_loop_source_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor fs_loop_sink_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_descriptor_header *fs_loopback_descs[] = { + (struct usb_descriptor_header *) &loopback_intf, + (struct usb_descriptor_header *) &fs_loop_sink_desc, + (struct usb_descriptor_header *) &fs_loop_source_desc, + NULL, +}; + +/* high speed support: */ + +static struct usb_endpoint_descriptor hs_loop_source_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor hs_loop_sink_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), +}; + +static struct usb_descriptor_header *hs_loopback_descs[] = { + (struct usb_descriptor_header *) &loopback_intf, + (struct usb_descriptor_header *) &hs_loop_source_desc, + (struct usb_descriptor_header *) &hs_loop_sink_desc, + NULL, +}; + +/* super speed support: */ + +static struct usb_endpoint_descriptor ss_loop_source_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(1024), +}; + +static struct usb_ss_ep_comp_descriptor ss_loop_source_comp_desc = { + .bLength = USB_DT_SS_EP_COMP_SIZE, + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + .bMaxBurst = 0, + .bmAttributes = 0, + .wBytesPerInterval = 0, +}; + +static struct usb_endpoint_descriptor ss_loop_sink_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(1024), +}; + +static struct usb_ss_ep_comp_descriptor ss_loop_sink_comp_desc = { + .bLength = USB_DT_SS_EP_COMP_SIZE, + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + .bMaxBurst = 0, + .bmAttributes = 0, + .wBytesPerInterval = 0, +}; + +static struct usb_descriptor_header *ss_loopback_descs[] = { + (struct usb_descriptor_header *) &loopback_intf, + (struct usb_descriptor_header *) &ss_loop_source_desc, + (struct usb_descriptor_header *) &ss_loop_source_comp_desc, + (struct usb_descriptor_header *) &ss_loop_sink_desc, + (struct usb_descriptor_header *) &ss_loop_sink_comp_desc, + NULL, +}; + +/* function-specific strings: */ + +static struct usb_string strings_loopback[] = { + [0].s = "loop input to output", + { } /* end of list */ +}; + +static struct usb_gadget_strings stringtab_loop = { + .language = 0x0409, /* en-us 
*/ + .strings = strings_loopback, +}; + +static struct usb_gadget_strings *loopback_strings[] = { + &stringtab_loop, + NULL, +}; + +/*-------------------------------------------------------------------------*/ + +static int loopback_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct f_loopback *loop = func_to_loop(f); + int id; + int ret; + + /* allocate interface ID(s) */ + id = usb_interface_id(c, f); + if (id < 0) + return id; + loopback_intf.bInterfaceNumber = id; + + id = usb_string_id(cdev); + if (id < 0) + return id; + strings_loopback[0].id = id; + loopback_intf.iInterface = id; + + /* allocate endpoints */ + + loop->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_loop_source_desc); + if (!loop->in_ep) { +autoconf_fail: + ERROR(cdev, "%s: can't autoconfigure on %s\n", + f->name, cdev->gadget->name); + return -ENODEV; + } + loop->in_ep->driver_data = cdev; /* claim */ + + loop->out_ep = usb_ep_autoconfig(cdev->gadget, &fs_loop_sink_desc); + if (!loop->out_ep) + goto autoconf_fail; + loop->out_ep->driver_data = cdev; /* claim */ + + /* support high speed hardware */ + hs_loop_source_desc.bEndpointAddress = + fs_loop_source_desc.bEndpointAddress; + hs_loop_sink_desc.bEndpointAddress = fs_loop_sink_desc.bEndpointAddress; + + /* support super speed hardware */ + ss_loop_source_desc.bEndpointAddress = + fs_loop_source_desc.bEndpointAddress; + ss_loop_sink_desc.bEndpointAddress = fs_loop_sink_desc.bEndpointAddress; + + ret = usb_assign_descriptors(f, fs_loopback_descs, hs_loopback_descs, + ss_loopback_descs); + if (ret) + return ret; + + DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n", + (gadget_is_superspeed(c->cdev->gadget) ? "super" : + (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")), + f->name, loop->in_ep->name, loop->out_ep->name); + return 0; +} + +static void lb_free_func(struct usb_function *f) +{ + struct f_lb_opts *opts; + + opts = container_of(f->fi, struct f_lb_opts, func_inst); + + mutex_lock(&opts->lock); + opts->refcnt--; + mutex_unlock(&opts->lock); + + usb_free_all_descriptors(f); + kfree(func_to_loop(f)); +} + +static void loopback_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct f_loopback *loop = ep->driver_data; + struct usb_composite_dev *cdev = loop->function.config->cdev; + int status = req->status; + + switch (status) { + + case 0: /* normal completion? */ + if (ep == loop->out_ep) { + req->zero = (req->actual < req->length); + req->length = req->actual; + } + + /* queue the buffer for some later OUT packet */ + req->length = buflen; + status = usb_ep_queue(ep, req, GFP_ATOMIC); + if (status == 0) + return; + + /* "should never get here" */ + /* FALLTHROUGH */ + + default: + ERROR(cdev, "%s loop complete --> %d, %d/%d\n", ep->name, + status, req->actual, req->length); + /* FALLTHROUGH */ + + /* NOTE: since this driver doesn't maintain an explicit record + * of requests it submitted (just maintains qlen count), we + * rely on the hardware driver to clean up on disconnect or + * endpoint disable. 
+ */ + case -ECONNABORTED: /* hardware forced ep reset */ + case -ECONNRESET: /* request dequeued */ + case -ESHUTDOWN: /* disconnect from host */ + free_ep_req(ep, req); + return; + } +} + +static void disable_loopback(struct f_loopback *loop) +{ + struct usb_composite_dev *cdev; + + cdev = loop->function.config->cdev; + disable_endpoints(cdev, loop->in_ep, loop->out_ep, NULL, NULL); + VDBG(cdev, "%s disabled\n", loop->function.name); +} + +static inline struct usb_request *lb_alloc_ep_req(struct usb_ep *ep, int len) +{ + return alloc_ep_req(ep, len, buflen); +} + +static int enable_endpoint(struct usb_composite_dev *cdev, struct f_loopback *loop, + struct usb_ep *ep) +{ + struct usb_request *req; + unsigned i; + int result; + + /* + * one endpoint writes data back IN to the host while another endpoint + * just reads OUT packets + */ + result = config_ep_by_speed(cdev->gadget, &(loop->function), ep); + if (result) + goto fail0; + result = usb_ep_enable(ep); + if (result < 0) + goto fail0; + ep->driver_data = loop; + + /* + * allocate a bunch of read buffers and queue them all at once. + * we buffer at most 'qlen' transfers; fewer if any need more + * than 'buflen' bytes each. + */ + for (i = 0; i < qlen && result == 0; i++) { + req = lb_alloc_ep_req(ep, 0); + if (!req) + goto fail1; + + req->complete = loopback_complete; + result = usb_ep_queue(ep, req, GFP_ATOMIC); + if (result) { + ERROR(cdev, "%s queue req --> %d\n", + ep->name, result); + goto fail1; + } + } + + return 0; + +fail1: + usb_ep_disable(ep); + +fail0: + return result; +} + +static int +enable_loopback(struct usb_composite_dev *cdev, struct f_loopback *loop) +{ + int result = 0; + + result = enable_endpoint(cdev, loop, loop->in_ep); + if (result) + return result; + + result = enable_endpoint(cdev, loop, loop->out_ep); + if (result) + return result; + + DBG(cdev, "%s enabled\n", loop->function.name); + return result; +} + +static int loopback_set_alt(struct usb_function *f, + unsigned intf, unsigned alt) +{ + struct f_loopback *loop = func_to_loop(f); + struct usb_composite_dev *cdev = f->config->cdev; + + /* we know alt is zero */ + if (loop->in_ep->driver_data) + disable_loopback(loop); + return enable_loopback(cdev, loop); +} + +static void loopback_disable(struct usb_function *f) +{ + struct f_loopback *loop = func_to_loop(f); + + disable_loopback(loop); +} + +static struct usb_function *loopback_alloc(struct usb_function_instance *fi) +{ + struct f_loopback *loop; + struct f_lb_opts *lb_opts; + + loop = kzalloc(sizeof *loop, GFP_KERNEL); + if (!loop) + return ERR_PTR(-ENOMEM); + + lb_opts = container_of(fi, struct f_lb_opts, func_inst); + + mutex_lock(&lb_opts->lock); + lb_opts->refcnt++; + mutex_unlock(&lb_opts->lock); + + buflen = lb_opts->bulk_buflen; + qlen = lb_opts->qlen; + if (!qlen) + qlen = 32; + + loop->function.name = "loopback"; + loop->function.bind = loopback_bind; + loop->function.set_alt = loopback_set_alt; + loop->function.disable = loopback_disable; + loop->function.strings = loopback_strings; + + loop->function.free_func = lb_free_func; + + return &loop->function; +} + +static inline struct f_lb_opts *to_f_lb_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_lb_opts, + func_inst.group); +} + +CONFIGFS_ATTR_STRUCT(f_lb_opts); +CONFIGFS_ATTR_OPS(f_lb_opts); + +static void lb_attr_release(struct config_item *item) +{ + struct f_lb_opts *lb_opts = to_f_lb_opts(item); + + usb_put_function_instance(&lb_opts->func_inst); +} + +static struct configfs_item_operations 
lb_item_ops = { + .release = lb_attr_release, + .show_attribute = f_lb_opts_attr_show, + .store_attribute = f_lb_opts_attr_store, +}; + +static ssize_t f_lb_opts_qlen_show(struct f_lb_opts *opts, char *page) +{ + int result; + + mutex_lock(&opts->lock); + result = sprintf(page, "%d", opts->qlen); + mutex_unlock(&opts->lock); + + return result; +} + +static ssize_t f_lb_opts_qlen_store(struct f_lb_opts *opts, + const char *page, size_t len) +{ + int ret; + u32 num; + + mutex_lock(&opts->lock); + if (opts->refcnt) { + ret = -EBUSY; + goto end; + } + + ret = kstrtou32(page, 0, &num); + if (ret) + goto end; + + opts->qlen = num; + ret = len; +end: + mutex_unlock(&opts->lock); + return ret; +} + +static struct f_lb_opts_attribute f_lb_opts_qlen = + __CONFIGFS_ATTR(qlen, S_IRUGO | S_IWUSR, + f_lb_opts_qlen_show, + f_lb_opts_qlen_store); + +static ssize_t f_lb_opts_bulk_buflen_show(struct f_lb_opts *opts, char *page) +{ + int result; + + mutex_lock(&opts->lock); + result = sprintf(page, "%d", opts->bulk_buflen); + mutex_unlock(&opts->lock); + + return result; +} + +static ssize_t f_lb_opts_bulk_buflen_store(struct f_lb_opts *opts, + const char *page, size_t len) +{ + int ret; + u32 num; + + mutex_lock(&opts->lock); + if (opts->refcnt) { + ret = -EBUSY; + goto end; + } + + ret = kstrtou32(page, 0, &num); + if (ret) + goto end; + + opts->bulk_buflen = num; + ret = len; +end: + mutex_unlock(&opts->lock); + return ret; +} + +static struct f_lb_opts_attribute f_lb_opts_bulk_buflen = + __CONFIGFS_ATTR(buflen, S_IRUGO | S_IWUSR, + f_lb_opts_bulk_buflen_show, + f_lb_opts_bulk_buflen_store); + +static struct configfs_attribute *lb_attrs[] = { + &f_lb_opts_qlen.attr, + &f_lb_opts_bulk_buflen.attr, + NULL, +}; + +static struct config_item_type lb_func_type = { + .ct_item_ops = &lb_item_ops, + .ct_attrs = lb_attrs, + .ct_owner = THIS_MODULE, +}; + +static void lb_free_instance(struct usb_function_instance *fi) +{ + struct f_lb_opts *lb_opts; + + lb_opts = container_of(fi, struct f_lb_opts, func_inst); + kfree(lb_opts); +} + +static struct usb_function_instance *loopback_alloc_instance(void) +{ + struct f_lb_opts *lb_opts; + + lb_opts = kzalloc(sizeof(*lb_opts), GFP_KERNEL); + if (!lb_opts) + return ERR_PTR(-ENOMEM); + mutex_init(&lb_opts->lock); + lb_opts->func_inst.free_func_inst = lb_free_instance; + lb_opts->bulk_buflen = GZERO_BULK_BUFLEN; + lb_opts->qlen = GZERO_QLEN; + + config_group_init_type_name(&lb_opts->func_inst.group, "", + &lb_func_type); + + return &lb_opts->func_inst; +} +DECLARE_USB_FUNCTION(Loopback, loopback_alloc_instance, loopback_alloc); + +int __init lb_modinit(void) +{ + int ret; + + ret = usb_function_register(&Loopbackusb_func); + if (ret) + return ret; + return ret; +} +void __exit lb_modexit(void) +{ + usb_function_unregister(&Loopbackusb_func); +} + +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c new file mode 100644 index 000000000..15c307155 --- /dev/null +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -0,0 +1,3636 @@ +/* + * f_mass_storage.c -- Mass Storage USB Composite Function + * + * Copyright (C) 2003-2008 Alan Stern + * Copyright (C) 2009 Samsung Electronics + * Author: Michal Nazarewicz <mina86@mina86.com> + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The names of the above-listed copyright holders may not be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * The Mass Storage Function acts as a USB Mass Storage device, + * appearing to the host as a disk drive or as a CD-ROM drive. In + * addition to providing an example of a genuinely useful composite + * function for a USB device, it also illustrates a technique of + * double-buffering for increased throughput. + * + * For more information about MSF and in particular its module + * parameters and sysfs interface read the + * <Documentation/usb/mass-storage.txt> file. + */ + +/* + * MSF is configured by specifying a fsg_config structure. It has the + * following fields: + * + * nluns Number of LUNs function have (anywhere from 1 + * to FSG_MAX_LUNS which is 8). + * luns An array of LUN configuration values. This + * should be filled for each LUN that + * function will include (ie. for "nluns" + * LUNs). Each element of the array has + * the following fields: + * ->filename The path to the backing file for the LUN. + * Required if LUN is not marked as + * removable. + * ->ro Flag specifying access to the LUN shall be + * read-only. This is implied if CD-ROM + * emulation is enabled as well as when + * it was impossible to open "filename" + * in R/W mode. + * ->removable Flag specifying that LUN shall be indicated as + * being removable. + * ->cdrom Flag specifying that LUN shall be reported as + * being a CD-ROM. + * ->nofua Flag specifying that FUA flag in SCSI WRITE(10,12) + * commands for this LUN shall be ignored. + * + * vendor_name + * product_name + * release Information used as a reply to INQUIRY + * request. To use default set to NULL, + * NULL, 0xffff respectively. The first + * field should be 8 and the second 16 + * characters or less. + * + * can_stall Set to permit function to halt bulk endpoints. + * Disabled on some USB devices known not + * to work correctly. You should set it + * to true. + * + * If "removable" is not set for a LUN then a backing file must be + * specified. 
If it is set, then NULL filename means the LUN's medium + * is not loaded (an empty string as "filename" in the fsg_config + * structure causes error). The CD-ROM emulation includes a single + * data track and no audio tracks; hence there need be only one + * backing file per LUN. + * + * This function is heavily based on "File-backed Storage Gadget" by + * Alan Stern which in turn is heavily based on "Gadget Zero" by David + * Brownell. The driver's SCSI command interface was based on the + * "Information technology - Small Computer System Interface - 2" + * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93, + * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>. + * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which + * was based on the "Universal Serial Bus Mass Storage Class UFI + * Command Specification" document, Revision 1.0, December 14, 1998, + * available at + * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>. + */ + +/* + * Driver Design + * + * The MSF is fairly straightforward. There is a main kernel + * thread that handles most of the work. Interrupt routines field + * callbacks from the controller driver: bulk- and interrupt-request + * completion notifications, endpoint-0 events, and disconnect events. + * Completion events are passed to the main thread by wakeup calls. Many + * ep0 requests are handled at interrupt time, but SetInterface, + * SetConfiguration, and device reset requests are forwarded to the + * thread in the form of "exceptions" using SIGUSR1 signals (since they + * should interrupt any ongoing file I/O operations). + * + * The thread's main routine implements the standard command/data/status + * parts of a SCSI interaction. It and its subroutines are full of tests + * for pending signals/exceptions -- all this polling is necessary since + * the kernel has no setjmp/longjmp equivalents. (Maybe this is an + * indication that the driver really wants to be running in userspace.) + * An important point is that so long as the thread is alive it keeps an + * open reference to the backing file. This will prevent unmounting + * the backing file's underlying filesystem and could cause problems + * during system shutdown, for example. To prevent such problems, the + * thread catches INT, TERM, and KILL signals and converts them into + * an EXIT exception. + * + * In normal operation the main thread is started during the gadget's + * fsg_bind() callback and stopped during fsg_unbind(). But it can + * also exit when it receives a signal, and there's no point leaving + * the gadget running when the thread is dead. As of this moment, MSF + * provides no way to deregister the gadget when thread dies -- maybe + * a callback functions is needed. + * + * To provide maximum throughput, the driver uses a circular pipeline of + * buffer heads (struct fsg_buffhd). In principle the pipeline can be + * arbitrarily long; in practice the benefits don't justify having more + * than 2 stages (i.e., double buffering). But it helps to think of the + * pipeline as being a long one. Each buffer head contains a bulk-in and + * a bulk-out request pointer (since the buffer can be used for both + * output and input -- directions always are given from the host's + * point of view) as well as a pointer to the buffer and various state + * variables. + * + * Use of the pipeline follows a simple protocol. There is a variable + * (fsg->next_buffhd_to_fill) that points to the next buffer head to use. 
+ * At any time that buffer head may still be in use from an earlier + * request, so each buffer head has a state variable indicating whether + * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the + * buffer head to be EMPTY, filling the buffer either by file I/O or by + * USB I/O (during which the buffer head is BUSY), and marking the buffer + * head FULL when the I/O is complete. Then the buffer will be emptied + * (again possibly by USB I/O, during which it is marked BUSY) and + * finally marked EMPTY again (possibly by a completion routine). + * + * A module parameter tells the driver to avoid stalling the bulk + * endpoints wherever the transport specification allows. This is + * necessary for some UDCs like the SuperH, which cannot reliably clear a + * halt on a bulk endpoint. However, under certain circumstances the + * Bulk-only specification requires a stall. In such cases the driver + * will halt the endpoint and set a flag indicating that it should clear + * the halt in software during the next device reset. Hopefully this + * will permit everything to work correctly. Furthermore, although the + * specification allows the bulk-out endpoint to halt when the host sends + * too much data, implementing this would cause an unavoidable race. + * The driver will always use the "no-stall" approach for OUT transfers. + * + * One subtle point concerns sending status-stage responses for ep0 + * requests. Some of these requests, such as device reset, can involve + * interrupting an ongoing file I/O operation, which might take an + * arbitrarily long time. During that delay the host might give up on + * the original ep0 request and issue a new one. When that happens the + * driver should not notify the host about completion of the original + * request, as the host will no longer be waiting for it. So the driver + * assigns to each ep0 request a unique tag, and it keeps track of the + * tag value of the request associated with a long-running exception + * (device-reset, interface-change, or configuration-change). When the + * exception handler is finished, the status-stage response is submitted + * only if the current ep0 request tag is equal to the exception request + * tag. Thus only the most recently received ep0 request will get a + * status-stage response. + * + * Warning: This driver source file is too long. It ought to be split up + * into a header file plus about 3 separate .c files, to handle the details + * of the Gadget, USB Mass Storage, and SCSI protocols. 
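 *
 * As a deliberately simplified, user-space analogue of the buffer-head
 * pipeline described above (plain pthreads stand in for the kernel
 * thread, completion handlers and fsg_buffhd machinery; the names and
 * sizes below are illustrative only, not the driver's own):
 *
 *	#include <pthread.h>
 *	#include <stdio.h>
 *
 *	enum buf_state { BUF_EMPTY, BUF_BUSY, BUF_FULL };
 *
 *	struct bufhd {
 *		enum buf_state state;
 *		char data[64];
 *		struct bufhd *next;	// circular list
 *	};
 *
 *	static struct bufhd bufs[2];	// double buffering
 *	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *	static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
 *
 *	static void set_state(struct bufhd *bh, enum buf_state s)
 *	{
 *		pthread_mutex_lock(&lock);
 *		bh->state = s;
 *		pthread_cond_broadcast(&cond);
 *		pthread_mutex_unlock(&lock);
 *	}
 *
 *	static void wait_state(struct bufhd *bh, enum buf_state s)
 *	{
 *		pthread_mutex_lock(&lock);
 *		while (bh->state != s)
 *			pthread_cond_wait(&cond, &lock);
 *		pthread_mutex_unlock(&lock);
 *	}
 *
 *	// "file I/O" side: fill EMPTY buffers and mark them FULL
 *	static void *filler(void *arg)
 *	{
 *		struct bufhd *bh = &bufs[0];
 *		int i;
 *
 *		(void)arg;
 *		for (i = 0; i < 4; i++) {
 *			wait_state(bh, BUF_EMPTY);
 *			set_state(bh, BUF_BUSY);
 *			snprintf(bh->data, sizeof(bh->data), "block %d", i);
 *			set_state(bh, BUF_FULL);
 *			bh = bh->next;
 *		}
 *		return NULL;
 *	}
 *
 *	// "USB I/O" side: drain FULL buffers and mark them EMPTY again
 *	int main(void)
 *	{
 *		struct bufhd *bh = &bufs[0];
 *		pthread_t t;
 *		int i;
 *
 *		bufs[0].next = &bufs[1];
 *		bufs[1].next = &bufs[0];
 *		pthread_create(&t, NULL, filler, NULL);
 *		for (i = 0; i < 4; i++) {
 *			wait_state(bh, BUF_FULL);
 *			set_state(bh, BUF_BUSY);
 *			puts(bh->data);
 *			set_state(bh, BUF_EMPTY);
 *			bh = bh->next;
 *		}
 *		pthread_join(t, NULL);
 *		return 0;
 *	}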
+ */ + + +/* #define VERBOSE_DEBUG */ +/* #define DUMP_MSGS */ + +#include <linux/blkdev.h> +#include <linux/completion.h> +#include <linux/dcache.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/fcntl.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/kref.h> +#include <linux/kthread.h> +#include <linux/limits.h> +#include <linux/rwsem.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/freezer.h> +#include <linux/module.h> + +#include <linux/usb/ch9.h> +#include <linux/usb/gadget.h> +#include <linux/usb/composite.h> + +#include "gadget_chips.h" +#include "configfs.h" + + +/*------------------------------------------------------------------------*/ + +#define FSG_DRIVER_DESC "Mass Storage Function" +#define FSG_DRIVER_VERSION "2009/09/11" + +static const char fsg_string_interface[] = "Mass Storage"; + +#include "storage_common.h" +#include "f_mass_storage.h" + +/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */ +static struct usb_string fsg_strings[] = { + {FSG_STRING_INTERFACE, fsg_string_interface}, + {} +}; + +static struct usb_gadget_strings fsg_stringtab = { + .language = 0x0409, /* en-us */ + .strings = fsg_strings, +}; + +static struct usb_gadget_strings *fsg_strings_array[] = { + &fsg_stringtab, + NULL, +}; + +/*-------------------------------------------------------------------------*/ + +struct fsg_dev; +struct fsg_common; + +/* Data shared by all the FSG instances. */ +struct fsg_common { + struct usb_gadget *gadget; + struct usb_composite_dev *cdev; + struct fsg_dev *fsg, *new_fsg; + wait_queue_head_t fsg_wait; + + /* filesem protects: backing files in use */ + struct rw_semaphore filesem; + + /* lock protects: state, all the req_busy's */ + spinlock_t lock; + + struct usb_ep *ep0; /* Copy of gadget->ep0 */ + struct usb_request *ep0req; /* Copy of cdev->req */ + unsigned int ep0_req_tag; + + struct fsg_buffhd *next_buffhd_to_fill; + struct fsg_buffhd *next_buffhd_to_drain; + struct fsg_buffhd *buffhds; + unsigned int fsg_num_buffers; + + int cmnd_size; + u8 cmnd[MAX_COMMAND_SIZE]; + + unsigned int nluns; + unsigned int lun; + struct fsg_lun **luns; + struct fsg_lun *curlun; + + unsigned int bulk_out_maxpacket; + enum fsg_state state; /* For exception handling */ + unsigned int exception_req_tag; + + enum data_direction data_dir; + u32 data_size; + u32 data_size_from_cmnd; + u32 tag; + u32 residue; + u32 usb_amount_left; + + unsigned int can_stall:1; + unsigned int free_storage_on_release:1; + unsigned int phase_error:1; + unsigned int short_packet_received:1; + unsigned int bad_lun_okay:1; + unsigned int running:1; + unsigned int sysfs:1; + + int thread_wakeup_needed; + struct completion thread_notifier; + struct task_struct *thread_task; + + /* Callback functions. */ + const struct fsg_operations *ops; + /* Gadget's private data. 
*/ + void *private_data; + + /* + * Vendor (8 chars), product (16 chars), release (4 + * hexadecimal digits) and NUL byte + */ + char inquiry_string[8 + 16 + 4 + 1]; + + struct kref ref; +}; + +struct fsg_dev { + struct usb_function function; + struct usb_gadget *gadget; /* Copy of cdev->gadget */ + struct fsg_common *common; + + u16 interface_number; + + unsigned int bulk_in_enabled:1; + unsigned int bulk_out_enabled:1; + + unsigned long atomic_bitflags; +#define IGNORE_BULK_OUT 0 + + struct usb_ep *bulk_in; + struct usb_ep *bulk_out; +}; + +static inline int __fsg_is_set(struct fsg_common *common, + const char *func, unsigned line) +{ + if (common->fsg) + return 1; + ERROR(common, "common->fsg is NULL in %s at %u\n", func, line); + WARN_ON(1); + return 0; +} + +#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__)) + +static inline struct fsg_dev *fsg_from_func(struct usb_function *f) +{ + return container_of(f, struct fsg_dev, function); +} + +typedef void (*fsg_routine_t)(struct fsg_dev *); + +static int exception_in_progress(struct fsg_common *common) +{ + return common->state > FSG_STATE_IDLE; +} + +/* Make bulk-out requests be divisible by the maxpacket size */ +static void set_bulk_out_req_length(struct fsg_common *common, + struct fsg_buffhd *bh, unsigned int length) +{ + unsigned int rem; + + bh->bulk_out_intended_length = length; + rem = length % common->bulk_out_maxpacket; + if (rem > 0) + length += common->bulk_out_maxpacket - rem; + bh->outreq->length = length; +} + + +/*-------------------------------------------------------------------------*/ + +static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep) +{ + const char *name; + + if (ep == fsg->bulk_in) + name = "bulk-in"; + else if (ep == fsg->bulk_out) + name = "bulk-out"; + else + name = ep->name; + DBG(fsg, "%s set halt\n", name); + return usb_ep_set_halt(ep); +} + + +/*-------------------------------------------------------------------------*/ + +/* These routines may be called in process context or in_irq */ + +/* Caller must hold fsg->lock */ +static void wakeup_thread(struct fsg_common *common) +{ + smp_wmb(); /* ensure the write of bh->state is complete */ + /* Tell the main thread that something has happened */ + common->thread_wakeup_needed = 1; + if (common->thread_task) + wake_up_process(common->thread_task); +} + +static void raise_exception(struct fsg_common *common, enum fsg_state new_state) +{ + unsigned long flags; + + /* + * Do nothing if a higher-priority exception is already in progress. + * If a lower-or-equal priority exception is in progress, preempt it + * and notify the main thread by sending it a signal. + */ + spin_lock_irqsave(&common->lock, flags); + if (common->state <= new_state) { + common->exception_req_tag = common->ep0_req_tag; + common->state = new_state; + if (common->thread_task) + send_sig_info(SIGUSR1, SEND_SIG_FORCED, + common->thread_task); + } + spin_unlock_irqrestore(&common->lock, flags); +} + + +/*-------------------------------------------------------------------------*/ + +static int ep0_queue(struct fsg_common *common) +{ + int rc; + + rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC); + common->ep0->driver_data = common; + if (rc != 0 && rc != -ESHUTDOWN) { + /* We can't do much more than wait for a reset */ + WARNING(common, "error in submission: %s --> %d\n", + common->ep0->name, rc); + } + return rc; +} + + +/*-------------------------------------------------------------------------*/ + +/* Completion handlers. These always run in_irq. 
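 * They only update the request/buffer bookkeeping under the lock and
 * wake the main thread; the actual filling and draining of buffers is
 * done later in process context.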
*/ + +static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct fsg_common *common = ep->driver_data; + struct fsg_buffhd *bh = req->context; + + if (req->status || req->actual != req->length) + DBG(common, "%s --> %d, %u/%u\n", __func__, + req->status, req->actual, req->length); + if (req->status == -ECONNRESET) /* Request was cancelled */ + usb_ep_fifo_flush(ep); + + /* Hold the lock while we update the request and buffer states */ + smp_wmb(); + spin_lock(&common->lock); + bh->inreq_busy = 0; + bh->state = BUF_STATE_EMPTY; + wakeup_thread(common); + spin_unlock(&common->lock); +} + +static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct fsg_common *common = ep->driver_data; + struct fsg_buffhd *bh = req->context; + + dump_msg(common, "bulk-out", req->buf, req->actual); + if (req->status || req->actual != bh->bulk_out_intended_length) + DBG(common, "%s --> %d, %u/%u\n", __func__, + req->status, req->actual, bh->bulk_out_intended_length); + if (req->status == -ECONNRESET) /* Request was cancelled */ + usb_ep_fifo_flush(ep); + + /* Hold the lock while we update the request and buffer states */ + smp_wmb(); + spin_lock(&common->lock); + bh->outreq_busy = 0; + bh->state = BUF_STATE_FULL; + wakeup_thread(common); + spin_unlock(&common->lock); +} + +static int fsg_setup(struct usb_function *f, + const struct usb_ctrlrequest *ctrl) +{ + struct fsg_dev *fsg = fsg_from_func(f); + struct usb_request *req = fsg->common->ep0req; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + + if (!fsg_is_set(fsg->common)) + return -EOPNOTSUPP; + + ++fsg->common->ep0_req_tag; /* Record arrival of a new request */ + req->context = NULL; + req->length = 0; + dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl)); + + switch (ctrl->bRequest) { + + case US_BULK_RESET_REQUEST: + if (ctrl->bRequestType != + (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) + break; + if (w_index != fsg->interface_number || w_value != 0 || + w_length != 0) + return -EDOM; + + /* + * Raise an exception to stop the current operation + * and reinitialize our state. 
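 * Returning USB_GADGET_DELAYED_STATUS below defers the ep0 status
 * stage; it is completed only after the main thread has handled the
 * reset exception (see the request-tag discussion at the top of this
 * file).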
+ */ + DBG(fsg, "bulk reset request\n"); + raise_exception(fsg->common, FSG_STATE_RESET); + return USB_GADGET_DELAYED_STATUS; + + case US_BULK_GET_MAX_LUN: + if (ctrl->bRequestType != + (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE)) + break; + if (w_index != fsg->interface_number || w_value != 0 || + w_length != 1) + return -EDOM; + VDBG(fsg, "get max LUN\n"); + *(u8 *)req->buf = fsg->common->nluns - 1; + + /* Respond with data/status */ + req->length = min((u16)1, w_length); + return ep0_queue(fsg->common); + } + + VDBG(fsg, + "unknown class-specific control req %02x.%02x v%04x i%04x l%u\n", + ctrl->bRequestType, ctrl->bRequest, + le16_to_cpu(ctrl->wValue), w_index, w_length); + return -EOPNOTSUPP; +} + + +/*-------------------------------------------------------------------------*/ + +/* All the following routines run in process context */ + +/* Use this for bulk or interrupt transfers, not ep0 */ +static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep, + struct usb_request *req, int *pbusy, + enum fsg_buffer_state *state) +{ + int rc; + + if (ep == fsg->bulk_in) + dump_msg(fsg, "bulk-in", req->buf, req->length); + + spin_lock_irq(&fsg->common->lock); + *pbusy = 1; + *state = BUF_STATE_BUSY; + spin_unlock_irq(&fsg->common->lock); + + rc = usb_ep_queue(ep, req, GFP_KERNEL); + if (rc == 0) + return; /* All good, we're done */ + + *pbusy = 0; + *state = BUF_STATE_EMPTY; + + /* We can't do much more than wait for a reset */ + + /* + * Note: currently the net2280 driver fails zero-length + * submissions if DMA is enabled. + */ + if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP && req->length == 0)) + WARNING(fsg, "error in submission: %s --> %d\n", ep->name, rc); +} + +static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh) +{ + if (!fsg_is_set(common)) + return false; + start_transfer(common->fsg, common->fsg->bulk_in, + bh->inreq, &bh->inreq_busy, &bh->state); + return true; +} + +static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh) +{ + if (!fsg_is_set(common)) + return false; + start_transfer(common->fsg, common->fsg->bulk_out, + bh->outreq, &bh->outreq_busy, &bh->state); + return true; +} + +static int sleep_thread(struct fsg_common *common, bool can_freeze) +{ + int rc = 0; + + /* Wait until a signal arrives or we are woken up */ + for (;;) { + if (can_freeze) + try_to_freeze(); + set_current_state(TASK_INTERRUPTIBLE); + if (signal_pending(current)) { + rc = -EINTR; + break; + } + if (common->thread_wakeup_needed) + break; + schedule(); + } + __set_current_state(TASK_RUNNING); + common->thread_wakeup_needed = 0; + smp_rmb(); /* ensure the latest bh->state is visible */ + return rc; +} + + +/*-------------------------------------------------------------------------*/ + +static int do_read(struct fsg_common *common) +{ + struct fsg_lun *curlun = common->curlun; + u32 lba; + struct fsg_buffhd *bh; + int rc; + u32 amount_left; + loff_t file_offset, file_offset_tmp; + unsigned int amount; + ssize_t nread; + + /* + * Get the starting Logical Block Address and check that it's + * not too big. + */ + if (common->cmnd[0] == READ_6) + lba = get_unaligned_be24(&common->cmnd[1]); + else { + lba = get_unaligned_be32(&common->cmnd[2]); + + /* + * We allow DPO (Disable Page Out = don't save data in the + * cache) and FUA (Force Unit Access = don't read from the + * cache), but we don't implement them. 
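 * (In the READ(10)/READ(12) CDB these are bit 0x10 (DPO) and bit 0x08
 * (FUA) of byte 1, which is why only ~0x18 is masked off in the check
 * below.)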
+ */ + if ((common->cmnd[1] & ~0x18) != 0) { + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; + return -EINVAL; + } + } + if (lba >= curlun->num_sectors) { + curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; + return -EINVAL; + } + file_offset = ((loff_t) lba) << curlun->blkbits; + + /* Carry out the file reads */ + amount_left = common->data_size_from_cmnd; + if (unlikely(amount_left == 0)) + return -EIO; /* No default reply */ + + for (;;) { + /* + * Figure out how much we need to read: + * Try to read the remaining amount. + * But don't read more than the buffer size. + * And don't try to read past the end of the file. + */ + amount = min(amount_left, FSG_BUFLEN); + amount = min((loff_t)amount, + curlun->file_length - file_offset); + + /* Wait for the next buffer to become available */ + bh = common->next_buffhd_to_fill; + while (bh->state != BUF_STATE_EMPTY) { + rc = sleep_thread(common, false); + if (rc) + return rc; + } + + /* + * If we were asked to read past the end of file, + * end with an empty buffer. + */ + if (amount == 0) { + curlun->sense_data = + SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; + curlun->sense_data_info = + file_offset >> curlun->blkbits; + curlun->info_valid = 1; + bh->inreq->length = 0; + bh->state = BUF_STATE_FULL; + break; + } + + /* Perform the read */ + file_offset_tmp = file_offset; + nread = vfs_read(curlun->filp, + (char __user *)bh->buf, + amount, &file_offset_tmp); + VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, + (unsigned long long)file_offset, (int)nread); + if (signal_pending(current)) + return -EINTR; + + if (nread < 0) { + LDBG(curlun, "error in file read: %d\n", (int)nread); + nread = 0; + } else if (nread < amount) { + LDBG(curlun, "partial file read: %d/%u\n", + (int)nread, amount); + nread = round_down(nread, curlun->blksize); + } + file_offset += nread; + amount_left -= nread; + common->residue -= nread; + + /* + * Except at the end of the transfer, nread will be + * equal to the buffer size, which is divisible by the + * bulk-in maxpacket size. 
+ */ + bh->inreq->length = nread; + bh->state = BUF_STATE_FULL; + + /* If an error occurred, report it and its position */ + if (nread < amount) { + curlun->sense_data = SS_UNRECOVERED_READ_ERROR; + curlun->sense_data_info = + file_offset >> curlun->blkbits; + curlun->info_valid = 1; + break; + } + + if (amount_left == 0) + break; /* No more left to read */ + + /* Send this buffer and go read some more */ + bh->inreq->zero = 0; + if (!start_in_transfer(common, bh)) + /* Don't know what to do if common->fsg is NULL */ + return -EIO; + common->next_buffhd_to_fill = bh->next; + } + + return -EIO; /* No default reply */ +} + + +/*-------------------------------------------------------------------------*/ + +static int do_write(struct fsg_common *common) +{ + struct fsg_lun *curlun = common->curlun; + u32 lba; + struct fsg_buffhd *bh; + int get_some_more; + u32 amount_left_to_req, amount_left_to_write; + loff_t usb_offset, file_offset, file_offset_tmp; + unsigned int amount; + ssize_t nwritten; + int rc; + + if (curlun->ro) { + curlun->sense_data = SS_WRITE_PROTECTED; + return -EINVAL; + } + spin_lock(&curlun->filp->f_lock); + curlun->filp->f_flags &= ~O_SYNC; /* Default is not to wait */ + spin_unlock(&curlun->filp->f_lock); + + /* + * Get the starting Logical Block Address and check that it's + * not too big + */ + if (common->cmnd[0] == WRITE_6) + lba = get_unaligned_be24(&common->cmnd[1]); + else { + lba = get_unaligned_be32(&common->cmnd[2]); + + /* + * We allow DPO (Disable Page Out = don't save data in the + * cache) and FUA (Force Unit Access = write directly to the + * medium). We don't implement DPO; we implement FUA by + * performing synchronous output. + */ + if (common->cmnd[1] & ~0x18) { + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; + return -EINVAL; + } + if (!curlun->nofua && (common->cmnd[1] & 0x08)) { /* FUA */ + spin_lock(&curlun->filp->f_lock); + curlun->filp->f_flags |= O_SYNC; + spin_unlock(&curlun->filp->f_lock); + } + } + if (lba >= curlun->num_sectors) { + curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; + return -EINVAL; + } + + /* Carry out the file writes */ + get_some_more = 1; + file_offset = usb_offset = ((loff_t) lba) << curlun->blkbits; + amount_left_to_req = common->data_size_from_cmnd; + amount_left_to_write = common->data_size_from_cmnd; + + while (amount_left_to_write > 0) { + + /* Queue a request for more data from the host */ + bh = common->next_buffhd_to_fill; + if (bh->state == BUF_STATE_EMPTY && get_some_more) { + + /* + * Figure out how much we want to get: + * Try to get the remaining amount, + * but not more than the buffer size. + */ + amount = min(amount_left_to_req, FSG_BUFLEN); + + /* Beyond the end of the backing file? */ + if (usb_offset >= curlun->file_length) { + get_some_more = 0; + curlun->sense_data = + SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; + curlun->sense_data_info = + usb_offset >> curlun->blkbits; + curlun->info_valid = 1; + continue; + } + + /* Get the next buffer */ + usb_offset += amount; + common->usb_amount_left -= amount; + amount_left_to_req -= amount; + if (amount_left_to_req == 0) + get_some_more = 0; + + /* + * Except at the end of the transfer, amount will be + * equal to the buffer size, which is divisible by + * the bulk-out maxpacket size. 
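 * (set_bulk_out_req_length() rounds the queued length up to a
 * maxpacket multiple in any case: e.g. a final 1000-byte chunk with a
 * 512-byte maxpacket is queued as a 1024-byte request, and the host's
 * short packet simply terminates it early.)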
+ */ + set_bulk_out_req_length(common, bh, amount); + if (!start_out_transfer(common, bh)) + /* Dunno what to do if common->fsg is NULL */ + return -EIO; + common->next_buffhd_to_fill = bh->next; + continue; + } + + /* Write the received data to the backing file */ + bh = common->next_buffhd_to_drain; + if (bh->state == BUF_STATE_EMPTY && !get_some_more) + break; /* We stopped early */ + if (bh->state == BUF_STATE_FULL) { + smp_rmb(); + common->next_buffhd_to_drain = bh->next; + bh->state = BUF_STATE_EMPTY; + + /* Did something go wrong with the transfer? */ + if (bh->outreq->status != 0) { + curlun->sense_data = SS_COMMUNICATION_FAILURE; + curlun->sense_data_info = + file_offset >> curlun->blkbits; + curlun->info_valid = 1; + break; + } + + amount = bh->outreq->actual; + if (curlun->file_length - file_offset < amount) { + LERROR(curlun, + "write %u @ %llu beyond end %llu\n", + amount, (unsigned long long)file_offset, + (unsigned long long)curlun->file_length); + amount = curlun->file_length - file_offset; + } + + /* Don't accept excess data. The spec doesn't say + * what to do in this case. We'll ignore the error. + */ + amount = min(amount, bh->bulk_out_intended_length); + + /* Don't write a partial block */ + amount = round_down(amount, curlun->blksize); + if (amount == 0) + goto empty_write; + + /* Perform the write */ + file_offset_tmp = file_offset; + nwritten = vfs_write(curlun->filp, + (char __user *)bh->buf, + amount, &file_offset_tmp); + VLDBG(curlun, "file write %u @ %llu -> %d\n", amount, + (unsigned long long)file_offset, (int)nwritten); + if (signal_pending(current)) + return -EINTR; /* Interrupted! */ + + if (nwritten < 0) { + LDBG(curlun, "error in file write: %d\n", + (int)nwritten); + nwritten = 0; + } else if (nwritten < amount) { + LDBG(curlun, "partial file write: %d/%u\n", + (int)nwritten, amount); + nwritten = round_down(nwritten, curlun->blksize); + } + file_offset += nwritten; + amount_left_to_write -= nwritten; + common->residue -= nwritten; + + /* If an error occurred, report it and its position */ + if (nwritten < amount) { + curlun->sense_data = SS_WRITE_ERROR; + curlun->sense_data_info = + file_offset >> curlun->blkbits; + curlun->info_valid = 1; + break; + } + + empty_write: + /* Did the host decide to stop early? */ + if (bh->outreq->actual < bh->bulk_out_intended_length) { + common->short_packet_received = 1; + break; + } + continue; + } + + /* Wait for something to happen */ + rc = sleep_thread(common, false); + if (rc) + return rc; + } + + return -EIO; /* No default reply */ +} + + +/*-------------------------------------------------------------------------*/ + +static int do_synchronize_cache(struct fsg_common *common) +{ + struct fsg_lun *curlun = common->curlun; + int rc; + + /* We ignore the requested LBA and write out all file's + * dirty data buffers. 
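 * An error from the fsync is reported to the host as SS_WRITE_ERROR
 * sense data; the command returns no data, hence the unconditional
 * return 0 below.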
*/ + rc = fsg_lun_fsync_sub(curlun); + if (rc) + curlun->sense_data = SS_WRITE_ERROR; + return 0; +} + + +/*-------------------------------------------------------------------------*/ + +static void invalidate_sub(struct fsg_lun *curlun) +{ + struct file *filp = curlun->filp; + struct inode *inode = file_inode(filp); + unsigned long rc; + + rc = invalidate_mapping_pages(inode->i_mapping, 0, -1); + VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc); +} + +static int do_verify(struct fsg_common *common) +{ + struct fsg_lun *curlun = common->curlun; + u32 lba; + u32 verification_length; + struct fsg_buffhd *bh = common->next_buffhd_to_fill; + loff_t file_offset, file_offset_tmp; + u32 amount_left; + unsigned int amount; + ssize_t nread; + + /* + * Get the starting Logical Block Address and check that it's + * not too big. + */ + lba = get_unaligned_be32(&common->cmnd[2]); + if (lba >= curlun->num_sectors) { + curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; + return -EINVAL; + } + + /* + * We allow DPO (Disable Page Out = don't save data in the + * cache) but we don't implement it. + */ + if (common->cmnd[1] & ~0x10) { + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; + return -EINVAL; + } + + verification_length = get_unaligned_be16(&common->cmnd[7]); + if (unlikely(verification_length == 0)) + return -EIO; /* No default reply */ + + /* Prepare to carry out the file verify */ + amount_left = verification_length << curlun->blkbits; + file_offset = ((loff_t) lba) << curlun->blkbits; + + /* Write out all the dirty buffers before invalidating them */ + fsg_lun_fsync_sub(curlun); + if (signal_pending(current)) + return -EINTR; + + invalidate_sub(curlun); + if (signal_pending(current)) + return -EINTR; + + /* Just try to read the requested blocks */ + while (amount_left > 0) { + /* + * Figure out how much we need to read: + * Try to read the remaining amount, but not more than + * the buffer size. + * And don't try to read past the end of the file. + */ + amount = min(amount_left, FSG_BUFLEN); + amount = min((loff_t)amount, + curlun->file_length - file_offset); + if (amount == 0) { + curlun->sense_data = + SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; + curlun->sense_data_info = + file_offset >> curlun->blkbits; + curlun->info_valid = 1; + break; + } + + /* Perform the read */ + file_offset_tmp = file_offset; + nread = vfs_read(curlun->filp, + (char __user *) bh->buf, + amount, &file_offset_tmp); + VLDBG(curlun, "file read %u @ %llu -> %d\n", amount, + (unsigned long long) file_offset, + (int) nread); + if (signal_pending(current)) + return -EINTR; + + if (nread < 0) { + LDBG(curlun, "error in file verify: %d\n", (int)nread); + nread = 0; + } else if (nread < amount) { + LDBG(curlun, "partial file verify: %d/%u\n", + (int)nread, amount); + nread = round_down(nread, curlun->blksize); + } + if (nread == 0) { + curlun->sense_data = SS_UNRECOVERED_READ_ERROR; + curlun->sense_data_info = + file_offset >> curlun->blkbits; + curlun->info_valid = 1; + break; + } + file_offset += nread; + amount_left -= nread; + } + return 0; +} + + +/*-------------------------------------------------------------------------*/ + +static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh) +{ + struct fsg_lun *curlun = common->curlun; + u8 *buf = (u8 *) bh->buf; + + if (!curlun) { /* Unsupported LUNs are okay */ + common->bad_lun_okay = 1; + memset(buf, 0, 36); + buf[0] = TYPE_NO_LUN; /* Unsupported, no device-type */ + buf[4] = 31; /* Additional length */ + return 36; + } + + buf[0] = curlun->cdrom ? 
TYPE_ROM : TYPE_DISK; + buf[1] = curlun->removable ? 0x80 : 0; + buf[2] = 2; /* ANSI SCSI level 2 */ + buf[3] = 2; /* SCSI-2 INQUIRY data format */ + buf[4] = 31; /* Additional length */ + buf[5] = 0; /* No special options */ + buf[6] = 0; + buf[7] = 0; + memcpy(buf + 8, common->inquiry_string, sizeof common->inquiry_string); + return 36; +} + +static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh) +{ + struct fsg_lun *curlun = common->curlun; + u8 *buf = (u8 *) bh->buf; + u32 sd, sdinfo; + int valid; + + /* + * From the SCSI-2 spec., section 7.9 (Unit attention condition): + * + * If a REQUEST SENSE command is received from an initiator + * with a pending unit attention condition (before the target + * generates the contingent allegiance condition), then the + * target shall either: + * a) report any pending sense data and preserve the unit + * attention condition on the logical unit, or, + * b) report the unit attention condition, may discard any + * pending sense data, and clear the unit attention + * condition on the logical unit for that initiator. + * + * FSG normally uses option a); enable this code to use option b). + */ +#if 0 + if (curlun && curlun->unit_attention_data != SS_NO_SENSE) { + curlun->sense_data = curlun->unit_attention_data; + curlun->unit_attention_data = SS_NO_SENSE; + } +#endif + + if (!curlun) { /* Unsupported LUNs are okay */ + common->bad_lun_okay = 1; + sd = SS_LOGICAL_UNIT_NOT_SUPPORTED; + sdinfo = 0; + valid = 0; + } else { + sd = curlun->sense_data; + sdinfo = curlun->sense_data_info; + valid = curlun->info_valid << 7; + curlun->sense_data = SS_NO_SENSE; + curlun->sense_data_info = 0; + curlun->info_valid = 0; + } + + memset(buf, 0, 18); + buf[0] = valid | 0x70; /* Valid, current error */ + buf[2] = SK(sd); + put_unaligned_be32(sdinfo, &buf[3]); /* Sense information */ + buf[7] = 18 - 8; /* Additional sense length */ + buf[12] = ASC(sd); + buf[13] = ASCQ(sd); + return 18; +} + +static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh) +{ + struct fsg_lun *curlun = common->curlun; + u32 lba = get_unaligned_be32(&common->cmnd[2]); + int pmi = common->cmnd[8]; + u8 *buf = (u8 *)bh->buf; + + /* Check the PMI and LBA fields */ + if (pmi > 1 || (pmi == 0 && lba != 0)) { + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; + return -EINVAL; + } + + put_unaligned_be32(curlun->num_sectors - 1, &buf[0]); + /* Max logical block */ + put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */ + return 8; +} + +static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh) +{ + struct fsg_lun *curlun = common->curlun; + int msf = common->cmnd[1] & 0x02; + u32 lba = get_unaligned_be32(&common->cmnd[2]); + u8 *buf = (u8 *)bh->buf; + + if (common->cmnd[1] & ~0x02) { /* Mask away MSF */ + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; + return -EINVAL; + } + if (lba >= curlun->num_sectors) { + curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE; + return -EINVAL; + } + + memset(buf, 0, 8); + buf[0] = 0x01; /* 2048 bytes of user data, rest is EC */ + store_cdrom_address(&buf[4], msf, lba); + return 8; +} + +static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh) +{ + struct fsg_lun *curlun = common->curlun; + int msf = common->cmnd[1] & 0x02; + int start_track = common->cmnd[6]; + u8 *buf = (u8 *)bh->buf; + + if ((common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */ + start_track > 1) { + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; + return -EINVAL; + } + + memset(buf, 0, 20); + buf[1] = (20-2); 
/* TOC data length */ + buf[2] = 1; /* First track number */ + buf[3] = 1; /* Last track number */ + buf[5] = 0x16; /* Data track, copying allowed */ + buf[6] = 0x01; /* Only track is number 1 */ + store_cdrom_address(&buf[8], msf, 0); + + buf[13] = 0x16; /* Lead-out track is data */ + buf[14] = 0xAA; /* Lead-out track number */ + store_cdrom_address(&buf[16], msf, curlun->num_sectors); + return 20; +} + +static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh) +{ + struct fsg_lun *curlun = common->curlun; + int mscmnd = common->cmnd[0]; + u8 *buf = (u8 *) bh->buf; + u8 *buf0 = buf; + int pc, page_code; + int changeable_values, all_pages; + int valid_page = 0; + int len, limit; + + if ((common->cmnd[1] & ~0x08) != 0) { /* Mask away DBD */ + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; + return -EINVAL; + } + pc = common->cmnd[2] >> 6; + page_code = common->cmnd[2] & 0x3f; + if (pc == 3) { + curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED; + return -EINVAL; + } + changeable_values = (pc == 1); + all_pages = (page_code == 0x3f); + + /* + * Write the mode parameter header. Fixed values are: default + * medium type, no cache control (DPOFUA), and no block descriptors. + * The only variable value is the WriteProtect bit. We will fill in + * the mode data length later. + */ + memset(buf, 0, 8); + if (mscmnd == MODE_SENSE) { + buf[2] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */ + buf += 4; + limit = 255; + } else { /* MODE_SENSE_10 */ + buf[3] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */ + buf += 8; + limit = 65535; /* Should really be FSG_BUFLEN */ + } + + /* No block descriptors */ + + /* + * The mode pages, in numerical order. The only page we support + * is the Caching page. + */ + if (page_code == 0x08 || all_pages) { + valid_page = 1; + buf[0] = 0x08; /* Page code */ + buf[1] = 10; /* Page length */ + memset(buf+2, 0, 10); /* None of the fields are changeable */ + + if (!changeable_values) { + buf[2] = 0x04; /* Write cache enable, */ + /* Read cache not disabled */ + /* No cache retention priorities */ + put_unaligned_be16(0xffff, &buf[4]); + /* Don't disable prefetch */ + /* Minimum prefetch = 0 */ + put_unaligned_be16(0xffff, &buf[8]); + /* Maximum prefetch */ + put_unaligned_be16(0xffff, &buf[10]); + /* Maximum prefetch ceiling */ + } + buf += 12; + } + + /* + * Check that a valid page was requested and the mode data length + * isn't too long. + */ + len = buf - buf0; + if (!valid_page || len > limit) { + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; + return -EINVAL; + } + + /* Store the mode data length */ + if (mscmnd == MODE_SENSE) + buf0[0] = len - 1; + else + put_unaligned_be16(len - 2, buf0); + return len; +} + +static int do_start_stop(struct fsg_common *common) +{ + struct fsg_lun *curlun = common->curlun; + int loej, start; + + if (!curlun) { + return -EINVAL; + } else if (!curlun->removable) { + curlun->sense_data = SS_INVALID_COMMAND; + return -EINVAL; + } else if ((common->cmnd[1] & ~0x01) != 0 || /* Mask away Immed */ + (common->cmnd[4] & ~0x03) != 0) { /* Mask LoEj, Start */ + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; + return -EINVAL; + } + + loej = common->cmnd[4] & 0x02; + start = common->cmnd[4] & 0x01; + + /* + * Our emulation doesn't support mounting; the medium is + * available for use as soon as it is loaded. + */ + if (start) { + if (!fsg_lun_is_open(curlun)) { + curlun->sense_data = SS_MEDIUM_NOT_PRESENT; + return -EINVAL; + } + return 0; + } + + /* Are we allowed to unload the media? 
*/ + if (curlun->prevent_medium_removal) { + LDBG(curlun, "unload attempt prevented\n"); + curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED; + return -EINVAL; + } + + if (!loej) + return 0; + + up_read(&common->filesem); + down_write(&common->filesem); + fsg_lun_close(curlun); + up_write(&common->filesem); + down_read(&common->filesem); + + return 0; +} + +static int do_prevent_allow(struct fsg_common *common) +{ + struct fsg_lun *curlun = common->curlun; + int prevent; + + if (!common->curlun) { + return -EINVAL; + } else if (!common->curlun->removable) { + common->curlun->sense_data = SS_INVALID_COMMAND; + return -EINVAL; + } + + prevent = common->cmnd[4] & 0x01; + if ((common->cmnd[4] & ~0x01) != 0) { /* Mask away Prevent */ + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; + return -EINVAL; + } + + if (curlun->prevent_medium_removal && !prevent) + fsg_lun_fsync_sub(curlun); + curlun->prevent_medium_removal = prevent; + return 0; +} + +static int do_read_format_capacities(struct fsg_common *common, + struct fsg_buffhd *bh) +{ + struct fsg_lun *curlun = common->curlun; + u8 *buf = (u8 *) bh->buf; + + buf[0] = buf[1] = buf[2] = 0; + buf[3] = 8; /* Only the Current/Maximum Capacity Descriptor */ + buf += 4; + + put_unaligned_be32(curlun->num_sectors, &buf[0]); + /* Number of blocks */ + put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */ + buf[4] = 0x02; /* Current capacity */ + return 12; +} + +static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh) +{ + struct fsg_lun *curlun = common->curlun; + + /* We don't support MODE SELECT */ + if (curlun) + curlun->sense_data = SS_INVALID_COMMAND; + return -EINVAL; +} + + +/*-------------------------------------------------------------------------*/ + +static int halt_bulk_in_endpoint(struct fsg_dev *fsg) +{ + int rc; + + rc = fsg_set_halt(fsg, fsg->bulk_in); + if (rc == -EAGAIN) + VDBG(fsg, "delayed bulk-in endpoint halt\n"); + while (rc != 0) { + if (rc != -EAGAIN) { + WARNING(fsg, "usb_ep_set_halt -> %d\n", rc); + rc = 0; + break; + } + + /* Wait for a short time and then try again */ + if (msleep_interruptible(100) != 0) + return -EINTR; + rc = usb_ep_set_halt(fsg->bulk_in); + } + return rc; +} + +static int wedge_bulk_in_endpoint(struct fsg_dev *fsg) +{ + int rc; + + DBG(fsg, "bulk-in set wedge\n"); + rc = usb_ep_set_wedge(fsg->bulk_in); + if (rc == -EAGAIN) + VDBG(fsg, "delayed bulk-in endpoint wedge\n"); + while (rc != 0) { + if (rc != -EAGAIN) { + WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc); + rc = 0; + break; + } + + /* Wait for a short time and then try again */ + if (msleep_interruptible(100) != 0) + return -EINTR; + rc = usb_ep_set_wedge(fsg->bulk_in); + } + return rc; +} + +static int throw_away_data(struct fsg_common *common) +{ + struct fsg_buffhd *bh; + u32 amount; + int rc; + + for (bh = common->next_buffhd_to_drain; + bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0; + bh = common->next_buffhd_to_drain) { + + /* Throw away the data in a filled buffer */ + if (bh->state == BUF_STATE_FULL) { + smp_rmb(); + bh->state = BUF_STATE_EMPTY; + common->next_buffhd_to_drain = bh->next; + + /* A short packet or an error ends everything */ + if (bh->outreq->actual < bh->bulk_out_intended_length || + bh->outreq->status != 0) { + raise_exception(common, + FSG_STATE_ABORT_BULK_OUT); + return -EINTR; + } + continue; + } + + /* Try to submit another request if we need one */ + bh = common->next_buffhd_to_fill; + if (bh->state == BUF_STATE_EMPTY + && common->usb_amount_left > 0) { + amount = 
min(common->usb_amount_left, FSG_BUFLEN); + + /* + * Except at the end of the transfer, amount will be + * equal to the buffer size, which is divisible by + * the bulk-out maxpacket size. + */ + set_bulk_out_req_length(common, bh, amount); + if (!start_out_transfer(common, bh)) + /* Dunno what to do if common->fsg is NULL */ + return -EIO; + common->next_buffhd_to_fill = bh->next; + common->usb_amount_left -= amount; + continue; + } + + /* Otherwise wait for something to happen */ + rc = sleep_thread(common, true); + if (rc) + return rc; + } + return 0; +} + +static int finish_reply(struct fsg_common *common) +{ + struct fsg_buffhd *bh = common->next_buffhd_to_fill; + int rc = 0; + + switch (common->data_dir) { + case DATA_DIR_NONE: + break; /* Nothing to send */ + + /* + * If we don't know whether the host wants to read or write, + * this must be CB or CBI with an unknown command. We mustn't + * try to send or receive any data. So stall both bulk pipes + * if we can and wait for a reset. + */ + case DATA_DIR_UNKNOWN: + if (!common->can_stall) { + /* Nothing */ + } else if (fsg_is_set(common)) { + fsg_set_halt(common->fsg, common->fsg->bulk_out); + rc = halt_bulk_in_endpoint(common->fsg); + } else { + /* Don't know what to do if common->fsg is NULL */ + rc = -EIO; + } + break; + + /* All but the last buffer of data must have already been sent */ + case DATA_DIR_TO_HOST: + if (common->data_size == 0) { + /* Nothing to send */ + + /* Don't know what to do if common->fsg is NULL */ + } else if (!fsg_is_set(common)) { + rc = -EIO; + + /* If there's no residue, simply send the last buffer */ + } else if (common->residue == 0) { + bh->inreq->zero = 0; + if (!start_in_transfer(common, bh)) + return -EIO; + common->next_buffhd_to_fill = bh->next; + + /* + * For Bulk-only, mark the end of the data with a short + * packet. If we are allowed to stall, halt the bulk-in + * endpoint. (Note: This violates the Bulk-Only Transport + * specification, which requires us to pad the data if we + * don't halt the endpoint. Presumably nobody will mind.) + */ + } else { + bh->inreq->zero = 1; + if (!start_in_transfer(common, bh)) + rc = -EIO; + common->next_buffhd_to_fill = bh->next; + if (common->can_stall) + rc = halt_bulk_in_endpoint(common->fsg); + } + break; + + /* + * We have processed all we want from the data the host has sent. + * There may still be outstanding bulk-out requests. + */ + case DATA_DIR_FROM_HOST: + if (common->residue == 0) { + /* Nothing to receive */ + + /* Did the host stop sending unexpectedly early? */ + } else if (common->short_packet_received) { + raise_exception(common, FSG_STATE_ABORT_BULK_OUT); + rc = -EINTR; + + /* + * We haven't processed all the incoming data. Even though + * we may be allowed to stall, doing so would cause a race. + * The controller may already have ACK'ed all the remaining + * bulk-out packets, in which case the host wouldn't see a + * STALL. Not realizing the endpoint was halted, it wouldn't + * clear the halt -- leading to problems later on. + */ +#if 0 + } else if (common->can_stall) { + if (fsg_is_set(common)) + fsg_set_halt(common->fsg, + common->fsg->bulk_out); + raise_exception(common, FSG_STATE_ABORT_BULK_OUT); + rc = -EINTR; +#endif + + /* + * We can't stall. Read in the excess data and throw it + * all away. 
+ */ + } else { + rc = throw_away_data(common); + } + break; + } + return rc; +} + +static int send_status(struct fsg_common *common) +{ + struct fsg_lun *curlun = common->curlun; + struct fsg_buffhd *bh; + struct bulk_cs_wrap *csw; + int rc; + u8 status = US_BULK_STAT_OK; + u32 sd, sdinfo = 0; + + /* Wait for the next buffer to become available */ + bh = common->next_buffhd_to_fill; + while (bh->state != BUF_STATE_EMPTY) { + rc = sleep_thread(common, true); + if (rc) + return rc; + } + + if (curlun) { + sd = curlun->sense_data; + sdinfo = curlun->sense_data_info; + } else if (common->bad_lun_okay) + sd = SS_NO_SENSE; + else + sd = SS_LOGICAL_UNIT_NOT_SUPPORTED; + + if (common->phase_error) { + DBG(common, "sending phase-error status\n"); + status = US_BULK_STAT_PHASE; + sd = SS_INVALID_COMMAND; + } else if (sd != SS_NO_SENSE) { + DBG(common, "sending command-failure status\n"); + status = US_BULK_STAT_FAIL; + VDBG(common, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;" + " info x%x\n", + SK(sd), ASC(sd), ASCQ(sd), sdinfo); + } + + /* Store and send the Bulk-only CSW */ + csw = (void *)bh->buf; + + csw->Signature = cpu_to_le32(US_BULK_CS_SIGN); + csw->Tag = common->tag; + csw->Residue = cpu_to_le32(common->residue); + csw->Status = status; + + bh->inreq->length = US_BULK_CS_WRAP_LEN; + bh->inreq->zero = 0; + if (!start_in_transfer(common, bh)) + /* Don't know what to do if common->fsg is NULL */ + return -EIO; + + common->next_buffhd_to_fill = bh->next; + return 0; +} + + +/*-------------------------------------------------------------------------*/ + +/* + * Check whether the command is properly formed and whether its data size + * and direction agree with the values we already have. + */ +static int check_command(struct fsg_common *common, int cmnd_size, + enum data_direction data_dir, unsigned int mask, + int needs_medium, const char *name) +{ + int i; + unsigned int lun = common->cmnd[1] >> 5; + static const char dirletter[4] = {'u', 'o', 'i', 'n'}; + char hdlen[20]; + struct fsg_lun *curlun; + + hdlen[0] = 0; + if (common->data_dir != DATA_DIR_UNKNOWN) + sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir], + common->data_size); + VDBG(common, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n", + name, cmnd_size, dirletter[(int) data_dir], + common->data_size_from_cmnd, common->cmnd_size, hdlen); + + /* + * We can't reply at all until we know the correct data direction + * and size. + */ + if (common->data_size_from_cmnd == 0) + data_dir = DATA_DIR_NONE; + if (common->data_size < common->data_size_from_cmnd) { + /* + * Host data size < Device data size is a phase error. + * Carry out the command, but only transfer as much as + * we are allowed. + */ + common->data_size_from_cmnd = common->data_size; + common->phase_error = 1; + } + common->residue = common->data_size; + common->usb_amount_left = common->data_size; + + /* Conflicting data directions is a phase error */ + if (common->data_dir != data_dir && common->data_size_from_cmnd > 0) { + common->phase_error = 1; + return -EINVAL; + } + + /* Verify the length of the command itself */ + if (cmnd_size != common->cmnd_size) { + + /* + * Special case workaround: There are plenty of buggy SCSI + * implementations. Many have issues with cbw->Length + * field passing a wrong command size. For those cases we + * always try to work around the problem by using the length + * sent by the host side provided it is at least as large + * as the correct command length. 
+ * Examples of such cases would be MS-Windows, which issues + * REQUEST SENSE with cbw->Length == 12 where it should + * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and + * REQUEST SENSE with cbw->Length == 10 where it should + * be 6 as well. + */ + if (cmnd_size <= common->cmnd_size) { + DBG(common, "%s is buggy! Expected length %d " + "but we got %d\n", name, + cmnd_size, common->cmnd_size); + cmnd_size = common->cmnd_size; + } else { + common->phase_error = 1; + return -EINVAL; + } + } + + /* Check that the LUN values are consistent */ + if (common->lun != lun) + DBG(common, "using LUN %u from CBW, not LUN %u from CDB\n", + common->lun, lun); + + /* Check the LUN */ + curlun = common->curlun; + if (curlun) { + if (common->cmnd[0] != REQUEST_SENSE) { + curlun->sense_data = SS_NO_SENSE; + curlun->sense_data_info = 0; + curlun->info_valid = 0; + } + } else { + common->bad_lun_okay = 0; + + /* + * INQUIRY and REQUEST SENSE commands are explicitly allowed + * to use unsupported LUNs; all others may not. + */ + if (common->cmnd[0] != INQUIRY && + common->cmnd[0] != REQUEST_SENSE) { + DBG(common, "unsupported LUN %u\n", common->lun); + return -EINVAL; + } + } + + /* + * If a unit attention condition exists, only INQUIRY and + * REQUEST SENSE commands are allowed; anything else must fail. + */ + if (curlun && curlun->unit_attention_data != SS_NO_SENSE && + common->cmnd[0] != INQUIRY && + common->cmnd[0] != REQUEST_SENSE) { + curlun->sense_data = curlun->unit_attention_data; + curlun->unit_attention_data = SS_NO_SENSE; + return -EINVAL; + } + + /* Check that only command bytes listed in the mask are non-zero */ + common->cmnd[1] &= 0x1f; /* Mask away the LUN */ + for (i = 1; i < cmnd_size; ++i) { + if (common->cmnd[i] && !(mask & (1 << i))) { + if (curlun) + curlun->sense_data = SS_INVALID_FIELD_IN_CDB; + return -EINVAL; + } + } + + /* If the medium isn't mounted and the command needs to access + * it, return an error. 
*/ + if (curlun && !fsg_lun_is_open(curlun) && needs_medium) { + curlun->sense_data = SS_MEDIUM_NOT_PRESENT; + return -EINVAL; + } + + return 0; +} + +/* wrapper of check_command for data size in blocks handling */ +static int check_command_size_in_blocks(struct fsg_common *common, + int cmnd_size, enum data_direction data_dir, + unsigned int mask, int needs_medium, const char *name) +{ + if (common->curlun) + common->data_size_from_cmnd <<= common->curlun->blkbits; + return check_command(common, cmnd_size, data_dir, + mask, needs_medium, name); +} + +static int do_scsi_command(struct fsg_common *common) +{ + struct fsg_buffhd *bh; + int rc; + int reply = -EINVAL; + int i; + static char unknown[16]; + + dump_cdb(common); + + /* Wait for the next buffer to become available for data or status */ + bh = common->next_buffhd_to_fill; + common->next_buffhd_to_drain = bh; + while (bh->state != BUF_STATE_EMPTY) { + rc = sleep_thread(common, true); + if (rc) + return rc; + } + common->phase_error = 0; + common->short_packet_received = 0; + + down_read(&common->filesem); /* We're using the backing file */ + switch (common->cmnd[0]) { + + case INQUIRY: + common->data_size_from_cmnd = common->cmnd[4]; + reply = check_command(common, 6, DATA_DIR_TO_HOST, + (1<<4), 0, + "INQUIRY"); + if (reply == 0) + reply = do_inquiry(common, bh); + break; + + case MODE_SELECT: + common->data_size_from_cmnd = common->cmnd[4]; + reply = check_command(common, 6, DATA_DIR_FROM_HOST, + (1<<1) | (1<<4), 0, + "MODE SELECT(6)"); + if (reply == 0) + reply = do_mode_select(common, bh); + break; + + case MODE_SELECT_10: + common->data_size_from_cmnd = + get_unaligned_be16(&common->cmnd[7]); + reply = check_command(common, 10, DATA_DIR_FROM_HOST, + (1<<1) | (3<<7), 0, + "MODE SELECT(10)"); + if (reply == 0) + reply = do_mode_select(common, bh); + break; + + case MODE_SENSE: + common->data_size_from_cmnd = common->cmnd[4]; + reply = check_command(common, 6, DATA_DIR_TO_HOST, + (1<<1) | (1<<2) | (1<<4), 0, + "MODE SENSE(6)"); + if (reply == 0) + reply = do_mode_sense(common, bh); + break; + + case MODE_SENSE_10: + common->data_size_from_cmnd = + get_unaligned_be16(&common->cmnd[7]); + reply = check_command(common, 10, DATA_DIR_TO_HOST, + (1<<1) | (1<<2) | (3<<7), 0, + "MODE SENSE(10)"); + if (reply == 0) + reply = do_mode_sense(common, bh); + break; + + case ALLOW_MEDIUM_REMOVAL: + common->data_size_from_cmnd = 0; + reply = check_command(common, 6, DATA_DIR_NONE, + (1<<4), 0, + "PREVENT-ALLOW MEDIUM REMOVAL"); + if (reply == 0) + reply = do_prevent_allow(common); + break; + + case READ_6: + i = common->cmnd[4]; + common->data_size_from_cmnd = (i == 0) ? 
256 : i; + reply = check_command_size_in_blocks(common, 6, + DATA_DIR_TO_HOST, + (7<<1) | (1<<4), 1, + "READ(6)"); + if (reply == 0) + reply = do_read(common); + break; + + case READ_10: + common->data_size_from_cmnd = + get_unaligned_be16(&common->cmnd[7]); + reply = check_command_size_in_blocks(common, 10, + DATA_DIR_TO_HOST, + (1<<1) | (0xf<<2) | (3<<7), 1, + "READ(10)"); + if (reply == 0) + reply = do_read(common); + break; + + case READ_12: + common->data_size_from_cmnd = + get_unaligned_be32(&common->cmnd[6]); + reply = check_command_size_in_blocks(common, 12, + DATA_DIR_TO_HOST, + (1<<1) | (0xf<<2) | (0xf<<6), 1, + "READ(12)"); + if (reply == 0) + reply = do_read(common); + break; + + case READ_CAPACITY: + common->data_size_from_cmnd = 8; + reply = check_command(common, 10, DATA_DIR_TO_HOST, + (0xf<<2) | (1<<8), 1, + "READ CAPACITY"); + if (reply == 0) + reply = do_read_capacity(common, bh); + break; + + case READ_HEADER: + if (!common->curlun || !common->curlun->cdrom) + goto unknown_cmnd; + common->data_size_from_cmnd = + get_unaligned_be16(&common->cmnd[7]); + reply = check_command(common, 10, DATA_DIR_TO_HOST, + (3<<7) | (0x1f<<1), 1, + "READ HEADER"); + if (reply == 0) + reply = do_read_header(common, bh); + break; + + case READ_TOC: + if (!common->curlun || !common->curlun->cdrom) + goto unknown_cmnd; + common->data_size_from_cmnd = + get_unaligned_be16(&common->cmnd[7]); + reply = check_command(common, 10, DATA_DIR_TO_HOST, + (7<<6) | (1<<1), 1, + "READ TOC"); + if (reply == 0) + reply = do_read_toc(common, bh); + break; + + case READ_FORMAT_CAPACITIES: + common->data_size_from_cmnd = + get_unaligned_be16(&common->cmnd[7]); + reply = check_command(common, 10, DATA_DIR_TO_HOST, + (3<<7), 1, + "READ FORMAT CAPACITIES"); + if (reply == 0) + reply = do_read_format_capacities(common, bh); + break; + + case REQUEST_SENSE: + common->data_size_from_cmnd = common->cmnd[4]; + reply = check_command(common, 6, DATA_DIR_TO_HOST, + (1<<4), 0, + "REQUEST SENSE"); + if (reply == 0) + reply = do_request_sense(common, bh); + break; + + case START_STOP: + common->data_size_from_cmnd = 0; + reply = check_command(common, 6, DATA_DIR_NONE, + (1<<1) | (1<<4), 0, + "START-STOP UNIT"); + if (reply == 0) + reply = do_start_stop(common); + break; + + case SYNCHRONIZE_CACHE: + common->data_size_from_cmnd = 0; + reply = check_command(common, 10, DATA_DIR_NONE, + (0xf<<2) | (3<<7), 1, + "SYNCHRONIZE CACHE"); + if (reply == 0) + reply = do_synchronize_cache(common); + break; + + case TEST_UNIT_READY: + common->data_size_from_cmnd = 0; + reply = check_command(common, 6, DATA_DIR_NONE, + 0, 1, + "TEST UNIT READY"); + break; + + /* + * Although optional, this command is used by MS-Windows. We + * support a minimal version: BytChk must be 0. + */ + case VERIFY: + common->data_size_from_cmnd = 0; + reply = check_command(common, 10, DATA_DIR_NONE, + (1<<1) | (0xf<<2) | (3<<7), 1, + "VERIFY"); + if (reply == 0) + reply = do_verify(common); + break; + + case WRITE_6: + i = common->cmnd[4]; + common->data_size_from_cmnd = (i == 0) ? 
256 : i; + reply = check_command_size_in_blocks(common, 6, + DATA_DIR_FROM_HOST, + (7<<1) | (1<<4), 1, + "WRITE(6)"); + if (reply == 0) + reply = do_write(common); + break; + + case WRITE_10: + common->data_size_from_cmnd = + get_unaligned_be16(&common->cmnd[7]); + reply = check_command_size_in_blocks(common, 10, + DATA_DIR_FROM_HOST, + (1<<1) | (0xf<<2) | (3<<7), 1, + "WRITE(10)"); + if (reply == 0) + reply = do_write(common); + break; + + case WRITE_12: + common->data_size_from_cmnd = + get_unaligned_be32(&common->cmnd[6]); + reply = check_command_size_in_blocks(common, 12, + DATA_DIR_FROM_HOST, + (1<<1) | (0xf<<2) | (0xf<<6), 1, + "WRITE(12)"); + if (reply == 0) + reply = do_write(common); + break; + + /* + * Some mandatory commands that we recognize but don't implement. + * They don't mean much in this setting. It's left as an exercise + * for anyone interested to implement RESERVE and RELEASE in terms + * of Posix locks. + */ + case FORMAT_UNIT: + case RELEASE: + case RESERVE: + case SEND_DIAGNOSTIC: + /* Fall through */ + + default: +unknown_cmnd: + common->data_size_from_cmnd = 0; + sprintf(unknown, "Unknown x%02x", common->cmnd[0]); + reply = check_command(common, common->cmnd_size, + DATA_DIR_UNKNOWN, ~0, 0, unknown); + if (reply == 0) { + common->curlun->sense_data = SS_INVALID_COMMAND; + reply = -EINVAL; + } + break; + } + up_read(&common->filesem); + + if (reply == -EINTR || signal_pending(current)) + return -EINTR; + + /* Set up the single reply buffer for finish_reply() */ + if (reply == -EINVAL) + reply = 0; /* Error reply length */ + if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) { + reply = min((u32)reply, common->data_size_from_cmnd); + bh->inreq->length = reply; + bh->state = BUF_STATE_FULL; + common->residue -= reply; + } /* Otherwise it's already set */ + + return 0; +} + + +/*-------------------------------------------------------------------------*/ + +static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh) +{ + struct usb_request *req = bh->outreq; + struct bulk_cb_wrap *cbw = req->buf; + struct fsg_common *common = fsg->common; + + /* Was this a real packet? Should it be ignored? */ + if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags)) + return -EINVAL; + + /* Is the CBW valid? */ + if (req->actual != US_BULK_CB_WRAP_LEN || + cbw->Signature != cpu_to_le32( + US_BULK_CB_SIGN)) { + DBG(fsg, "invalid CBW: len %u sig 0x%x\n", + req->actual, + le32_to_cpu(cbw->Signature)); + + /* + * The Bulk-only spec says we MUST stall the IN endpoint + * (6.6.1), so it's unavoidable. It also says we must + * retain this state until the next reset, but there's + * no way to tell the controller driver it should ignore + * Clear-Feature(HALT) requests. + * + * We aren't required to halt the OUT endpoint; instead + * we can simply accept and discard any data received + * until the next reset. + */ + wedge_bulk_in_endpoint(fsg); + set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags); + return -EINVAL; + } + + /* Is the CBW meaningful? */ + if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~US_BULK_FLAG_IN || + cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) { + DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, " + "cmdlen %u\n", + cbw->Lun, cbw->Flags, cbw->Length); + + /* + * We can do anything we want here, so let's stall the + * bulk pipes if we are allowed to. 
+ */ + if (common->can_stall) { + fsg_set_halt(fsg, fsg->bulk_out); + halt_bulk_in_endpoint(fsg); + } + return -EINVAL; + } + + /* Save the command for later */ + common->cmnd_size = cbw->Length; + memcpy(common->cmnd, cbw->CDB, common->cmnd_size); + if (cbw->Flags & US_BULK_FLAG_IN) + common->data_dir = DATA_DIR_TO_HOST; + else + common->data_dir = DATA_DIR_FROM_HOST; + common->data_size = le32_to_cpu(cbw->DataTransferLength); + if (common->data_size == 0) + common->data_dir = DATA_DIR_NONE; + common->lun = cbw->Lun; + if (common->lun < common->nluns) + common->curlun = common->luns[common->lun]; + else + common->curlun = NULL; + common->tag = cbw->Tag; + return 0; +} + +static int get_next_command(struct fsg_common *common) +{ + struct fsg_buffhd *bh; + int rc = 0; + + /* Wait for the next buffer to become available */ + bh = common->next_buffhd_to_fill; + while (bh->state != BUF_STATE_EMPTY) { + rc = sleep_thread(common, true); + if (rc) + return rc; + } + + /* Queue a request to read a Bulk-only CBW */ + set_bulk_out_req_length(common, bh, US_BULK_CB_WRAP_LEN); + if (!start_out_transfer(common, bh)) + /* Don't know what to do if common->fsg is NULL */ + return -EIO; + + /* + * We will drain the buffer in software, which means we + * can reuse it for the next filling. No need to advance + * next_buffhd_to_fill. + */ + + /* Wait for the CBW to arrive */ + while (bh->state != BUF_STATE_FULL) { + rc = sleep_thread(common, true); + if (rc) + return rc; + } + smp_rmb(); + rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO; + bh->state = BUF_STATE_EMPTY; + + return rc; +} + + +/*-------------------------------------------------------------------------*/ + +static int alloc_request(struct fsg_common *common, struct usb_ep *ep, + struct usb_request **preq) +{ + *preq = usb_ep_alloc_request(ep, GFP_ATOMIC); + if (*preq) + return 0; + ERROR(common, "can't allocate request for %s\n", ep->name); + return -ENOMEM; +} + +/* Reset interface setting and re-init endpoint state (toggle etc). 
*/ +static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg) +{ + struct fsg_dev *fsg; + int i, rc = 0; + + if (common->running) + DBG(common, "reset interface\n"); + +reset: + /* Deallocate the requests */ + if (common->fsg) { + fsg = common->fsg; + + for (i = 0; i < common->fsg_num_buffers; ++i) { + struct fsg_buffhd *bh = &common->buffhds[i]; + + if (bh->inreq) { + usb_ep_free_request(fsg->bulk_in, bh->inreq); + bh->inreq = NULL; + } + if (bh->outreq) { + usb_ep_free_request(fsg->bulk_out, bh->outreq); + bh->outreq = NULL; + } + } + + /* Disable the endpoints */ + if (fsg->bulk_in_enabled) { + usb_ep_disable(fsg->bulk_in); + fsg->bulk_in->driver_data = NULL; + fsg->bulk_in_enabled = 0; + } + if (fsg->bulk_out_enabled) { + usb_ep_disable(fsg->bulk_out); + fsg->bulk_out->driver_data = NULL; + fsg->bulk_out_enabled = 0; + } + + common->fsg = NULL; + wake_up(&common->fsg_wait); + } + + common->running = 0; + if (!new_fsg || rc) + return rc; + + common->fsg = new_fsg; + fsg = common->fsg; + + /* Enable the endpoints */ + rc = config_ep_by_speed(common->gadget, &(fsg->function), fsg->bulk_in); + if (rc) + goto reset; + rc = usb_ep_enable(fsg->bulk_in); + if (rc) + goto reset; + fsg->bulk_in->driver_data = common; + fsg->bulk_in_enabled = 1; + + rc = config_ep_by_speed(common->gadget, &(fsg->function), + fsg->bulk_out); + if (rc) + goto reset; + rc = usb_ep_enable(fsg->bulk_out); + if (rc) + goto reset; + fsg->bulk_out->driver_data = common; + fsg->bulk_out_enabled = 1; + common->bulk_out_maxpacket = usb_endpoint_maxp(fsg->bulk_out->desc); + clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags); + + /* Allocate the requests */ + for (i = 0; i < common->fsg_num_buffers; ++i) { + struct fsg_buffhd *bh = &common->buffhds[i]; + + rc = alloc_request(common, fsg->bulk_in, &bh->inreq); + if (rc) + goto reset; + rc = alloc_request(common, fsg->bulk_out, &bh->outreq); + if (rc) + goto reset; + bh->inreq->buf = bh->outreq->buf = bh->buf; + bh->inreq->context = bh->outreq->context = bh; + bh->inreq->complete = bulk_in_complete; + bh->outreq->complete = bulk_out_complete; + } + + common->running = 1; + for (i = 0; i < common->nluns; ++i) + if (common->luns[i]) + common->luns[i]->unit_attention_data = + SS_RESET_OCCURRED; + return rc; +} + + +/****************************** ALT CONFIGS ******************************/ + +static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) +{ + struct fsg_dev *fsg = fsg_from_func(f); + fsg->common->new_fsg = fsg; + raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); + return USB_GADGET_DELAYED_STATUS; +} + +static void fsg_disable(struct usb_function *f) +{ + struct fsg_dev *fsg = fsg_from_func(f); + fsg->common->new_fsg = NULL; + raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); +} + + +/*-------------------------------------------------------------------------*/ + +static void handle_exception(struct fsg_common *common) +{ + siginfo_t info; + int i; + struct fsg_buffhd *bh; + enum fsg_state old_state; + struct fsg_lun *curlun; + unsigned int exception_req_tag; + + /* + * Clear the existing signals. Anything but SIGUSR1 is converted + * into a high-priority EXIT exception. 
+ */ + for (;;) { + int sig = + dequeue_signal_lock(current, &current->blocked, &info); + if (!sig) + break; + if (sig != SIGUSR1) { + if (common->state < FSG_STATE_EXIT) + DBG(common, "Main thread exiting on signal\n"); + raise_exception(common, FSG_STATE_EXIT); + } + } + + /* Cancel all the pending transfers */ + if (likely(common->fsg)) { + for (i = 0; i < common->fsg_num_buffers; ++i) { + bh = &common->buffhds[i]; + if (bh->inreq_busy) + usb_ep_dequeue(common->fsg->bulk_in, bh->inreq); + if (bh->outreq_busy) + usb_ep_dequeue(common->fsg->bulk_out, + bh->outreq); + } + + /* Wait until everything is idle */ + for (;;) { + int num_active = 0; + for (i = 0; i < common->fsg_num_buffers; ++i) { + bh = &common->buffhds[i]; + num_active += bh->inreq_busy + bh->outreq_busy; + } + if (num_active == 0) + break; + if (sleep_thread(common, true)) + return; + } + + /* Clear out the controller's fifos */ + if (common->fsg->bulk_in_enabled) + usb_ep_fifo_flush(common->fsg->bulk_in); + if (common->fsg->bulk_out_enabled) + usb_ep_fifo_flush(common->fsg->bulk_out); + } + + /* + * Reset the I/O buffer states and pointers, the SCSI + * state, and the exception. Then invoke the handler. + */ + spin_lock_irq(&common->lock); + + for (i = 0; i < common->fsg_num_buffers; ++i) { + bh = &common->buffhds[i]; + bh->state = BUF_STATE_EMPTY; + } + common->next_buffhd_to_fill = &common->buffhds[0]; + common->next_buffhd_to_drain = &common->buffhds[0]; + exception_req_tag = common->exception_req_tag; + old_state = common->state; + + if (old_state == FSG_STATE_ABORT_BULK_OUT) + common->state = FSG_STATE_STATUS_PHASE; + else { + for (i = 0; i < common->nluns; ++i) { + curlun = common->luns[i]; + if (!curlun) + continue; + curlun->prevent_medium_removal = 0; + curlun->sense_data = SS_NO_SENSE; + curlun->unit_attention_data = SS_NO_SENSE; + curlun->sense_data_info = 0; + curlun->info_valid = 0; + } + common->state = FSG_STATE_IDLE; + } + spin_unlock_irq(&common->lock); + + /* Carry out any extra actions required for the exception */ + switch (old_state) { + case FSG_STATE_ABORT_BULK_OUT: + send_status(common); + spin_lock_irq(&common->lock); + if (common->state == FSG_STATE_STATUS_PHASE) + common->state = FSG_STATE_IDLE; + spin_unlock_irq(&common->lock); + break; + + case FSG_STATE_RESET: + /* + * In case we were forced against our will to halt a + * bulk endpoint, clear the halt now. (The SuperH UDC + * requires this.) + */ + if (!fsg_is_set(common)) + break; + if (test_and_clear_bit(IGNORE_BULK_OUT, + &common->fsg->atomic_bitflags)) + usb_ep_clear_halt(common->fsg->bulk_in); + + if (common->ep0_req_tag == exception_req_tag) + ep0_queue(common); /* Complete the status stage */ + + /* + * Technically this should go here, but it would only be + * a waste of time. Ditto for the INTERFACE_CHANGE and + * CONFIG_CHANGE cases. 
+ */ + /* for (i = 0; i < common->nluns; ++i) */ + /* if (common->luns[i]) */ + /* common->luns[i]->unit_attention_data = */ + /* SS_RESET_OCCURRED; */ + break; + + case FSG_STATE_CONFIG_CHANGE: + do_set_interface(common, common->new_fsg); + if (common->new_fsg) + usb_composite_setup_continue(common->cdev); + break; + + case FSG_STATE_EXIT: + case FSG_STATE_TERMINATED: + do_set_interface(common, NULL); /* Free resources */ + spin_lock_irq(&common->lock); + common->state = FSG_STATE_TERMINATED; /* Stop the thread */ + spin_unlock_irq(&common->lock); + break; + + case FSG_STATE_INTERFACE_CHANGE: + case FSG_STATE_DISCONNECT: + case FSG_STATE_COMMAND_PHASE: + case FSG_STATE_DATA_PHASE: + case FSG_STATE_STATUS_PHASE: + case FSG_STATE_IDLE: + break; + } +} + + +/*-------------------------------------------------------------------------*/ + +static int fsg_main_thread(void *common_) +{ + struct fsg_common *common = common_; + + /* + * Allow the thread to be killed by a signal, but set the signal mask + * to block everything but INT, TERM, KILL, and USR1. + */ + allow_signal(SIGINT); + allow_signal(SIGTERM); + allow_signal(SIGKILL); + allow_signal(SIGUSR1); + + /* Allow the thread to be frozen */ + set_freezable(); + + /* + * Arrange for userspace references to be interpreted as kernel + * pointers. That way we can pass a kernel pointer to a routine + * that expects a __user pointer and it will work okay. + */ + set_fs(get_ds()); + + /* The main loop */ + while (common->state != FSG_STATE_TERMINATED) { + if (exception_in_progress(common) || signal_pending(current)) { + handle_exception(common); + continue; + } + + if (!common->running) { + sleep_thread(common, true); + continue; + } + + if (get_next_command(common)) + continue; + + spin_lock_irq(&common->lock); + if (!exception_in_progress(common)) + common->state = FSG_STATE_DATA_PHASE; + spin_unlock_irq(&common->lock); + + if (do_scsi_command(common) || finish_reply(common)) + continue; + + spin_lock_irq(&common->lock); + if (!exception_in_progress(common)) + common->state = FSG_STATE_STATUS_PHASE; + spin_unlock_irq(&common->lock); + + if (send_status(common)) + continue; + + spin_lock_irq(&common->lock); + if (!exception_in_progress(common)) + common->state = FSG_STATE_IDLE; + spin_unlock_irq(&common->lock); + } + + spin_lock_irq(&common->lock); + common->thread_task = NULL; + spin_unlock_irq(&common->lock); + + if (!common->ops || !common->ops->thread_exits + || common->ops->thread_exits(common) < 0) { + struct fsg_lun **curlun_it = common->luns; + unsigned i = common->nluns; + + down_write(&common->filesem); + for (; i--; ++curlun_it) { + struct fsg_lun *curlun = *curlun_it; + if (!curlun || !fsg_lun_is_open(curlun)) + continue; + + fsg_lun_close(curlun); + curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT; + } + up_write(&common->filesem); + } + + /* Let fsg_unbind() know the thread has exited */ + complete_and_exit(&common->thread_notifier, 0); +} + + +/*************************** DEVICE ATTRIBUTES ***************************/ + +static ssize_t ro_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct fsg_lun *curlun = fsg_lun_from_dev(dev); + + return fsg_show_ro(curlun, buf); +} + +static ssize_t nofua_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct fsg_lun *curlun = fsg_lun_from_dev(dev); + + return fsg_show_nofua(curlun, buf); +} + +static ssize_t file_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct fsg_lun *curlun = fsg_lun_from_dev(dev); + 
struct rw_semaphore *filesem = dev_get_drvdata(dev); + + return fsg_show_file(curlun, filesem, buf); +} + +static ssize_t ro_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fsg_lun *curlun = fsg_lun_from_dev(dev); + struct rw_semaphore *filesem = dev_get_drvdata(dev); + + return fsg_store_ro(curlun, filesem, buf, count); +} + +static ssize_t nofua_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fsg_lun *curlun = fsg_lun_from_dev(dev); + + return fsg_store_nofua(curlun, buf, count); +} + +static ssize_t file_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fsg_lun *curlun = fsg_lun_from_dev(dev); + struct rw_semaphore *filesem = dev_get_drvdata(dev); + + return fsg_store_file(curlun, filesem, buf, count); +} + +static DEVICE_ATTR_RW(nofua); +/* mode wil be set in fsg_lun_attr_is_visible() */ +static DEVICE_ATTR(ro, 0, ro_show, ro_store); +static DEVICE_ATTR(file, 0, file_show, file_store); + +/****************************** FSG COMMON ******************************/ + +static void fsg_common_release(struct kref *ref); + +static void fsg_lun_release(struct device *dev) +{ + /* Nothing needs to be done */ +} + +void fsg_common_get(struct fsg_common *common) +{ + kref_get(&common->ref); +} +EXPORT_SYMBOL_GPL(fsg_common_get); + +void fsg_common_put(struct fsg_common *common) +{ + kref_put(&common->ref, fsg_common_release); +} +EXPORT_SYMBOL_GPL(fsg_common_put); + +/* check if fsg_num_buffers is within a valid range */ +static inline int fsg_num_buffers_validate(unsigned int fsg_num_buffers) +{ + if (fsg_num_buffers >= 2 && fsg_num_buffers <= 4) + return 0; + pr_err("fsg_num_buffers %u is out of range (%d to %d)\n", + fsg_num_buffers, 2, 4); + return -EINVAL; +} + +static struct fsg_common *fsg_common_setup(struct fsg_common *common) +{ + if (!common) { + common = kzalloc(sizeof(*common), GFP_KERNEL); + if (!common) + return ERR_PTR(-ENOMEM); + common->free_storage_on_release = 1; + } else { + common->free_storage_on_release = 0; + } + init_rwsem(&common->filesem); + spin_lock_init(&common->lock); + kref_init(&common->ref); + init_completion(&common->thread_notifier); + init_waitqueue_head(&common->fsg_wait); + common->state = FSG_STATE_TERMINATED; + + return common; +} + +void fsg_common_set_sysfs(struct fsg_common *common, bool sysfs) +{ + common->sysfs = sysfs; +} +EXPORT_SYMBOL_GPL(fsg_common_set_sysfs); + +static void _fsg_common_free_buffers(struct fsg_buffhd *buffhds, unsigned n) +{ + if (buffhds) { + struct fsg_buffhd *bh = buffhds; + while (n--) { + kfree(bh->buf); + ++bh; + } + kfree(buffhds); + } +} + +int fsg_common_set_num_buffers(struct fsg_common *common, unsigned int n) +{ + struct fsg_buffhd *bh, *buffhds; + int i, rc; + + rc = fsg_num_buffers_validate(n); + if (rc != 0) + return rc; + + buffhds = kcalloc(n, sizeof(*buffhds), GFP_KERNEL); + if (!buffhds) + return -ENOMEM; + + /* Data buffers cyclic list */ + bh = buffhds; + i = n; + goto buffhds_first_it; + do { + bh->next = bh + 1; + ++bh; +buffhds_first_it: + bh->buf = kmalloc(FSG_BUFLEN, GFP_KERNEL); + if (unlikely(!bh->buf)) + goto error_release; + } while (--i); + bh->next = buffhds; + + _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers); + common->fsg_num_buffers = n; + common->buffhds = buffhds; + + return 0; + +error_release: + /* + * "buf"s pointed to by heads after n - i are NULL + * so releasing them won't hurt + */ + 
_fsg_common_free_buffers(buffhds, n); + + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(fsg_common_set_num_buffers); + +void fsg_common_remove_lun(struct fsg_lun *lun, bool sysfs) +{ + if (sysfs) + device_unregister(&lun->dev); + fsg_lun_close(lun); + kfree(lun); +} +EXPORT_SYMBOL_GPL(fsg_common_remove_lun); + +static void _fsg_common_remove_luns(struct fsg_common *common, int n) +{ + int i; + + for (i = 0; i < n; ++i) + if (common->luns[i]) { + fsg_common_remove_lun(common->luns[i], common->sysfs); + common->luns[i] = NULL; + } +} +EXPORT_SYMBOL_GPL(fsg_common_remove_luns); + +void fsg_common_remove_luns(struct fsg_common *common) +{ + _fsg_common_remove_luns(common, common->nluns); +} + +void fsg_common_free_luns(struct fsg_common *common) +{ + fsg_common_remove_luns(common); + kfree(common->luns); + common->luns = NULL; +} +EXPORT_SYMBOL_GPL(fsg_common_free_luns); + +int fsg_common_set_nluns(struct fsg_common *common, int nluns) +{ + struct fsg_lun **curlun; + + /* Find out how many LUNs there should be */ + if (nluns < 1 || nluns > FSG_MAX_LUNS) { + pr_err("invalid number of LUNs: %u\n", nluns); + return -EINVAL; + } + + curlun = kcalloc(FSG_MAX_LUNS, sizeof(*curlun), GFP_KERNEL); + if (unlikely(!curlun)) + return -ENOMEM; + + if (common->luns) + fsg_common_free_luns(common); + + common->luns = curlun; + common->nluns = nluns; + + return 0; +} +EXPORT_SYMBOL_GPL(fsg_common_set_nluns); + +void fsg_common_set_ops(struct fsg_common *common, + const struct fsg_operations *ops) +{ + common->ops = ops; +} +EXPORT_SYMBOL_GPL(fsg_common_set_ops); + +void fsg_common_free_buffers(struct fsg_common *common) +{ + _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers); + common->buffhds = NULL; +} +EXPORT_SYMBOL_GPL(fsg_common_free_buffers); + +int fsg_common_set_cdev(struct fsg_common *common, + struct usb_composite_dev *cdev, bool can_stall) +{ + struct usb_string *us; + + common->gadget = cdev->gadget; + common->ep0 = cdev->gadget->ep0; + common->ep0req = cdev->req; + common->cdev = cdev; + + us = usb_gstrings_attach(cdev, fsg_strings_array, + ARRAY_SIZE(fsg_strings)); + if (IS_ERR(us)) + return PTR_ERR(us); + + fsg_intf_desc.iInterface = us[FSG_STRING_INTERFACE].id; + + /* + * Some peripheral controllers are known not to be able to + * halt bulk endpoints correctly. If one of them is present, + * disable stalls. + */ + common->can_stall = can_stall && !(gadget_is_at91(common->gadget)); + + return 0; +} +EXPORT_SYMBOL_GPL(fsg_common_set_cdev); + +static struct attribute *fsg_lun_dev_attrs[] = { + &dev_attr_ro.attr, + &dev_attr_file.attr, + &dev_attr_nofua.attr, + NULL +}; + +static umode_t fsg_lun_dev_is_visible(struct kobject *kobj, + struct attribute *attr, int idx) +{ + struct device *dev = kobj_to_dev(kobj); + struct fsg_lun *lun = fsg_lun_from_dev(dev); + + if (attr == &dev_attr_ro.attr) + return lun->cdrom ? S_IRUGO : (S_IWUSR | S_IRUGO); + if (attr == &dev_attr_file.attr) + return lun->removable ? 
(S_IWUSR | S_IRUGO) : S_IRUGO; + return attr->mode; +} + +static const struct attribute_group fsg_lun_dev_group = { + .attrs = fsg_lun_dev_attrs, + .is_visible = fsg_lun_dev_is_visible, +}; + +static const struct attribute_group *fsg_lun_dev_groups[] = { + &fsg_lun_dev_group, + NULL +}; + +int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg, + unsigned int id, const char *name, + const char **name_pfx) +{ + struct fsg_lun *lun; + char *pathbuf, *p; + int rc = -ENOMEM; + + if (!common->nluns || !common->luns) + return -ENODEV; + + if (common->luns[id]) + return -EBUSY; + + if (!cfg->filename && !cfg->removable) { + pr_err("no file given for LUN%d\n", id); + return -EINVAL; + } + + lun = kzalloc(sizeof(*lun), GFP_KERNEL); + if (!lun) + return -ENOMEM; + + lun->name_pfx = name_pfx; + + lun->cdrom = !!cfg->cdrom; + lun->ro = cfg->cdrom || cfg->ro; + lun->initially_ro = lun->ro; + lun->removable = !!cfg->removable; + + if (!common->sysfs) { + /* we DON'T own the name!*/ + lun->name = name; + } else { + lun->dev.release = fsg_lun_release; + lun->dev.parent = &common->gadget->dev; + lun->dev.groups = fsg_lun_dev_groups; + dev_set_drvdata(&lun->dev, &common->filesem); + dev_set_name(&lun->dev, "%s", name); + lun->name = dev_name(&lun->dev); + + rc = device_register(&lun->dev); + if (rc) { + pr_info("failed to register LUN%d: %d\n", id, rc); + put_device(&lun->dev); + goto error_sysfs; + } + } + + common->luns[id] = lun; + + if (cfg->filename) { + rc = fsg_lun_open(lun, cfg->filename); + if (rc) + goto error_lun; + } + + pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); + p = "(no medium)"; + if (fsg_lun_is_open(lun)) { + p = "(error)"; + if (pathbuf) { + p = d_path(&lun->filp->f_path, pathbuf, PATH_MAX); + if (IS_ERR(p)) + p = "(error)"; + } + } + pr_info("LUN: %s%s%sfile: %s\n", + lun->removable ? "removable " : "", + lun->ro ? "read only " : "", + lun->cdrom ? "CD-ROM " : "", + p); + kfree(pathbuf); + + return 0; + +error_lun: + if (common->sysfs) + device_unregister(&lun->dev); + fsg_lun_close(lun); + common->luns[id] = NULL; +error_sysfs: + kfree(lun); + return rc; +} +EXPORT_SYMBOL_GPL(fsg_common_create_lun); + +int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg) +{ + char buf[8]; /* enough for 100000000 different numbers, decimal */ + int i, rc; + + for (i = 0; i < common->nluns; ++i) { + snprintf(buf, sizeof(buf), "lun%d", i); + rc = fsg_common_create_lun(common, &cfg->luns[i], i, buf, NULL); + if (rc) + goto fail; + } + + pr_info("Number of LUNs=%d\n", common->nluns); + + return 0; + +fail: + _fsg_common_remove_luns(common, i); + return rc; +} +EXPORT_SYMBOL_GPL(fsg_common_create_luns); + +void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn, + const char *pn) +{ + int i; + + /* Prepare inquiryString */ + i = get_default_bcdDevice(); + snprintf(common->inquiry_string, sizeof(common->inquiry_string), + "%-8s%-16s%04x", vn ?: "Linux", + /* Assume product name dependent on the first LUN */ + pn ?: ((*common->luns)->cdrom + ? 
"File-CD Gadget" + : "File-Stor Gadget"), + i); +} +EXPORT_SYMBOL_GPL(fsg_common_set_inquiry_string); + +int fsg_common_run_thread(struct fsg_common *common) +{ + common->state = FSG_STATE_IDLE; + /* Tell the thread to start working */ + common->thread_task = + kthread_create(fsg_main_thread, common, "file-storage"); + if (IS_ERR(common->thread_task)) { + common->state = FSG_STATE_TERMINATED; + return PTR_ERR(common->thread_task); + } + + DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task)); + + wake_up_process(common->thread_task); + + return 0; +} +EXPORT_SYMBOL_GPL(fsg_common_run_thread); + +static void fsg_common_release(struct kref *ref) +{ + struct fsg_common *common = container_of(ref, struct fsg_common, ref); + + /* If the thread isn't already dead, tell it to exit now */ + if (common->state != FSG_STATE_TERMINATED) { + raise_exception(common, FSG_STATE_EXIT); + wait_for_completion(&common->thread_notifier); + } + + if (likely(common->luns)) { + struct fsg_lun **lun_it = common->luns; + unsigned i = common->nluns; + + /* In error recovery common->nluns may be zero. */ + for (; i; --i, ++lun_it) { + struct fsg_lun *lun = *lun_it; + if (!lun) + continue; + fsg_lun_close(lun); + if (common->sysfs) + device_unregister(&lun->dev); + kfree(lun); + } + + kfree(common->luns); + } + + _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers); + if (common->free_storage_on_release) + kfree(common); +} + + +/*-------------------------------------------------------------------------*/ + +static int fsg_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct fsg_dev *fsg = fsg_from_func(f); + struct usb_gadget *gadget = c->cdev->gadget; + int i; + struct usb_ep *ep; + unsigned max_burst; + int ret; + struct fsg_opts *opts; + + opts = fsg_opts_from_func_inst(f->fi); + if (!opts->no_configfs) { + ret = fsg_common_set_cdev(fsg->common, c->cdev, + fsg->common->can_stall); + if (ret) + return ret; + fsg_common_set_inquiry_string(fsg->common, NULL, NULL); + ret = fsg_common_run_thread(fsg->common); + if (ret) + return ret; + } + + fsg->gadget = gadget; + + /* New interface */ + i = usb_interface_id(c, f); + if (i < 0) + return i; + fsg_intf_desc.bInterfaceNumber = i; + fsg->interface_number = i; + + /* Find all the endpoints we will use */ + ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc); + if (!ep) + goto autoconf_fail; + ep->driver_data = fsg->common; /* claim the endpoint */ + fsg->bulk_in = ep; + + ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc); + if (!ep) + goto autoconf_fail; + ep->driver_data = fsg->common; /* claim the endpoint */ + fsg->bulk_out = ep; + + /* Assume endpoint addresses are the same for both speeds */ + fsg_hs_bulk_in_desc.bEndpointAddress = + fsg_fs_bulk_in_desc.bEndpointAddress; + fsg_hs_bulk_out_desc.bEndpointAddress = + fsg_fs_bulk_out_desc.bEndpointAddress; + + /* Calculate bMaxBurst, we know packet size is 1024 */ + max_burst = min_t(unsigned, FSG_BUFLEN / 1024, 15); + + fsg_ss_bulk_in_desc.bEndpointAddress = + fsg_fs_bulk_in_desc.bEndpointAddress; + fsg_ss_bulk_in_comp_desc.bMaxBurst = max_burst; + + fsg_ss_bulk_out_desc.bEndpointAddress = + fsg_fs_bulk_out_desc.bEndpointAddress; + fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst; + + ret = usb_assign_descriptors(f, fsg_fs_function, fsg_hs_function, + fsg_ss_function); + if (ret) + goto autoconf_fail; + + return 0; + +autoconf_fail: + ERROR(fsg, "unable to autoconfigure all endpoints\n"); + return -ENOTSUPP; +} + +/****************************** ALLOCATE FUNCTION 
*************************/ + +static void fsg_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct fsg_dev *fsg = fsg_from_func(f); + struct fsg_common *common = fsg->common; + + DBG(fsg, "unbind\n"); + if (fsg->common->fsg == fsg) { + fsg->common->new_fsg = NULL; + raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE); + /* FIXME: make interruptible or killable somehow? */ + wait_event(common->fsg_wait, common->fsg != fsg); + } + + usb_free_all_descriptors(&fsg->function); +} + +static inline struct fsg_lun_opts *to_fsg_lun_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct fsg_lun_opts, group); +} + +static inline struct fsg_opts *to_fsg_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct fsg_opts, + func_inst.group); +} + +CONFIGFS_ATTR_STRUCT(fsg_lun_opts); +CONFIGFS_ATTR_OPS(fsg_lun_opts); + +static void fsg_lun_attr_release(struct config_item *item) +{ + struct fsg_lun_opts *lun_opts; + + lun_opts = to_fsg_lun_opts(item); + kfree(lun_opts); +} + +static struct configfs_item_operations fsg_lun_item_ops = { + .release = fsg_lun_attr_release, + .show_attribute = fsg_lun_opts_attr_show, + .store_attribute = fsg_lun_opts_attr_store, +}; + +static ssize_t fsg_lun_opts_file_show(struct fsg_lun_opts *opts, char *page) +{ + struct fsg_opts *fsg_opts; + + fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent); + + return fsg_show_file(opts->lun, &fsg_opts->common->filesem, page); +} + +static ssize_t fsg_lun_opts_file_store(struct fsg_lun_opts *opts, + const char *page, size_t len) +{ + struct fsg_opts *fsg_opts; + + fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent); + + return fsg_store_file(opts->lun, &fsg_opts->common->filesem, page, len); +} + +static struct fsg_lun_opts_attribute fsg_lun_opts_file = + __CONFIGFS_ATTR(file, S_IRUGO | S_IWUSR, fsg_lun_opts_file_show, + fsg_lun_opts_file_store); + +static ssize_t fsg_lun_opts_ro_show(struct fsg_lun_opts *opts, char *page) +{ + return fsg_show_ro(opts->lun, page); +} + +static ssize_t fsg_lun_opts_ro_store(struct fsg_lun_opts *opts, + const char *page, size_t len) +{ + struct fsg_opts *fsg_opts; + + fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent); + + return fsg_store_ro(opts->lun, &fsg_opts->common->filesem, page, len); +} + +static struct fsg_lun_opts_attribute fsg_lun_opts_ro = + __CONFIGFS_ATTR(ro, S_IRUGO | S_IWUSR, fsg_lun_opts_ro_show, + fsg_lun_opts_ro_store); + +static ssize_t fsg_lun_opts_removable_show(struct fsg_lun_opts *opts, + char *page) +{ + return fsg_show_removable(opts->lun, page); +} + +static ssize_t fsg_lun_opts_removable_store(struct fsg_lun_opts *opts, + const char *page, size_t len) +{ + return fsg_store_removable(opts->lun, page, len); +} + +static struct fsg_lun_opts_attribute fsg_lun_opts_removable = + __CONFIGFS_ATTR(removable, S_IRUGO | S_IWUSR, + fsg_lun_opts_removable_show, + fsg_lun_opts_removable_store); + +static ssize_t fsg_lun_opts_cdrom_show(struct fsg_lun_opts *opts, char *page) +{ + return fsg_show_cdrom(opts->lun, page); +} + +static ssize_t fsg_lun_opts_cdrom_store(struct fsg_lun_opts *opts, + const char *page, size_t len) +{ + struct fsg_opts *fsg_opts; + + fsg_opts = to_fsg_opts(opts->group.cg_item.ci_parent); + + return fsg_store_cdrom(opts->lun, &fsg_opts->common->filesem, page, + len); +} + +static struct fsg_lun_opts_attribute fsg_lun_opts_cdrom = + __CONFIGFS_ATTR(cdrom, S_IRUGO | S_IWUSR, fsg_lun_opts_cdrom_show, + fsg_lun_opts_cdrom_store); + +static ssize_t fsg_lun_opts_nofua_show(struct 
fsg_lun_opts *opts, char *page) +{ + return fsg_show_nofua(opts->lun, page); +} + +static ssize_t fsg_lun_opts_nofua_store(struct fsg_lun_opts *opts, + const char *page, size_t len) +{ + return fsg_store_nofua(opts->lun, page, len); +} + +static struct fsg_lun_opts_attribute fsg_lun_opts_nofua = + __CONFIGFS_ATTR(nofua, S_IRUGO | S_IWUSR, fsg_lun_opts_nofua_show, + fsg_lun_opts_nofua_store); + +static struct configfs_attribute *fsg_lun_attrs[] = { + &fsg_lun_opts_file.attr, + &fsg_lun_opts_ro.attr, + &fsg_lun_opts_removable.attr, + &fsg_lun_opts_cdrom.attr, + &fsg_lun_opts_nofua.attr, + NULL, +}; + +static struct config_item_type fsg_lun_type = { + .ct_item_ops = &fsg_lun_item_ops, + .ct_attrs = fsg_lun_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct config_group *fsg_lun_make(struct config_group *group, + const char *name) +{ + struct fsg_lun_opts *opts; + struct fsg_opts *fsg_opts; + struct fsg_lun_config config; + char *num_str; + u8 num; + int ret; + + num_str = strchr(name, '.'); + if (!num_str) { + pr_err("Unable to locate . in LUN.NUMBER\n"); + return ERR_PTR(-EINVAL); + } + num_str++; + + ret = kstrtou8(num_str, 0, &num); + if (ret) + return ERR_PTR(ret); + + fsg_opts = to_fsg_opts(&group->cg_item); + if (num >= FSG_MAX_LUNS) + return ERR_PTR(-ERANGE); + + mutex_lock(&fsg_opts->lock); + if (fsg_opts->refcnt || fsg_opts->common->luns[num]) { + ret = -EBUSY; + goto out; + } + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) { + ret = -ENOMEM; + goto out; + } + + memset(&config, 0, sizeof(config)); + config.removable = true; + + ret = fsg_common_create_lun(fsg_opts->common, &config, num, name, + (const char **)&group->cg_item.ci_name); + if (ret) { + kfree(opts); + goto out; + } + opts->lun = fsg_opts->common->luns[num]; + opts->lun_id = num; + mutex_unlock(&fsg_opts->lock); + + config_group_init_type_name(&opts->group, name, &fsg_lun_type); + + return &opts->group; +out: + mutex_unlock(&fsg_opts->lock); + return ERR_PTR(ret); +} + +static void fsg_lun_drop(struct config_group *group, struct config_item *item) +{ + struct fsg_lun_opts *lun_opts; + struct fsg_opts *fsg_opts; + + lun_opts = to_fsg_lun_opts(item); + fsg_opts = to_fsg_opts(&group->cg_item); + + mutex_lock(&fsg_opts->lock); + if (fsg_opts->refcnt) { + struct config_item *gadget; + + gadget = group->cg_item.ci_parent->ci_parent; + unregister_gadget_item(gadget); + } + + fsg_common_remove_lun(lun_opts->lun, fsg_opts->common->sysfs); + fsg_opts->common->luns[lun_opts->lun_id] = NULL; + lun_opts->lun_id = 0; + mutex_unlock(&fsg_opts->lock); + + config_item_put(item); +} + +CONFIGFS_ATTR_STRUCT(fsg_opts); +CONFIGFS_ATTR_OPS(fsg_opts); + +static void fsg_attr_release(struct config_item *item) +{ + struct fsg_opts *opts = to_fsg_opts(item); + + usb_put_function_instance(&opts->func_inst); +} + +static struct configfs_item_operations fsg_item_ops = { + .release = fsg_attr_release, + .show_attribute = fsg_opts_attr_show, + .store_attribute = fsg_opts_attr_store, +}; + +static ssize_t fsg_opts_stall_show(struct fsg_opts *opts, char *page) +{ + int result; + + mutex_lock(&opts->lock); + result = sprintf(page, "%d", opts->common->can_stall); + mutex_unlock(&opts->lock); + + return result; +} + +static ssize_t fsg_opts_stall_store(struct fsg_opts *opts, const char *page, + size_t len) +{ + int ret; + bool stall; + + mutex_lock(&opts->lock); + + if (opts->refcnt) { + mutex_unlock(&opts->lock); + return -EBUSY; + } + + ret = strtobool(page, &stall); + if (!ret) { + opts->common->can_stall = stall; + ret = len; + } + + 
mutex_unlock(&opts->lock); + + return ret; +} + +static struct fsg_opts_attribute fsg_opts_stall = + __CONFIGFS_ATTR(stall, S_IRUGO | S_IWUSR, fsg_opts_stall_show, + fsg_opts_stall_store); + +#ifdef CONFIG_USB_GADGET_DEBUG_FILES +static ssize_t fsg_opts_num_buffers_show(struct fsg_opts *opts, char *page) +{ + int result; + + mutex_lock(&opts->lock); + result = sprintf(page, "%d", opts->common->fsg_num_buffers); + mutex_unlock(&opts->lock); + + return result; +} + +static ssize_t fsg_opts_num_buffers_store(struct fsg_opts *opts, + const char *page, size_t len) +{ + int ret; + u8 num; + + mutex_lock(&opts->lock); + if (opts->refcnt) { + ret = -EBUSY; + goto end; + } + ret = kstrtou8(page, 0, &num); + if (ret) + goto end; + + ret = fsg_num_buffers_validate(num); + if (ret) + goto end; + + fsg_common_set_num_buffers(opts->common, num); + ret = len; + +end: + mutex_unlock(&opts->lock); + return ret; +} + +static struct fsg_opts_attribute fsg_opts_num_buffers = + __CONFIGFS_ATTR(num_buffers, S_IRUGO | S_IWUSR, + fsg_opts_num_buffers_show, + fsg_opts_num_buffers_store); + +#endif + +static struct configfs_attribute *fsg_attrs[] = { + &fsg_opts_stall.attr, +#ifdef CONFIG_USB_GADGET_DEBUG_FILES + &fsg_opts_num_buffers.attr, +#endif + NULL, +}; + +static struct configfs_group_operations fsg_group_ops = { + .make_group = fsg_lun_make, + .drop_item = fsg_lun_drop, +}; + +static struct config_item_type fsg_func_type = { + .ct_item_ops = &fsg_item_ops, + .ct_group_ops = &fsg_group_ops, + .ct_attrs = fsg_attrs, + .ct_owner = THIS_MODULE, +}; + +static void fsg_free_inst(struct usb_function_instance *fi) +{ + struct fsg_opts *opts; + + opts = fsg_opts_from_func_inst(fi); + fsg_common_put(opts->common); + kfree(opts); +} + +static struct usb_function_instance *fsg_alloc_inst(void) +{ + struct fsg_opts *opts; + struct fsg_lun_config config; + int rc; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return ERR_PTR(-ENOMEM); + mutex_init(&opts->lock); + opts->func_inst.free_func_inst = fsg_free_inst; + opts->common = fsg_common_setup(opts->common); + if (IS_ERR(opts->common)) { + rc = PTR_ERR(opts->common); + goto release_opts; + } + rc = fsg_common_set_nluns(opts->common, FSG_MAX_LUNS); + if (rc) + goto release_opts; + + rc = fsg_common_set_num_buffers(opts->common, + CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS); + if (rc) + goto release_luns; + + pr_info(FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n"); + + memset(&config, 0, sizeof(config)); + config.removable = true; + rc = fsg_common_create_lun(opts->common, &config, 0, "lun.0", + (const char **)&opts->func_inst.group.cg_item.ci_name); + opts->lun0.lun = opts->common->luns[0]; + opts->lun0.lun_id = 0; + config_group_init_type_name(&opts->lun0.group, "lun.0", &fsg_lun_type); + opts->default_groups[0] = &opts->lun0.group; + opts->func_inst.group.default_groups = opts->default_groups; + + config_group_init_type_name(&opts->func_inst.group, "", &fsg_func_type); + + return &opts->func_inst; + +release_luns: + kfree(opts->common->luns); +release_opts: + kfree(opts); + return ERR_PTR(rc); +} + +static void fsg_free(struct usb_function *f) +{ + struct fsg_dev *fsg; + struct fsg_opts *opts; + + fsg = container_of(f, struct fsg_dev, function); + opts = container_of(f->fi, struct fsg_opts, func_inst); + + mutex_lock(&opts->lock); + opts->refcnt--; + mutex_unlock(&opts->lock); + + kfree(fsg); +} + +static struct usb_function *fsg_alloc(struct usb_function_instance *fi) +{ + struct fsg_opts *opts = fsg_opts_from_func_inst(fi); + struct fsg_common *common 
= opts->common; + struct fsg_dev *fsg; + unsigned nluns, i; + + fsg = kzalloc(sizeof(*fsg), GFP_KERNEL); + if (unlikely(!fsg)) + return ERR_PTR(-ENOMEM); + + mutex_lock(&opts->lock); + if (!opts->refcnt) { + for (nluns = i = 0; i < FSG_MAX_LUNS; ++i) + if (common->luns[i]) + nluns = i + 1; + if (!nluns) + pr_warn("No LUNS defined, continuing anyway\n"); + else + common->nluns = nluns; + pr_info("Number of LUNs=%u\n", common->nluns); + } + opts->refcnt++; + mutex_unlock(&opts->lock); + + fsg->function.name = FSG_DRIVER_DESC; + fsg->function.bind = fsg_bind; + fsg->function.unbind = fsg_unbind; + fsg->function.setup = fsg_setup; + fsg->function.set_alt = fsg_set_alt; + fsg->function.disable = fsg_disable; + fsg->function.free_func = fsg_free; + + fsg->common = common; + + return &fsg->function; +} + +DECLARE_USB_FUNCTION_INIT(mass_storage, fsg_alloc_inst, fsg_alloc); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michal Nazarewicz"); + +/************************* Module parameters *************************/ + + +void fsg_config_from_params(struct fsg_config *cfg, + const struct fsg_module_parameters *params, + unsigned int fsg_num_buffers) +{ + struct fsg_lun_config *lun; + unsigned i; + + /* Configure LUNs */ + cfg->nluns = + min(params->luns ?: (params->file_count ?: 1u), + (unsigned)FSG_MAX_LUNS); + for (i = 0, lun = cfg->luns; i < cfg->nluns; ++i, ++lun) { + lun->ro = !!params->ro[i]; + lun->cdrom = !!params->cdrom[i]; + lun->removable = !!params->removable[i]; + lun->filename = + params->file_count > i && params->file[i][0] + ? params->file[i] + : NULL; + } + + /* Let MSF use defaults */ + cfg->vendor_name = NULL; + cfg->product_name = NULL; + + cfg->ops = NULL; + cfg->private_data = NULL; + + /* Finalise */ + cfg->can_stall = params->stall; + cfg->fsg_num_buffers = fsg_num_buffers; +} +EXPORT_SYMBOL_GPL(fsg_config_from_params); diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h new file mode 100644 index 000000000..b4866fcef --- /dev/null +++ b/drivers/usb/gadget/function/f_mass_storage.h @@ -0,0 +1,166 @@ +#ifndef USB_F_MASS_STORAGE_H +#define USB_F_MASS_STORAGE_H + +#include <linux/usb/composite.h> +#include "storage_common.h" + +struct fsg_module_parameters { + char *file[FSG_MAX_LUNS]; + bool ro[FSG_MAX_LUNS]; + bool removable[FSG_MAX_LUNS]; + bool cdrom[FSG_MAX_LUNS]; + bool nofua[FSG_MAX_LUNS]; + + unsigned int file_count, ro_count, removable_count, cdrom_count; + unsigned int nofua_count; + unsigned int luns; /* nluns */ + bool stall; /* can_stall */ +}; + +#define _FSG_MODULE_PARAM_ARRAY(prefix, params, name, type, desc) \ + module_param_array_named(prefix ## name, params.name, type, \ + &prefix ## params.name ## _count, \ + S_IRUGO); \ + MODULE_PARM_DESC(prefix ## name, desc) + +#define _FSG_MODULE_PARAM(prefix, params, name, type, desc) \ + module_param_named(prefix ## name, params.name, type, \ + S_IRUGO); \ + MODULE_PARM_DESC(prefix ## name, desc) + +#define __FSG_MODULE_PARAMETERS(prefix, params) \ + _FSG_MODULE_PARAM_ARRAY(prefix, params, file, charp, \ + "names of backing files or devices"); \ + _FSG_MODULE_PARAM_ARRAY(prefix, params, ro, bool, \ + "true to force read-only"); \ + _FSG_MODULE_PARAM_ARRAY(prefix, params, removable, bool, \ + "true to simulate removable media"); \ + _FSG_MODULE_PARAM_ARRAY(prefix, params, cdrom, bool, \ + "true to simulate CD-ROM instead of disk"); \ + _FSG_MODULE_PARAM_ARRAY(prefix, params, nofua, bool, \ + "true to ignore SCSI WRITE(10,12) FUA bit"); \ + _FSG_MODULE_PARAM(prefix, params, 
luns, uint, \ + "number of LUNs"); \ + _FSG_MODULE_PARAM(prefix, params, stall, bool, \ + "false to prevent bulk stalls") + +#ifdef CONFIG_USB_GADGET_DEBUG_FILES + +#define FSG_MODULE_PARAMETERS(prefix, params) \ + __FSG_MODULE_PARAMETERS(prefix, params); \ + module_param_named(num_buffers, fsg_num_buffers, uint, S_IRUGO);\ + MODULE_PARM_DESC(num_buffers, "Number of pipeline buffers") +#else + +#define FSG_MODULE_PARAMETERS(prefix, params) \ + __FSG_MODULE_PARAMETERS(prefix, params) + +#endif + +struct fsg_common; + +/* FSF callback functions */ +struct fsg_operations { + /* + * Callback function to call when thread exits. If no + * callback is set or it returns value lower then zero MSF + * will force eject all LUNs it operates on (including those + * marked as non-removable or with prevent_medium_removal flag + * set). + */ + int (*thread_exits)(struct fsg_common *common); +}; + +struct fsg_lun_opts { + struct config_group group; + struct fsg_lun *lun; + int lun_id; +}; + +struct fsg_opts { + struct fsg_common *common; + struct usb_function_instance func_inst; + struct fsg_lun_opts lun0; + struct config_group *default_groups[2]; + bool no_configfs; /* for legacy gadgets */ + + /* + * Read/write access to configfs attributes is handled by configfs. + * + * This is to protect the data from concurrent access by read/write + * and create symlink/remove symlink. + */ + struct mutex lock; + int refcnt; +}; + +struct fsg_lun_config { + const char *filename; + char ro; + char removable; + char cdrom; + char nofua; +}; + +struct fsg_config { + unsigned nluns; + struct fsg_lun_config luns[FSG_MAX_LUNS]; + + /* Callback functions. */ + const struct fsg_operations *ops; + /* Gadget's private data. */ + void *private_data; + + const char *vendor_name; /* 8 characters or less */ + const char *product_name; /* 16 characters or less */ + + char can_stall; + unsigned int fsg_num_buffers; +}; + +static inline struct fsg_opts * +fsg_opts_from_func_inst(const struct usb_function_instance *fi) +{ + return container_of(fi, struct fsg_opts, func_inst); +} + +void fsg_common_get(struct fsg_common *common); + +void fsg_common_put(struct fsg_common *common); + +void fsg_common_set_sysfs(struct fsg_common *common, bool sysfs); + +int fsg_common_set_num_buffers(struct fsg_common *common, unsigned int n); + +void fsg_common_free_buffers(struct fsg_common *common); + +int fsg_common_set_cdev(struct fsg_common *common, + struct usb_composite_dev *cdev, bool can_stall); + +void fsg_common_remove_lun(struct fsg_lun *lun, bool sysfs); + +void fsg_common_remove_luns(struct fsg_common *common); + +void fsg_common_free_luns(struct fsg_common *common); + +int fsg_common_set_nluns(struct fsg_common *common, int nluns); + +void fsg_common_set_ops(struct fsg_common *common, + const struct fsg_operations *ops); + +int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg, + unsigned int id, const char *name, + const char **name_pfx); + +int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg); + +void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn, + const char *pn); + +int fsg_common_run_thread(struct fsg_common *common); + +void fsg_config_from_params(struct fsg_config *cfg, + const struct fsg_module_parameters *params, + unsigned int fsg_num_buffers); + +#endif /* USB_F_MASS_STORAGE_H */ diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c new file mode 100644 index 000000000..6316aa5b1 --- /dev/null +++ 
b/drivers/usb/gadget/function/f_midi.c @@ -0,0 +1,1176 @@ +/* + * f_midi.c -- USB MIDI class function driver + * + * Copyright (C) 2006 Thumtronics Pty Ltd. + * Developed for Thumtronics by Grey Innovation + * Ben Williamson <ben.williamson@greyinnovation.com> + * + * Rewritten for the composite framework + * Copyright (C) 2011 Daniel Mack <zonque@gmail.com> + * + * Based on drivers/usb/gadget/f_audio.c, + * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org> + * Copyright (C) 2008 Analog Devices, Inc + * + * and drivers/usb/gadget/midi.c, + * Copyright (C) 2006 Thumtronics Pty Ltd. + * Ben Williamson <ben.williamson@greyinnovation.com> + * + * Licensed under the GPL-2 or later. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/device.h> + +#include <sound/core.h> +#include <sound/initval.h> +#include <sound/rawmidi.h> + +#include <linux/usb/ch9.h> +#include <linux/usb/gadget.h> +#include <linux/usb/audio.h> +#include <linux/usb/midi.h> + +#include "u_f.h" +#include "u_midi.h" + +MODULE_AUTHOR("Ben Williamson"); +MODULE_LICENSE("GPL v2"); + +static const char f_midi_shortname[] = "f_midi"; +static const char f_midi_longname[] = "MIDI Gadget"; + +/* + * We can only handle 16 cables on one single endpoint, as cable numbers are + * stored in 4-bit fields. And as the interface currently only holds one + * single endpoint, this is the maximum number of ports we can allow. + */ +#define MAX_PORTS 16 + +/* + * This is a gadget, and the IN/OUT naming is from the host's perspective. + * USB -> OUT endpoint -> rawmidi + * USB <- IN endpoint <- rawmidi + */ +struct gmidi_in_port { + struct f_midi *midi; + int active; + uint8_t cable; + uint8_t state; +#define STATE_UNKNOWN 0 +#define STATE_1PARAM 1 +#define STATE_2PARAM_1 2 +#define STATE_2PARAM_2 3 +#define STATE_SYSEX_0 4 +#define STATE_SYSEX_1 5 +#define STATE_SYSEX_2 6 + uint8_t data[2]; +}; + +struct f_midi { + struct usb_function func; + struct usb_gadget *gadget; + struct usb_ep *in_ep, *out_ep; + struct snd_card *card; + struct snd_rawmidi *rmidi; + + struct snd_rawmidi_substream *in_substream[MAX_PORTS]; + struct snd_rawmidi_substream *out_substream[MAX_PORTS]; + struct gmidi_in_port *in_port[MAX_PORTS]; + + unsigned long out_triggered; + struct tasklet_struct tasklet; + unsigned int in_ports; + unsigned int out_ports; + int index; + char *id; + unsigned int buflen, qlen; +}; + +static inline struct f_midi *func_to_midi(struct usb_function *f) +{ + return container_of(f, struct f_midi, func); +} + +static void f_midi_transmit(struct f_midi *midi, struct usb_request *req); + +DECLARE_UAC_AC_HEADER_DESCRIPTOR(1); +DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1); +DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(16); + +/* B.3.1 Standard AC Interface Descriptor */ +static struct usb_interface_descriptor ac_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + /* .bInterfaceNumber = DYNAMIC */ + /* .bNumEndpoints = DYNAMIC */ + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, + /* .iInterface = DYNAMIC */ +}; + +/* B.3.2 Class-Specific AC Interface Descriptor */ +static struct uac1_ac_header_descriptor_1 ac_header_desc = { + .bLength = UAC_DT_AC_HEADER_SIZE(1), + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = USB_MS_HEADER, + .bcdADC = cpu_to_le16(0x0100), + .wTotalLength = cpu_to_le16(UAC_DT_AC_HEADER_SIZE(1)), + .bInCollection = 1, + /* .baInterfaceNr = DYNAMIC */ +}; + +/* B.4.1 Standard MS Interface Descriptor */ 
+static struct usb_interface_descriptor ms_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + /* .bInterfaceNumber = DYNAMIC */ + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_MIDISTREAMING, + /* .iInterface = DYNAMIC */ +}; + +/* B.4.2 Class-Specific MS Interface Descriptor */ +static struct usb_ms_header_descriptor ms_header_desc = { + .bLength = USB_DT_MS_HEADER_SIZE, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = USB_MS_HEADER, + .bcdMSC = cpu_to_le16(0x0100), + /* .wTotalLength = DYNAMIC */ +}; + +/* B.5.1 Standard Bulk OUT Endpoint Descriptor */ +static struct usb_endpoint_descriptor bulk_out_desc = { + .bLength = USB_DT_ENDPOINT_AUDIO_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +/* B.5.2 Class-specific MS Bulk OUT Endpoint Descriptor */ +static struct usb_ms_endpoint_descriptor_16 ms_out_desc = { + /* .bLength = DYNAMIC */ + .bDescriptorType = USB_DT_CS_ENDPOINT, + .bDescriptorSubtype = USB_MS_GENERAL, + /* .bNumEmbMIDIJack = DYNAMIC */ + /* .baAssocJackID = DYNAMIC */ +}; + +/* B.6.1 Standard Bulk IN Endpoint Descriptor */ +static struct usb_endpoint_descriptor bulk_in_desc = { + .bLength = USB_DT_ENDPOINT_AUDIO_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +/* B.6.2 Class-specific MS Bulk IN Endpoint Descriptor */ +static struct usb_ms_endpoint_descriptor_16 ms_in_desc = { + /* .bLength = DYNAMIC */ + .bDescriptorType = USB_DT_CS_ENDPOINT, + .bDescriptorSubtype = USB_MS_GENERAL, + /* .bNumEmbMIDIJack = DYNAMIC */ + /* .baAssocJackID = DYNAMIC */ +}; + +/* string IDs are assigned dynamically */ + +#define STRING_FUNC_IDX 0 + +static struct usb_string midi_string_defs[] = { + [STRING_FUNC_IDX].s = "MIDI function", + { } /* end of list */ +}; + +static struct usb_gadget_strings midi_stringtab = { + .language = 0x0409, /* en-us */ + .strings = midi_string_defs, +}; + +static struct usb_gadget_strings *midi_strings[] = { + &midi_stringtab, + NULL, +}; + +static inline struct usb_request *midi_alloc_ep_req(struct usb_ep *ep, + unsigned length) +{ + return alloc_ep_req(ep, length, length); +} + +static void free_ep_req(struct usb_ep *ep, struct usb_request *req) +{ + kfree(req->buf); + usb_ep_free_request(ep, req); +} + +static const uint8_t f_midi_cin_length[] = { + 0, 0, 2, 3, 3, 1, 2, 3, 3, 3, 3, 3, 2, 2, 3, 1 +}; + +/* + * Receives a chunk of MIDI data. + */ +static void f_midi_read_data(struct usb_ep *ep, int cable, + uint8_t *data, int length) +{ + struct f_midi *midi = ep->driver_data; + struct snd_rawmidi_substream *substream = midi->out_substream[cable]; + + if (!substream) + /* Nobody is listening - throw it on the floor. 
*/ + return; + + if (!test_bit(cable, &midi->out_triggered)) + return; + + snd_rawmidi_receive(substream, data, length); +} + +static void f_midi_handle_out_data(struct usb_ep *ep, struct usb_request *req) +{ + unsigned int i; + u8 *buf = req->buf; + + for (i = 0; i + 3 < req->actual; i += 4) + if (buf[i] != 0) { + int cable = buf[i] >> 4; + int length = f_midi_cin_length[buf[i] & 0x0f]; + f_midi_read_data(ep, cable, &buf[i + 1], length); + } +} + +static void +f_midi_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct f_midi *midi = ep->driver_data; + struct usb_composite_dev *cdev = midi->func.config->cdev; + int status = req->status; + + switch (status) { + case 0: /* normal completion */ + if (ep == midi->out_ep) { + /* We received stuff. req is queued again, below */ + f_midi_handle_out_data(ep, req); + } else if (ep == midi->in_ep) { + /* Our transmit completed. See if there's more to go. + * f_midi_transmit eats req, don't queue it again. */ + f_midi_transmit(midi, req); + return; + } + break; + + /* this endpoint is normally active while we're configured */ + case -ECONNABORTED: /* hardware forced ep reset */ + case -ECONNRESET: /* request dequeued */ + case -ESHUTDOWN: /* disconnect from host */ + VDBG(cdev, "%s gone (%d), %d/%d\n", ep->name, status, + req->actual, req->length); + if (ep == midi->out_ep) + f_midi_handle_out_data(ep, req); + + free_ep_req(ep, req); + return; + + case -EOVERFLOW: /* buffer overrun on read means that + * we didn't provide a big enough buffer. + */ + default: + DBG(cdev, "%s complete --> %d, %d/%d\n", ep->name, + status, req->actual, req->length); + break; + case -EREMOTEIO: /* short read */ + break; + } + + status = usb_ep_queue(ep, req, GFP_ATOMIC); + if (status) { + ERROR(cdev, "kill %s: resubmit %d bytes --> %d\n", + ep->name, req->length, status); + usb_ep_set_halt(ep); + /* FIXME recover later ... somehow */ + } +} + +static int f_midi_start_ep(struct f_midi *midi, + struct usb_function *f, + struct usb_ep *ep) +{ + int err; + struct usb_composite_dev *cdev = f->config->cdev; + + if (ep->driver_data) + usb_ep_disable(ep); + + err = config_ep_by_speed(midi->gadget, f, ep); + if (err) { + ERROR(cdev, "can't configure %s: %d\n", ep->name, err); + return err; + } + + err = usb_ep_enable(ep); + if (err) { + ERROR(cdev, "can't start %s: %d\n", ep->name, err); + return err; + } + + ep->driver_data = midi; + + return 0; +} + +static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt) +{ + struct f_midi *midi = func_to_midi(f); + struct usb_composite_dev *cdev = f->config->cdev; + unsigned i; + int err; + + err = f_midi_start_ep(midi, f, midi->in_ep); + if (err) + return err; + + err = f_midi_start_ep(midi, f, midi->out_ep); + if (err) + return err; + + if (midi->out_ep->driver_data) + usb_ep_disable(midi->out_ep); + + err = config_ep_by_speed(midi->gadget, f, midi->out_ep); + if (err) { + ERROR(cdev, "can't configure %s: %d\n", + midi->out_ep->name, err); + return err; + } + + err = usb_ep_enable(midi->out_ep); + if (err) { + ERROR(cdev, "can't start %s: %d\n", + midi->out_ep->name, err); + return err; + } + + midi->out_ep->driver_data = midi; + + /* allocate a bunch of read buffers and queue them all at once. 
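There are midi->qlen of them, midi->buflen bytes each; completed OUT requests are resubmitted from f_midi_complete(), so the queue stays topped up.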
*/ + for (i = 0; i < midi->qlen && err == 0; i++) { + struct usb_request *req = + midi_alloc_ep_req(midi->out_ep, midi->buflen); + if (req == NULL) + return -ENOMEM; + + req->complete = f_midi_complete; + err = usb_ep_queue(midi->out_ep, req, GFP_ATOMIC); + if (err) { + ERROR(midi, "%s queue req: %d\n", + midi->out_ep->name, err); + } + } + + return 0; +} + +static void f_midi_disable(struct usb_function *f) +{ + struct f_midi *midi = func_to_midi(f); + struct usb_composite_dev *cdev = f->config->cdev; + + DBG(cdev, "disable\n"); + + /* + * just disable endpoints, forcing completion of pending i/o. + * all our completion handlers free their requests in this case. + */ + usb_ep_disable(midi->in_ep); + usb_ep_disable(midi->out_ep); +} + +static int f_midi_snd_free(struct snd_device *device) +{ + return 0; +} + +static void f_midi_transmit_packet(struct usb_request *req, uint8_t p0, + uint8_t p1, uint8_t p2, uint8_t p3) +{ + unsigned length = req->length; + u8 *buf = (u8 *)req->buf + length; + + buf[0] = p0; + buf[1] = p1; + buf[2] = p2; + buf[3] = p3; + req->length = length + 4; +} + +/* + * Converts MIDI commands to USB MIDI packets. + */ +static void f_midi_transmit_byte(struct usb_request *req, + struct gmidi_in_port *port, uint8_t b) +{ + uint8_t p0 = port->cable << 4; + + if (b >= 0xf8) { + f_midi_transmit_packet(req, p0 | 0x0f, b, 0, 0); + } else if (b >= 0xf0) { + switch (b) { + case 0xf0: + port->data[0] = b; + port->state = STATE_SYSEX_1; + break; + case 0xf1: + case 0xf3: + port->data[0] = b; + port->state = STATE_1PARAM; + break; + case 0xf2: + port->data[0] = b; + port->state = STATE_2PARAM_1; + break; + case 0xf4: + case 0xf5: + port->state = STATE_UNKNOWN; + break; + case 0xf6: + f_midi_transmit_packet(req, p0 | 0x05, 0xf6, 0, 0); + port->state = STATE_UNKNOWN; + break; + case 0xf7: + switch (port->state) { + case STATE_SYSEX_0: + f_midi_transmit_packet(req, + p0 | 0x05, 0xf7, 0, 0); + break; + case STATE_SYSEX_1: + f_midi_transmit_packet(req, + p0 | 0x06, port->data[0], 0xf7, 0); + break; + case STATE_SYSEX_2: + f_midi_transmit_packet(req, + p0 | 0x07, port->data[0], + port->data[1], 0xf7); + break; + } + port->state = STATE_UNKNOWN; + break; + } + } else if (b >= 0x80) { + port->data[0] = b; + if (b >= 0xc0 && b <= 0xdf) + port->state = STATE_1PARAM; + else + port->state = STATE_2PARAM_1; + } else { /* b < 0x80 */ + switch (port->state) { + case STATE_1PARAM: + if (port->data[0] < 0xf0) { + p0 |= port->data[0] >> 4; + } else { + p0 |= 0x02; + port->state = STATE_UNKNOWN; + } + f_midi_transmit_packet(req, p0, port->data[0], b, 0); + break; + case STATE_2PARAM_1: + port->data[1] = b; + port->state = STATE_2PARAM_2; + break; + case STATE_2PARAM_2: + if (port->data[0] < 0xf0) { + p0 |= port->data[0] >> 4; + port->state = STATE_2PARAM_1; + } else { + p0 |= 0x03; + port->state = STATE_UNKNOWN; + } + f_midi_transmit_packet(req, + p0, port->data[0], port->data[1], b); + break; + case STATE_SYSEX_0: + port->data[0] = b; + port->state = STATE_SYSEX_1; + break; + case STATE_SYSEX_1: + port->data[1] = b; + port->state = STATE_SYSEX_2; + break; + case STATE_SYSEX_2: + f_midi_transmit_packet(req, + p0 | 0x04, port->data[0], port->data[1], b); + port->state = STATE_SYSEX_0; + break; + } + } +} + +static void f_midi_transmit(struct f_midi *midi, struct usb_request *req) +{ + struct usb_ep *ep = midi->in_ep; + int i; + + if (!ep) + return; + + if (!req) + req = midi_alloc_ep_req(ep, midi->buflen); + + if (!req) { + ERROR(midi, "%s: alloc_ep_request failed\n", __func__); + return; + } + 
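	/*
	 * The loop below pulls bytes from every active rawmidi substream and
	 * packs them into 4-byte USB MIDI event packets via
	 * f_midi_transmit_byte().  For example, a Note On message
	 * 0x90 0x3c 0x7f arriving on cable 0 is emitted as the packet
	 * 09 90 3c 7f: the cable number sits in the high nibble of byte 0
	 * and the code index number (0x9, Note On) in the low nibble.
	 */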
req->length = 0; + req->complete = f_midi_complete; + + for (i = 0; i < MAX_PORTS; i++) { + struct gmidi_in_port *port = midi->in_port[i]; + struct snd_rawmidi_substream *substream = midi->in_substream[i]; + + if (!port || !port->active || !substream) + continue; + + while (req->length + 3 < midi->buflen) { + uint8_t b; + if (snd_rawmidi_transmit(substream, &b, 1) != 1) { + port->active = 0; + break; + } + f_midi_transmit_byte(req, port, b); + } + } + + if (req->length > 0) + usb_ep_queue(ep, req, GFP_ATOMIC); + else + free_ep_req(ep, req); +} + +static void f_midi_in_tasklet(unsigned long data) +{ + struct f_midi *midi = (struct f_midi *) data; + f_midi_transmit(midi, NULL); +} + +static int f_midi_in_open(struct snd_rawmidi_substream *substream) +{ + struct f_midi *midi = substream->rmidi->private_data; + + if (!midi->in_port[substream->number]) + return -EINVAL; + + VDBG(midi, "%s()\n", __func__); + midi->in_substream[substream->number] = substream; + midi->in_port[substream->number]->state = STATE_UNKNOWN; + return 0; +} + +static int f_midi_in_close(struct snd_rawmidi_substream *substream) +{ + struct f_midi *midi = substream->rmidi->private_data; + + VDBG(midi, "%s()\n", __func__); + return 0; +} + +static void f_midi_in_trigger(struct snd_rawmidi_substream *substream, int up) +{ + struct f_midi *midi = substream->rmidi->private_data; + + if (!midi->in_port[substream->number]) + return; + + VDBG(midi, "%s() %d\n", __func__, up); + midi->in_port[substream->number]->active = up; + if (up) + tasklet_hi_schedule(&midi->tasklet); +} + +static int f_midi_out_open(struct snd_rawmidi_substream *substream) +{ + struct f_midi *midi = substream->rmidi->private_data; + + if (substream->number >= MAX_PORTS) + return -EINVAL; + + VDBG(midi, "%s()\n", __func__); + midi->out_substream[substream->number] = substream; + return 0; +} + +static int f_midi_out_close(struct snd_rawmidi_substream *substream) +{ + struct f_midi *midi = substream->rmidi->private_data; + + VDBG(midi, "%s()\n", __func__); + return 0; +} + +static void f_midi_out_trigger(struct snd_rawmidi_substream *substream, int up) +{ + struct f_midi *midi = substream->rmidi->private_data; + + VDBG(midi, "%s()\n", __func__); + + if (up) + set_bit(substream->number, &midi->out_triggered); + else + clear_bit(substream->number, &midi->out_triggered); +} + +static struct snd_rawmidi_ops gmidi_in_ops = { + .open = f_midi_in_open, + .close = f_midi_in_close, + .trigger = f_midi_in_trigger, +}; + +static struct snd_rawmidi_ops gmidi_out_ops = { + .open = f_midi_out_open, + .close = f_midi_out_close, + .trigger = f_midi_out_trigger +}; + +static inline void f_midi_unregister_card(struct f_midi *midi) +{ + if (midi->card) { + snd_card_free(midi->card); + midi->card = NULL; + } +} + +/* register as a sound "card" */ +static int f_midi_register_card(struct f_midi *midi) +{ + struct snd_card *card; + struct snd_rawmidi *rmidi; + int err; + static struct snd_device_ops ops = { + .dev_free = f_midi_snd_free, + }; + + err = snd_card_new(&midi->gadget->dev, midi->index, midi->id, + THIS_MODULE, 0, &card); + if (err < 0) { + ERROR(midi, "snd_card_new() failed\n"); + goto fail; + } + midi->card = card; + + err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, midi, &ops); + if (err < 0) { + ERROR(midi, "snd_device_new() failed: error %d\n", err); + goto fail; + } + + strcpy(card->driver, f_midi_longname); + strcpy(card->longname, f_midi_longname); + strcpy(card->shortname, f_midi_shortname); + + /* Set up rawmidi */ + snd_component_add(card, "MIDI"); + err = 
snd_rawmidi_new(card, card->longname, 0, + midi->out_ports, midi->in_ports, &rmidi); + if (err < 0) { + ERROR(midi, "snd_rawmidi_new() failed: error %d\n", err); + goto fail; + } + midi->rmidi = rmidi; + strcpy(rmidi->name, card->shortname); + rmidi->info_flags = SNDRV_RAWMIDI_INFO_OUTPUT | + SNDRV_RAWMIDI_INFO_INPUT | + SNDRV_RAWMIDI_INFO_DUPLEX; + rmidi->private_data = midi; + + /* + * Yes, rawmidi OUTPUT = USB IN, and rawmidi INPUT = USB OUT. + * It's an upside-down world being a gadget. + */ + snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &gmidi_in_ops); + snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &gmidi_out_ops); + + /* register it - we're ready to go */ + err = snd_card_register(card); + if (err < 0) { + ERROR(midi, "snd_card_register() failed\n"); + goto fail; + } + + VDBG(midi, "%s() finished ok\n", __func__); + return 0; + +fail: + f_midi_unregister_card(midi); + return err; +} + +/* MIDI function driver setup/binding */ + +static int f_midi_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_descriptor_header **midi_function; + struct usb_midi_in_jack_descriptor jack_in_ext_desc[MAX_PORTS]; + struct usb_midi_in_jack_descriptor jack_in_emb_desc[MAX_PORTS]; + struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc[MAX_PORTS]; + struct usb_midi_out_jack_descriptor_1 jack_out_emb_desc[MAX_PORTS]; + struct usb_composite_dev *cdev = c->cdev; + struct f_midi *midi = func_to_midi(f); + struct usb_string *us; + int status, n, jack = 1, i = 0; + + midi->gadget = cdev->gadget; + tasklet_init(&midi->tasklet, f_midi_in_tasklet, (unsigned long) midi); + status = f_midi_register_card(midi); + if (status < 0) + goto fail_register; + + /* maybe allocate device-global string ID */ + us = usb_gstrings_attach(c->cdev, midi_strings, + ARRAY_SIZE(midi_string_defs)); + if (IS_ERR(us)) { + status = PTR_ERR(us); + goto fail; + } + ac_interface_desc.iInterface = us[STRING_FUNC_IDX].id; + + /* We have two interfaces, AudioControl and MIDIStreaming */ + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + ac_interface_desc.bInterfaceNumber = status; + + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + ms_interface_desc.bInterfaceNumber = status; + ac_header_desc.baInterfaceNr[0] = status; + + status = -ENODEV; + + /* allocate instance-specific endpoints */ + midi->in_ep = usb_ep_autoconfig(cdev->gadget, &bulk_in_desc); + if (!midi->in_ep) + goto fail; + midi->in_ep->driver_data = cdev; /* claim */ + + midi->out_ep = usb_ep_autoconfig(cdev->gadget, &bulk_out_desc); + if (!midi->out_ep) + goto fail; + midi->out_ep->driver_data = cdev; /* claim */ + + /* allocate temporary function list */ + midi_function = kcalloc((MAX_PORTS * 4) + 9, sizeof(*midi_function), + GFP_KERNEL); + if (!midi_function) { + status = -ENOMEM; + goto fail; + } + + /* + * construct the function's descriptor set. As the number of + * input and output MIDI ports is configurable, we have to do + * it that way. 
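 *
 * That is also why midi_function[] is sized (MAX_PORTS * 4) + 9 above:
 * four interface/header descriptors, two jack descriptors per IN port,
 * two per OUT port, four endpoint descriptors (standard plus
 * class-specific) and the terminating NULL entry.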
+ */ + + /* add the headers - these are always the same */ + midi_function[i++] = (struct usb_descriptor_header *) &ac_interface_desc; + midi_function[i++] = (struct usb_descriptor_header *) &ac_header_desc; + midi_function[i++] = (struct usb_descriptor_header *) &ms_interface_desc; + + /* calculate the header's wTotalLength */ + n = USB_DT_MS_HEADER_SIZE + + (midi->in_ports + midi->out_ports) * + (USB_DT_MIDI_IN_SIZE + USB_DT_MIDI_OUT_SIZE(1)); + ms_header_desc.wTotalLength = cpu_to_le16(n); + + midi_function[i++] = (struct usb_descriptor_header *) &ms_header_desc; + + /* configure the external IN jacks, each linked to an embedded OUT jack */ + for (n = 0; n < midi->in_ports; n++) { + struct usb_midi_in_jack_descriptor *in_ext = &jack_in_ext_desc[n]; + struct usb_midi_out_jack_descriptor_1 *out_emb = &jack_out_emb_desc[n]; + + in_ext->bLength = USB_DT_MIDI_IN_SIZE; + in_ext->bDescriptorType = USB_DT_CS_INTERFACE; + in_ext->bDescriptorSubtype = USB_MS_MIDI_IN_JACK; + in_ext->bJackType = USB_MS_EXTERNAL; + in_ext->bJackID = jack++; + in_ext->iJack = 0; + midi_function[i++] = (struct usb_descriptor_header *) in_ext; + + out_emb->bLength = USB_DT_MIDI_OUT_SIZE(1); + out_emb->bDescriptorType = USB_DT_CS_INTERFACE; + out_emb->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK; + out_emb->bJackType = USB_MS_EMBEDDED; + out_emb->bJackID = jack++; + out_emb->bNrInputPins = 1; + out_emb->pins[0].baSourcePin = 1; + out_emb->pins[0].baSourceID = in_ext->bJackID; + out_emb->iJack = 0; + midi_function[i++] = (struct usb_descriptor_header *) out_emb; + + /* link it to the endpoint */ + ms_in_desc.baAssocJackID[n] = out_emb->bJackID; + } + + /* configure the external OUT jacks, each linked to an embedded IN jack */ + for (n = 0; n < midi->out_ports; n++) { + struct usb_midi_in_jack_descriptor *in_emb = &jack_in_emb_desc[n]; + struct usb_midi_out_jack_descriptor_1 *out_ext = &jack_out_ext_desc[n]; + + in_emb->bLength = USB_DT_MIDI_IN_SIZE; + in_emb->bDescriptorType = USB_DT_CS_INTERFACE; + in_emb->bDescriptorSubtype = USB_MS_MIDI_IN_JACK; + in_emb->bJackType = USB_MS_EMBEDDED; + in_emb->bJackID = jack++; + in_emb->iJack = 0; + midi_function[i++] = (struct usb_descriptor_header *) in_emb; + + out_ext->bLength = USB_DT_MIDI_OUT_SIZE(1); + out_ext->bDescriptorType = USB_DT_CS_INTERFACE; + out_ext->bDescriptorSubtype = USB_MS_MIDI_OUT_JACK; + out_ext->bJackType = USB_MS_EXTERNAL; + out_ext->bJackID = jack++; + out_ext->bNrInputPins = 1; + out_ext->iJack = 0; + out_ext->pins[0].baSourceID = in_emb->bJackID; + out_ext->pins[0].baSourcePin = 1; + midi_function[i++] = (struct usb_descriptor_header *) out_ext; + + /* link it to the endpoint */ + ms_out_desc.baAssocJackID[n] = in_emb->bJackID; + } + + /* configure the endpoint descriptors ... */ + ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports); + ms_out_desc.bNumEmbMIDIJack = midi->in_ports; + + ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports); + ms_in_desc.bNumEmbMIDIJack = midi->out_ports; + + /* ... and add them to the list */ + midi_function[i++] = (struct usb_descriptor_header *) &bulk_out_desc; + midi_function[i++] = (struct usb_descriptor_header *) &ms_out_desc; + midi_function[i++] = (struct usb_descriptor_header *) &bulk_in_desc; + midi_function[i++] = (struct usb_descriptor_header *) &ms_in_desc; + midi_function[i++] = NULL; + + /* + * support all relevant hardware speeds... 
we expect that when + * hardware is dual speed, all bulk-capable endpoints work at + * both speeds + */ + /* copy descriptors, and track endpoint copies */ + f->fs_descriptors = usb_copy_descriptors(midi_function); + if (!f->fs_descriptors) + goto fail_f_midi; + + if (gadget_is_dualspeed(c->cdev->gadget)) { + bulk_in_desc.wMaxPacketSize = cpu_to_le16(512); + bulk_out_desc.wMaxPacketSize = cpu_to_le16(512); + f->hs_descriptors = usb_copy_descriptors(midi_function); + if (!f->hs_descriptors) + goto fail_f_midi; + } + + kfree(midi_function); + + return 0; + +fail_f_midi: + kfree(midi_function); + usb_free_descriptors(f->hs_descriptors); +fail: + f_midi_unregister_card(midi); +fail_register: + /* we might as well release our claims on endpoints */ + if (midi->out_ep) + midi->out_ep->driver_data = NULL; + if (midi->in_ep) + midi->in_ep->driver_data = NULL; + + ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); + + return status; +} + +static inline struct f_midi_opts *to_f_midi_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_midi_opts, + func_inst.group); +} + +CONFIGFS_ATTR_STRUCT(f_midi_opts); +CONFIGFS_ATTR_OPS(f_midi_opts); + +static void midi_attr_release(struct config_item *item) +{ + struct f_midi_opts *opts = to_f_midi_opts(item); + + usb_put_function_instance(&opts->func_inst); +} + +static struct configfs_item_operations midi_item_ops = { + .release = midi_attr_release, + .show_attribute = f_midi_opts_attr_show, + .store_attribute = f_midi_opts_attr_store, +}; + +#define F_MIDI_OPT(name, test_limit, limit) \ +static ssize_t f_midi_opts_##name##_show(struct f_midi_opts *opts, char *page) \ +{ \ + int result; \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%d\n", opts->name); \ + mutex_unlock(&opts->lock); \ + \ + return result; \ +} \ + \ +static ssize_t f_midi_opts_##name##_store(struct f_midi_opts *opts, \ + const char *page, size_t len) \ +{ \ + int ret; \ + u32 num; \ + \ + mutex_lock(&opts->lock); \ + if (opts->refcnt) { \ + ret = -EBUSY; \ + goto end; \ + } \ + \ + ret = kstrtou32(page, 0, &num); \ + if (ret) \ + goto end; \ + \ + if (test_limit && num > limit) { \ + ret = -EINVAL; \ + goto end; \ + } \ + opts->name = num; \ + ret = len; \ + \ +end: \ + mutex_unlock(&opts->lock); \ + return ret; \ +} \ + \ +static struct f_midi_opts_attribute f_midi_opts_##name = \ + __CONFIGFS_ATTR(name, S_IRUGO | S_IWUSR, f_midi_opts_##name##_show, \ + f_midi_opts_##name##_store) + +F_MIDI_OPT(index, true, SNDRV_CARDS); +F_MIDI_OPT(buflen, false, 0); +F_MIDI_OPT(qlen, false, 0); +F_MIDI_OPT(in_ports, true, MAX_PORTS); +F_MIDI_OPT(out_ports, true, MAX_PORTS); + +static ssize_t f_midi_opts_id_show(struct f_midi_opts *opts, char *page) +{ + int result; + + mutex_lock(&opts->lock); + if (opts->id) { + result = strlcpy(page, opts->id, PAGE_SIZE); + } else { + page[0] = 0; + result = 0; + } + + mutex_unlock(&opts->lock); + + return result; +} + +static ssize_t f_midi_opts_id_store(struct f_midi_opts *opts, + const char *page, size_t len) +{ + int ret; + char *c; + + mutex_lock(&opts->lock); + if (opts->refcnt) { + ret = -EBUSY; + goto end; + } + + c = kstrndup(page, len, GFP_KERNEL); + if (!c) { + ret = -ENOMEM; + goto end; + } + if (opts->id_allocated) + kfree(opts->id); + opts->id = c; + opts->id_allocated = true; + ret = len; +end: + mutex_unlock(&opts->lock); + return ret; +} + +static struct f_midi_opts_attribute f_midi_opts_id = + __CONFIGFS_ATTR(id, S_IRUGO | S_IWUSR, f_midi_opts_id_show, + f_midi_opts_id_store); + +static struct 
configfs_attribute *midi_attrs[] = { + &f_midi_opts_index.attr, + &f_midi_opts_buflen.attr, + &f_midi_opts_qlen.attr, + &f_midi_opts_in_ports.attr, + &f_midi_opts_out_ports.attr, + &f_midi_opts_id.attr, + NULL, +}; + +static struct config_item_type midi_func_type = { + .ct_item_ops = &midi_item_ops, + .ct_attrs = midi_attrs, + .ct_owner = THIS_MODULE, +}; + +static void f_midi_free_inst(struct usb_function_instance *f) +{ + struct f_midi_opts *opts; + + opts = container_of(f, struct f_midi_opts, func_inst); + + if (opts->id_allocated) + kfree(opts->id); + + kfree(opts); +} + +static struct usb_function_instance *f_midi_alloc_inst(void) +{ + struct f_midi_opts *opts; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return ERR_PTR(-ENOMEM); + + mutex_init(&opts->lock); + opts->func_inst.free_func_inst = f_midi_free_inst; + opts->index = SNDRV_DEFAULT_IDX1; + opts->id = SNDRV_DEFAULT_STR1; + opts->buflen = 256; + opts->qlen = 32; + opts->in_ports = 1; + opts->out_ports = 1; + + config_group_init_type_name(&opts->func_inst.group, "", + &midi_func_type); + + return &opts->func_inst; +} + +static void f_midi_free(struct usb_function *f) +{ + struct f_midi *midi; + struct f_midi_opts *opts; + int i; + + midi = func_to_midi(f); + opts = container_of(f->fi, struct f_midi_opts, func_inst); + kfree(midi->id); + mutex_lock(&opts->lock); + for (i = opts->in_ports - 1; i >= 0; --i) + kfree(midi->in_port[i]); + kfree(midi); + --opts->refcnt; + mutex_unlock(&opts->lock); +} + +static void f_midi_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = f->config->cdev; + struct f_midi *midi = func_to_midi(f); + struct snd_card *card; + + DBG(cdev, "unbind\n"); + + /* just to be sure */ + f_midi_disable(f); + + card = midi->card; + midi->card = NULL; + if (card) + snd_card_free(card); + + usb_free_all_descriptors(f); +} + +static struct usb_function *f_midi_alloc(struct usb_function_instance *fi) +{ + struct f_midi *midi; + struct f_midi_opts *opts; + int status, i; + + opts = container_of(fi, struct f_midi_opts, func_inst); + + mutex_lock(&opts->lock); + /* sanity check */ + if (opts->in_ports > MAX_PORTS || opts->out_ports > MAX_PORTS) { + mutex_unlock(&opts->lock); + return ERR_PTR(-EINVAL); + } + + /* allocate and initialize one new instance */ + midi = kzalloc(sizeof(*midi), GFP_KERNEL); + if (!midi) { + mutex_unlock(&opts->lock); + return ERR_PTR(-ENOMEM); + } + + for (i = 0; i < opts->in_ports; i++) { + struct gmidi_in_port *port = kzalloc(sizeof(*port), GFP_KERNEL); + + if (!port) { + status = -ENOMEM; + mutex_unlock(&opts->lock); + goto setup_fail; + } + + port->midi = midi; + port->active = 0; + port->cable = i; + midi->in_port[i] = port; + } + + /* set up ALSA midi devices */ + midi->id = kstrdup(opts->id, GFP_KERNEL); + if (opts->id && !midi->id) { + status = -ENOMEM; + mutex_unlock(&opts->lock); + goto kstrdup_fail; + } + midi->in_ports = opts->in_ports; + midi->out_ports = opts->out_ports; + midi->index = opts->index; + midi->buflen = opts->buflen; + midi->qlen = opts->qlen; + ++opts->refcnt; + mutex_unlock(&opts->lock); + + midi->func.name = "gmidi function"; + midi->func.bind = f_midi_bind; + midi->func.unbind = f_midi_unbind; + midi->func.set_alt = f_midi_set_alt; + midi->func.disable = f_midi_disable; + midi->func.free_func = f_midi_free; + + return &midi->func; + +kstrdup_fail: + f_midi_unregister_card(midi); +setup_fail: + for (--i; i >= 0; i--) + kfree(midi->in_port[i]); + kfree(midi); + return ERR_PTR(status); +} + 
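For reference, a legacy (non-configfs) gadget would typically consume this function through the composite framework roughly as sketched below. The helper name example_bind_midi, the static fi_midi/f_midi_func handles and the port/buffer values are illustrative only, and the struct usb_configuration *c is assumed to come from the gadget's usual bind path; error handling is kept minimal.

#include <linux/err.h>
#include <linux/usb/composite.h>

#include "u_midi.h"

static struct usb_function_instance *fi_midi;
static struct usb_function *f_midi_func;

static int example_bind_midi(struct usb_configuration *c)
{
	struct f_midi_opts *opts;
	int ret;

	/* Look up the "midi" function registered below by
	 * DECLARE_USB_FUNCTION_INIT(midi, ...). */
	fi_midi = usb_get_function_instance("midi");
	if (IS_ERR(fi_midi))
		return PTR_ERR(fi_midi);

	/* Tune the instance before a function is allocated from it.
	 * No configfs user exists for a legacy gadget, so the fields
	 * can be set directly (illustrative values). */
	opts = container_of(fi_midi, struct f_midi_opts, func_inst);
	opts->in_ports = 1;
	opts->out_ports = 1;
	opts->buflen = 256;
	opts->qlen = 32;

	f_midi_func = usb_get_function(fi_midi);
	if (IS_ERR(f_midi_func)) {
		ret = PTR_ERR(f_midi_func);
		goto put_inst;
	}

	ret = usb_add_function(c, f_midi_func);
	if (ret)
		goto put_func;
	return 0;

put_func:
	usb_put_function(f_midi_func);
put_inst:
	usb_put_function_instance(fi_midi);
	return ret;
}
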
+DECLARE_USB_FUNCTION_INIT(midi, f_midi_alloc_inst, f_midi_alloc); diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c new file mode 100644 index 000000000..bdcda9f51 --- /dev/null +++ b/drivers/usb/gadget/function/f_ncm.c @@ -0,0 +1,1632 @@ +/* + * f_ncm.c -- USB CDC Network (NCM) link function driver + * + * Copyright (C) 2010 Nokia Corporation + * Contact: Yauheni Kaliuta <yauheni.kaliuta@nokia.com> + * + * The driver borrows from f_ecm.c which is: + * + * Copyright (C) 2003-2005,2008 David Brownell + * Copyright (C) 2008 Nokia Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/etherdevice.h> +#include <linux/crc32.h> + +#include <linux/usb/cdc.h> + +#include "u_ether.h" +#include "u_ether_configfs.h" +#include "u_ncm.h" + +/* + * This function is a "CDC Network Control Model" (CDC NCM) Ethernet link. + * NCM is intended to be used with high-speed network attachments. + * + * Note that NCM requires the use of "alternate settings" for its data + * interface. This means that the set_alt() method has real work to do, + * and also means that a get_alt() method is required. + */ + +/* to trigger crc/non-crc ndp signature */ + +#define NCM_NDP_HDR_CRC_MASK 0x01000000 +#define NCM_NDP_HDR_CRC 0x01000000 +#define NCM_NDP_HDR_NOCRC 0x00000000 + +enum ncm_notify_state { + NCM_NOTIFY_NONE, /* don't notify */ + NCM_NOTIFY_CONNECT, /* issue CONNECT next */ + NCM_NOTIFY_SPEED, /* issue SPEED_CHANGE next */ +}; + +struct f_ncm { + struct gether port; + u8 ctrl_id, data_id; + + char ethaddr[14]; + + struct usb_ep *notify; + struct usb_request *notify_req; + u8 notify_state; + bool is_open; + + const struct ndp_parser_opts *parser_opts; + bool is_crc; + u32 ndp_sign; + + /* + * for notification, it is accessed from both + * callback and ethernet open/close + */ + spinlock_t lock; + + struct net_device *netdev; + + /* For multi-frame NDP TX */ + struct sk_buff *skb_tx_data; + struct sk_buff *skb_tx_ndp; + u16 ndp_dgram_count; + bool timer_force_tx; + struct tasklet_struct tx_tasklet; + struct hrtimer task_timer; + + bool timer_stopping; +}; + +static inline struct f_ncm *func_to_ncm(struct usb_function *f) +{ + return container_of(f, struct f_ncm, port.func); +} + +/* peak (theoretical) bulk transfer rate in bits-per-second */ +static inline unsigned ncm_bitrate(struct usb_gadget *g) +{ + if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) + return 13 * 512 * 8 * 1000 * 8; + else + return 19 * 64 * 1 * 1000 * 8; +} + +/*-------------------------------------------------------------------------*/ + +/* + * We cannot group frames so use just the minimal size which ok to put + * one max-size ethernet frame. + * If the host can group frames, allow it to do that, 16K is selected, + * because it's used by default by the current linux host driver + */ +#define NTB_DEFAULT_IN_SIZE 16384 +#define NTB_OUT_SIZE 16384 + +/* Allocation for storing the NDP, 32 should suffice for a + * 16k packet. This allows a maximum of 32 * 507 Byte packets to + * be transmitted in a single 16kB skb, though when sending full size + * packets this limit will be plenty. 
+ * Smaller packets are not likely to be trying to maximize the + * throughput and will be mstly sending smaller infrequent frames. + */ +#define TX_MAX_NUM_DPE 32 + +/* Delay for the transmit to wait before sending an unfilled NTB frame. */ +#define TX_TIMEOUT_NSECS 300000 + +#define FORMATS_SUPPORTED (USB_CDC_NCM_NTB16_SUPPORTED | \ + USB_CDC_NCM_NTB32_SUPPORTED) + +static struct usb_cdc_ncm_ntb_parameters ntb_parameters = { + .wLength = cpu_to_le16(sizeof(ntb_parameters)), + .bmNtbFormatsSupported = cpu_to_le16(FORMATS_SUPPORTED), + .dwNtbInMaxSize = cpu_to_le32(NTB_DEFAULT_IN_SIZE), + .wNdpInDivisor = cpu_to_le16(4), + .wNdpInPayloadRemainder = cpu_to_le16(0), + .wNdpInAlignment = cpu_to_le16(4), + + .dwNtbOutMaxSize = cpu_to_le32(NTB_OUT_SIZE), + .wNdpOutDivisor = cpu_to_le16(4), + .wNdpOutPayloadRemainder = cpu_to_le16(0), + .wNdpOutAlignment = cpu_to_le16(4), +}; + +/* + * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one + * packet, to simplify cancellation; and a big transfer interval, to + * waste less bandwidth. + */ + +#define NCM_STATUS_INTERVAL_MS 32 +#define NCM_STATUS_BYTECOUNT 16 /* 8 byte header + data */ + +static struct usb_interface_assoc_descriptor ncm_iad_desc = { + .bLength = sizeof ncm_iad_desc, + .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION, + + /* .bFirstInterface = DYNAMIC, */ + .bInterfaceCount = 2, /* control + data */ + .bFunctionClass = USB_CLASS_COMM, + .bFunctionSubClass = USB_CDC_SUBCLASS_NCM, + .bFunctionProtocol = USB_CDC_PROTO_NONE, + /* .iFunction = DYNAMIC */ +}; + +/* interface descriptor: */ + +static struct usb_interface_descriptor ncm_control_intf = { + .bLength = sizeof ncm_control_intf, + .bDescriptorType = USB_DT_INTERFACE, + + /* .bInterfaceNumber = DYNAMIC */ + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + /* .iInterface = DYNAMIC */ +}; + +static struct usb_cdc_header_desc ncm_header_desc = { + .bLength = sizeof ncm_header_desc, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_HEADER_TYPE, + + .bcdCDC = cpu_to_le16(0x0110), +}; + +static struct usb_cdc_union_desc ncm_union_desc = { + .bLength = sizeof(ncm_union_desc), + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_UNION_TYPE, + /* .bMasterInterface0 = DYNAMIC */ + /* .bSlaveInterface0 = DYNAMIC */ +}; + +static struct usb_cdc_ether_desc ecm_desc = { + .bLength = sizeof ecm_desc, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_ETHERNET_TYPE, + + /* this descriptor actually adds value, surprise! */ + /* .iMACAddress = DYNAMIC */ + .bmEthernetStatistics = cpu_to_le32(0), /* no statistics */ + .wMaxSegmentSize = cpu_to_le16(ETH_FRAME_LEN), + .wNumberMCFilters = cpu_to_le16(0), + .bNumberPowerFilters = 0, +}; + +#define NCAPS (USB_CDC_NCM_NCAP_ETH_FILTER | USB_CDC_NCM_NCAP_CRC_MODE) + +static struct usb_cdc_ncm_desc ncm_desc = { + .bLength = sizeof ncm_desc, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_NCM_TYPE, + + .bcdNcmVersion = cpu_to_le16(0x0100), + /* can process SetEthernetPacketFilter */ + .bmNetworkCapabilities = NCAPS, +}; + +/* the default data interface has no endpoints ... 
*/ + +static struct usb_interface_descriptor ncm_data_nop_intf = { + .bLength = sizeof ncm_data_nop_intf, + .bDescriptorType = USB_DT_INTERFACE, + + .bInterfaceNumber = 1, + .bAlternateSetting = 0, + .bNumEndpoints = 0, + .bInterfaceClass = USB_CLASS_CDC_DATA, + .bInterfaceSubClass = 0, + .bInterfaceProtocol = USB_CDC_NCM_PROTO_NTB, + /* .iInterface = DYNAMIC */ +}; + +/* ... but the "real" data interface has two bulk endpoints */ + +static struct usb_interface_descriptor ncm_data_intf = { + .bLength = sizeof ncm_data_intf, + .bDescriptorType = USB_DT_INTERFACE, + + .bInterfaceNumber = 1, + .bAlternateSetting = 1, + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_CDC_DATA, + .bInterfaceSubClass = 0, + .bInterfaceProtocol = USB_CDC_NCM_PROTO_NTB, + /* .iInterface = DYNAMIC */ +}; + +/* full speed support: */ + +static struct usb_endpoint_descriptor fs_ncm_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = cpu_to_le16(NCM_STATUS_BYTECOUNT), + .bInterval = NCM_STATUS_INTERVAL_MS, +}; + +static struct usb_endpoint_descriptor fs_ncm_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor fs_ncm_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_descriptor_header *ncm_fs_function[] = { + (struct usb_descriptor_header *) &ncm_iad_desc, + /* CDC NCM control descriptors */ + (struct usb_descriptor_header *) &ncm_control_intf, + (struct usb_descriptor_header *) &ncm_header_desc, + (struct usb_descriptor_header *) &ncm_union_desc, + (struct usb_descriptor_header *) &ecm_desc, + (struct usb_descriptor_header *) &ncm_desc, + (struct usb_descriptor_header *) &fs_ncm_notify_desc, + /* data interface, altsettings 0 and 1 */ + (struct usb_descriptor_header *) &ncm_data_nop_intf, + (struct usb_descriptor_header *) &ncm_data_intf, + (struct usb_descriptor_header *) &fs_ncm_in_desc, + (struct usb_descriptor_header *) &fs_ncm_out_desc, + NULL, +}; + +/* high speed support: */ + +static struct usb_endpoint_descriptor hs_ncm_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = cpu_to_le16(NCM_STATUS_BYTECOUNT), + .bInterval = USB_MS_TO_HS_INTERVAL(NCM_STATUS_INTERVAL_MS), +}; +static struct usb_endpoint_descriptor hs_ncm_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor hs_ncm_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), +}; + +static struct usb_descriptor_header *ncm_hs_function[] = { + (struct usb_descriptor_header *) &ncm_iad_desc, + /* CDC NCM control descriptors */ + (struct usb_descriptor_header *) &ncm_control_intf, + (struct usb_descriptor_header *) &ncm_header_desc, + (struct usb_descriptor_header *) &ncm_union_desc, + (struct usb_descriptor_header *) &ecm_desc, + (struct usb_descriptor_header *) 
&ncm_desc, + (struct usb_descriptor_header *) &hs_ncm_notify_desc, + /* data interface, altsettings 0 and 1 */ + (struct usb_descriptor_header *) &ncm_data_nop_intf, + (struct usb_descriptor_header *) &ncm_data_intf, + (struct usb_descriptor_header *) &hs_ncm_in_desc, + (struct usb_descriptor_header *) &hs_ncm_out_desc, + NULL, +}; + +/* string descriptors: */ + +#define STRING_CTRL_IDX 0 +#define STRING_MAC_IDX 1 +#define STRING_DATA_IDX 2 +#define STRING_IAD_IDX 3 + +static struct usb_string ncm_string_defs[] = { + [STRING_CTRL_IDX].s = "CDC Network Control Model (NCM)", + [STRING_MAC_IDX].s = "", + [STRING_DATA_IDX].s = "CDC Network Data", + [STRING_IAD_IDX].s = "CDC NCM", + { } /* end of list */ +}; + +static struct usb_gadget_strings ncm_string_table = { + .language = 0x0409, /* en-us */ + .strings = ncm_string_defs, +}; + +static struct usb_gadget_strings *ncm_strings[] = { + &ncm_string_table, + NULL, +}; + +/* + * Here are options for NCM Datagram Pointer table (NDP) parser. + * There are 2 different formats: NDP16 and NDP32 in the spec (ch. 3), + * in NDP16 offsets and sizes fields are 1 16bit word wide, + * in NDP32 -- 2 16bit words wide. Also signatures are different. + * To make the parser code the same, put the differences in the structure, + * and switch pointers to the structures when the format is changed. + */ + +struct ndp_parser_opts { + u32 nth_sign; + u32 ndp_sign; + unsigned nth_size; + unsigned ndp_size; + unsigned dpe_size; + unsigned ndplen_align; + /* sizes in u16 units */ + unsigned dgram_item_len; /* index or length */ + unsigned block_length; + unsigned ndp_index; + unsigned reserved1; + unsigned reserved2; + unsigned next_ndp_index; +}; + +#define INIT_NDP16_OPTS { \ + .nth_sign = USB_CDC_NCM_NTH16_SIGN, \ + .ndp_sign = USB_CDC_NCM_NDP16_NOCRC_SIGN, \ + .nth_size = sizeof(struct usb_cdc_ncm_nth16), \ + .ndp_size = sizeof(struct usb_cdc_ncm_ndp16), \ + .dpe_size = sizeof(struct usb_cdc_ncm_dpe16), \ + .ndplen_align = 4, \ + .dgram_item_len = 1, \ + .block_length = 1, \ + .ndp_index = 1, \ + .reserved1 = 0, \ + .reserved2 = 0, \ + .next_ndp_index = 1, \ + } + + +#define INIT_NDP32_OPTS { \ + .nth_sign = USB_CDC_NCM_NTH32_SIGN, \ + .ndp_sign = USB_CDC_NCM_NDP32_NOCRC_SIGN, \ + .nth_size = sizeof(struct usb_cdc_ncm_nth32), \ + .ndp_size = sizeof(struct usb_cdc_ncm_ndp32), \ + .dpe_size = sizeof(struct usb_cdc_ncm_dpe32), \ + .ndplen_align = 8, \ + .dgram_item_len = 2, \ + .block_length = 2, \ + .ndp_index = 2, \ + .reserved1 = 1, \ + .reserved2 = 2, \ + .next_ndp_index = 2, \ + } + +static const struct ndp_parser_opts ndp16_opts = INIT_NDP16_OPTS; +static const struct ndp_parser_opts ndp32_opts = INIT_NDP32_OPTS; + +static inline void put_ncm(__le16 **p, unsigned size, unsigned val) +{ + switch (size) { + case 1: + put_unaligned_le16((u16)val, *p); + break; + case 2: + put_unaligned_le32((u32)val, *p); + + break; + default: + BUG(); + } + + *p += size; +} + +static inline unsigned get_ncm(__le16 **p, unsigned size) +{ + unsigned tmp; + + switch (size) { + case 1: + tmp = get_unaligned_le16(*p); + break; + case 2: + tmp = get_unaligned_le32(*p); + break; + default: + BUG(); + } + + *p += size; + return tmp; +} + +/*-------------------------------------------------------------------------*/ + +static inline void ncm_reset_values(struct f_ncm *ncm) +{ + ncm->parser_opts = &ndp16_opts; + ncm->is_crc = false; + ncm->port.cdc_filter = DEFAULT_FILTER; + + /* doesn't make sense for ncm, fixed size used */ + ncm->port.header_len = 0; + + ncm->port.fixed_out_len = 
le32_to_cpu(ntb_parameters.dwNtbOutMaxSize); + ncm->port.fixed_in_len = NTB_DEFAULT_IN_SIZE; +} + +/* + * Context: ncm->lock held + */ +static void ncm_do_notify(struct f_ncm *ncm) +{ + struct usb_request *req = ncm->notify_req; + struct usb_cdc_notification *event; + struct usb_composite_dev *cdev = ncm->port.func.config->cdev; + __le32 *data; + int status; + + /* notification already in flight? */ + if (!req) + return; + + event = req->buf; + switch (ncm->notify_state) { + case NCM_NOTIFY_NONE: + return; + + case NCM_NOTIFY_CONNECT: + event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION; + if (ncm->is_open) + event->wValue = cpu_to_le16(1); + else + event->wValue = cpu_to_le16(0); + event->wLength = 0; + req->length = sizeof *event; + + DBG(cdev, "notify connect %s\n", + ncm->is_open ? "true" : "false"); + ncm->notify_state = NCM_NOTIFY_NONE; + break; + + case NCM_NOTIFY_SPEED: + event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE; + event->wValue = cpu_to_le16(0); + event->wLength = cpu_to_le16(8); + req->length = NCM_STATUS_BYTECOUNT; + + /* SPEED_CHANGE data is up/down speeds in bits/sec */ + data = req->buf + sizeof *event; + data[0] = cpu_to_le32(ncm_bitrate(cdev->gadget)); + data[1] = data[0]; + + DBG(cdev, "notify speed %d\n", ncm_bitrate(cdev->gadget)); + ncm->notify_state = NCM_NOTIFY_CONNECT; + break; + } + event->bmRequestType = 0xA1; + event->wIndex = cpu_to_le16(ncm->ctrl_id); + + ncm->notify_req = NULL; + /* + * In double buffering if there is a space in FIFO, + * completion callback can be called right after the call, + * so unlocking + */ + spin_unlock(&ncm->lock); + status = usb_ep_queue(ncm->notify, req, GFP_ATOMIC); + spin_lock(&ncm->lock); + if (status < 0) { + ncm->notify_req = req; + DBG(cdev, "notify --> %d\n", status); + } +} + +/* + * Context: ncm->lock held + */ +static void ncm_notify(struct f_ncm *ncm) +{ + /* + * NOTE on most versions of Linux, host side cdc-ethernet + * won't listen for notifications until its netdevice opens. + * The first notification then sits in the FIFO for a long + * time, and the second one is queued. 
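 *
 * (For reference, notifications walk a small state machine:
 * ncm_notify() arms NCM_NOTIFY_SPEED, ncm_do_notify() sends one
 * notification and advances SPEED -> CONNECT -> NONE, and
 * ncm_notify_complete() re-invokes ncm_do_notify() until the state
 * reaches NCM_NOTIFY_NONE.)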
+ * + * If ncm_notify() is called before the second (CONNECT) + * notification is sent, then it will reset to send the SPEED + * notificaion again (and again, and again), but it's not a problem + */ + ncm->notify_state = NCM_NOTIFY_SPEED; + ncm_do_notify(ncm); +} + +static void ncm_notify_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct f_ncm *ncm = req->context; + struct usb_composite_dev *cdev = ncm->port.func.config->cdev; + struct usb_cdc_notification *event = req->buf; + + spin_lock(&ncm->lock); + switch (req->status) { + case 0: + VDBG(cdev, "Notification %02x sent\n", + event->bNotificationType); + break; + case -ECONNRESET: + case -ESHUTDOWN: + ncm->notify_state = NCM_NOTIFY_NONE; + break; + default: + DBG(cdev, "event %02x --> %d\n", + event->bNotificationType, req->status); + break; + } + ncm->notify_req = req; + ncm_do_notify(ncm); + spin_unlock(&ncm->lock); +} + +static void ncm_ep0out_complete(struct usb_ep *ep, struct usb_request *req) +{ + /* now for SET_NTB_INPUT_SIZE only */ + unsigned in_size; + struct usb_function *f = req->context; + struct f_ncm *ncm = func_to_ncm(f); + struct usb_composite_dev *cdev = ep->driver_data; + + req->context = NULL; + if (req->status || req->actual != req->length) { + DBG(cdev, "Bad control-OUT transfer\n"); + goto invalid; + } + + in_size = get_unaligned_le32(req->buf); + if (in_size < USB_CDC_NCM_NTB_MIN_IN_SIZE || + in_size > le32_to_cpu(ntb_parameters.dwNtbInMaxSize)) { + DBG(cdev, "Got wrong INPUT SIZE (%d) from host\n", in_size); + goto invalid; + } + + ncm->port.fixed_in_len = in_size; + VDBG(cdev, "Set NTB INPUT SIZE %d\n", in_size); + return; + +invalid: + usb_ep_set_halt(ep); + return; +} + +static int ncm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) +{ + struct f_ncm *ncm = func_to_ncm(f); + struct usb_composite_dev *cdev = f->config->cdev; + struct usb_request *req = cdev->req; + int value = -EOPNOTSUPP; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + + /* + * composite driver infrastructure handles everything except + * CDC class messages; interface activation uses set_alt(). + */ + switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_SET_ETHERNET_PACKET_FILTER: + /* + * see 6.2.30: no data, wIndex = interface, + * wValue = packet filter bitmap + */ + if (w_length != 0 || w_index != ncm->ctrl_id) + goto invalid; + DBG(cdev, "packet filter %02x\n", w_value); + /* + * REVISIT locking of cdc_filter. This assumes the UDC + * driver won't have a concurrent packet TX irq running on + * another CPU; or that if it does, this write is atomic... + */ + ncm->port.cdc_filter = w_value; + value = 0; + break; + /* + * and optionally: + * case USB_CDC_SEND_ENCAPSULATED_COMMAND: + * case USB_CDC_GET_ENCAPSULATED_RESPONSE: + * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS: + * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER: + * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER: + * case USB_CDC_GET_ETHERNET_STATISTIC: + */ + + case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_GET_NTB_PARAMETERS: + + if (w_length == 0 || w_value != 0 || w_index != ncm->ctrl_id) + goto invalid; + value = w_length > sizeof ntb_parameters ? 
+ sizeof ntb_parameters : w_length; + memcpy(req->buf, &ntb_parameters, value); + VDBG(cdev, "Host asked NTB parameters\n"); + break; + + case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_GET_NTB_INPUT_SIZE: + + if (w_length < 4 || w_value != 0 || w_index != ncm->ctrl_id) + goto invalid; + put_unaligned_le32(ncm->port.fixed_in_len, req->buf); + value = 4; + VDBG(cdev, "Host asked INPUT SIZE, sending %d\n", + ncm->port.fixed_in_len); + break; + + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_SET_NTB_INPUT_SIZE: + { + if (w_length != 4 || w_value != 0 || w_index != ncm->ctrl_id) + goto invalid; + req->complete = ncm_ep0out_complete; + req->length = w_length; + req->context = f; + + value = req->length; + break; + } + + case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_GET_NTB_FORMAT: + { + uint16_t format; + + if (w_length < 2 || w_value != 0 || w_index != ncm->ctrl_id) + goto invalid; + format = (ncm->parser_opts == &ndp16_opts) ? 0x0000 : 0x0001; + put_unaligned_le16(format, req->buf); + value = 2; + VDBG(cdev, "Host asked NTB FORMAT, sending %d\n", format); + break; + } + + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_SET_NTB_FORMAT: + { + if (w_length != 0 || w_index != ncm->ctrl_id) + goto invalid; + switch (w_value) { + case 0x0000: + ncm->parser_opts = &ndp16_opts; + DBG(cdev, "NCM16 selected\n"); + break; + case 0x0001: + ncm->parser_opts = &ndp32_opts; + DBG(cdev, "NCM32 selected\n"); + break; + default: + goto invalid; + } + value = 0; + break; + } + case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_GET_CRC_MODE: + { + uint16_t is_crc; + + if (w_length < 2 || w_value != 0 || w_index != ncm->ctrl_id) + goto invalid; + is_crc = ncm->is_crc ? 0x0001 : 0x0000; + put_unaligned_le16(is_crc, req->buf); + value = 2; + VDBG(cdev, "Host asked CRC MODE, sending %d\n", is_crc); + break; + } + + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_SET_CRC_MODE: + { + int ndp_hdr_crc = 0; + + if (w_length != 0 || w_index != ncm->ctrl_id) + goto invalid; + switch (w_value) { + case 0x0000: + ncm->is_crc = false; + ndp_hdr_crc = NCM_NDP_HDR_NOCRC; + DBG(cdev, "non-CRC mode selected\n"); + break; + case 0x0001: + ncm->is_crc = true; + ndp_hdr_crc = NCM_NDP_HDR_CRC; + DBG(cdev, "CRC mode selected\n"); + break; + default: + goto invalid; + } + ncm->ndp_sign = ncm->parser_opts->ndp_sign | ndp_hdr_crc; + value = 0; + break; + } + + /* and disabled in ncm descriptor: */ + /* case USB_CDC_GET_NET_ADDRESS: */ + /* case USB_CDC_SET_NET_ADDRESS: */ + /* case USB_CDC_GET_MAX_DATAGRAM_SIZE: */ + /* case USB_CDC_SET_MAX_DATAGRAM_SIZE: */ + + default: +invalid: + DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + } + + /* respond with data transfer or status phase? 
*/ + if (value >= 0) { + DBG(cdev, "ncm req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + req->zero = 0; + req->length = value; + value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + if (value < 0) + ERROR(cdev, "ncm req %02x.%02x response err %d\n", + ctrl->bRequestType, ctrl->bRequest, + value); + } + + /* device either stalls (value < 0) or reports success */ + return value; +} + + +static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt) +{ + struct f_ncm *ncm = func_to_ncm(f); + struct usb_composite_dev *cdev = f->config->cdev; + + /* Control interface has only altsetting 0 */ + if (intf == ncm->ctrl_id) { + if (alt != 0) + goto fail; + + if (ncm->notify->driver_data) { + DBG(cdev, "reset ncm control %d\n", intf); + usb_ep_disable(ncm->notify); + } + + if (!(ncm->notify->desc)) { + DBG(cdev, "init ncm ctrl %d\n", intf); + if (config_ep_by_speed(cdev->gadget, f, ncm->notify)) + goto fail; + } + usb_ep_enable(ncm->notify); + ncm->notify->driver_data = ncm; + + /* Data interface has two altsettings, 0 and 1 */ + } else if (intf == ncm->data_id) { + if (alt > 1) + goto fail; + + if (ncm->port.in_ep->driver_data) { + DBG(cdev, "reset ncm\n"); + ncm->timer_stopping = true; + ncm->netdev = NULL; + gether_disconnect(&ncm->port); + ncm_reset_values(ncm); + } + + /* + * CDC Network only sends data in non-default altsettings. + * Changing altsettings resets filters, statistics, etc. + */ + if (alt == 1) { + struct net_device *net; + + if (!ncm->port.in_ep->desc || + !ncm->port.out_ep->desc) { + DBG(cdev, "init ncm\n"); + if (config_ep_by_speed(cdev->gadget, f, + ncm->port.in_ep) || + config_ep_by_speed(cdev->gadget, f, + ncm->port.out_ep)) { + ncm->port.in_ep->desc = NULL; + ncm->port.out_ep->desc = NULL; + goto fail; + } + } + + /* TODO */ + /* Enable zlps by default for NCM conformance; + * override for musb_hdrc (avoids txdma ovhead) + */ + ncm->port.is_zlp_ok = !( + gadget_is_musbhdrc(cdev->gadget) + ); + ncm->port.cdc_filter = DEFAULT_FILTER; + DBG(cdev, "activate ncm\n"); + net = gether_connect(&ncm->port); + if (IS_ERR(net)) + return PTR_ERR(net); + ncm->netdev = net; + ncm->timer_stopping = false; + } + + spin_lock(&ncm->lock); + ncm_notify(ncm); + spin_unlock(&ncm->lock); + } else + goto fail; + + return 0; +fail: + return -EINVAL; +} + +/* + * Because the data interface supports multiple altsettings, + * this NCM function *MUST* implement a get_alt() method. + */ +static int ncm_get_alt(struct usb_function *f, unsigned intf) +{ + struct f_ncm *ncm = func_to_ncm(f); + + if (intf == ncm->ctrl_id) + return 0; + return ncm->port.in_ep->driver_data ? 
1 : 0; +} + +static struct sk_buff *package_for_tx(struct f_ncm *ncm) +{ + __le16 *ntb_iter; + struct sk_buff *skb2 = NULL; + unsigned ndp_pad; + unsigned ndp_index; + unsigned new_len; + + const struct ndp_parser_opts *opts = ncm->parser_opts; + const int ndp_align = le16_to_cpu(ntb_parameters.wNdpInAlignment); + const int dgram_idx_len = 2 * 2 * opts->dgram_item_len; + + /* Stop the timer */ + hrtimer_try_to_cancel(&ncm->task_timer); + + ndp_pad = ALIGN(ncm->skb_tx_data->len, ndp_align) - + ncm->skb_tx_data->len; + ndp_index = ncm->skb_tx_data->len + ndp_pad; + new_len = ndp_index + dgram_idx_len + ncm->skb_tx_ndp->len; + + /* Set the final BlockLength and wNdpIndex */ + ntb_iter = (void *) ncm->skb_tx_data->data; + /* Increment pointer to BlockLength */ + ntb_iter += 2 + 1 + 1; + put_ncm(&ntb_iter, opts->block_length, new_len); + put_ncm(&ntb_iter, opts->ndp_index, ndp_index); + + /* Set the final NDP wLength */ + new_len = opts->ndp_size + + (ncm->ndp_dgram_count * dgram_idx_len); + ncm->ndp_dgram_count = 0; + /* Increment from start to wLength */ + ntb_iter = (void *) ncm->skb_tx_ndp->data; + ntb_iter += 2; + put_unaligned_le16(new_len, ntb_iter); + + /* Merge the skbs */ + swap(skb2, ncm->skb_tx_data); + if (ncm->skb_tx_data) { + dev_kfree_skb_any(ncm->skb_tx_data); + ncm->skb_tx_data = NULL; + } + + /* Insert NDP alignment. */ + ntb_iter = (void *) skb_put(skb2, ndp_pad); + memset(ntb_iter, 0, ndp_pad); + + /* Copy NTB across. */ + ntb_iter = (void *) skb_put(skb2, ncm->skb_tx_ndp->len); + memcpy(ntb_iter, ncm->skb_tx_ndp->data, ncm->skb_tx_ndp->len); + dev_kfree_skb_any(ncm->skb_tx_ndp); + ncm->skb_tx_ndp = NULL; + + /* Insert zero'd datagram. */ + ntb_iter = (void *) skb_put(skb2, dgram_idx_len); + memset(ntb_iter, 0, dgram_idx_len); + + return skb2; +} + +static struct sk_buff *ncm_wrap_ntb(struct gether *port, + struct sk_buff *skb) +{ + struct f_ncm *ncm = func_to_ncm(&port->func); + struct sk_buff *skb2 = NULL; + int ncb_len = 0; + __le16 *ntb_data; + __le16 *ntb_ndp; + int dgram_pad; + + unsigned max_size = ncm->port.fixed_in_len; + const struct ndp_parser_opts *opts = ncm->parser_opts; + const int ndp_align = le16_to_cpu(ntb_parameters.wNdpInAlignment); + const int div = le16_to_cpu(ntb_parameters.wNdpInDivisor); + const int rem = le16_to_cpu(ntb_parameters.wNdpInPayloadRemainder); + const int dgram_idx_len = 2 * 2 * opts->dgram_item_len; + + if (!skb && !ncm->skb_tx_data) + return NULL; + + if (skb) { + /* Add the CRC if required up front */ + if (ncm->is_crc) { + uint32_t crc; + __le16 *crc_pos; + + crc = ~crc32_le(~0, + skb->data, + skb->len); + crc_pos = (void *) skb_put(skb, sizeof(uint32_t)); + put_unaligned_le32(crc, crc_pos); + } + + /* If the new skb is too big for the current NCM NTB then + * set the current stored skb to be sent now and clear it + * ready for new data. + * NOTE: Assume maximum align for speed of calculation. + */ + if (ncm->skb_tx_data + && (ncm->ndp_dgram_count >= TX_MAX_NUM_DPE + || (ncm->skb_tx_data->len + + div + rem + skb->len + + ncm->skb_tx_ndp->len + ndp_align + (2 * dgram_idx_len)) + > max_size)) { + skb2 = package_for_tx(ncm); + if (!skb2) + goto err; + } + + if (!ncm->skb_tx_data) { + ncb_len = opts->nth_size; + dgram_pad = ALIGN(ncb_len, div) + rem - ncb_len; + ncb_len += dgram_pad; + + /* Create a new skb for the NTH and datagrams. 
*/ + ncm->skb_tx_data = alloc_skb(max_size, GFP_ATOMIC); + if (!ncm->skb_tx_data) + goto err; + + ntb_data = (void *) skb_put(ncm->skb_tx_data, ncb_len); + memset(ntb_data, 0, ncb_len); + /* dwSignature */ + put_unaligned_le32(opts->nth_sign, ntb_data); + ntb_data += 2; + /* wHeaderLength */ + put_unaligned_le16(opts->nth_size, ntb_data++); + + /* Allocate an skb for storing the NDP, + * TX_MAX_NUM_DPE should easily suffice for a + * 16k packet. + */ + ncm->skb_tx_ndp = alloc_skb((int)(opts->ndp_size + + opts->dpe_size + * TX_MAX_NUM_DPE), + GFP_ATOMIC); + if (!ncm->skb_tx_ndp) + goto err; + ntb_ndp = (void *) skb_put(ncm->skb_tx_ndp, + opts->ndp_size); + memset(ntb_ndp, 0, ncb_len); + /* dwSignature */ + put_unaligned_le32(ncm->ndp_sign, ntb_ndp); + ntb_ndp += 2; + + /* There is always a zeroed entry */ + ncm->ndp_dgram_count = 1; + + /* Note: we skip opts->next_ndp_index */ + } + + /* Delay the timer. */ + hrtimer_start(&ncm->task_timer, + ktime_set(0, TX_TIMEOUT_NSECS), + HRTIMER_MODE_REL); + + /* Add the datagram position entries */ + ntb_ndp = (void *) skb_put(ncm->skb_tx_ndp, dgram_idx_len); + memset(ntb_ndp, 0, dgram_idx_len); + + ncb_len = ncm->skb_tx_data->len; + dgram_pad = ALIGN(ncb_len, div) + rem - ncb_len; + ncb_len += dgram_pad; + + /* (d)wDatagramIndex */ + put_ncm(&ntb_ndp, opts->dgram_item_len, ncb_len); + /* (d)wDatagramLength */ + put_ncm(&ntb_ndp, opts->dgram_item_len, skb->len); + ncm->ndp_dgram_count++; + + /* Add the new data to the skb */ + ntb_data = (void *) skb_put(ncm->skb_tx_data, dgram_pad); + memset(ntb_data, 0, dgram_pad); + ntb_data = (void *) skb_put(ncm->skb_tx_data, skb->len); + memcpy(ntb_data, skb->data, skb->len); + dev_kfree_skb_any(skb); + skb = NULL; + + } else if (ncm->skb_tx_data && ncm->timer_force_tx) { + /* If the tx was requested because of a timeout then send */ + skb2 = package_for_tx(ncm); + if (!skb2) + goto err; + } + + return skb2; + +err: + ncm->netdev->stats.tx_dropped++; + + if (skb) + dev_kfree_skb_any(skb); + if (ncm->skb_tx_data) + dev_kfree_skb_any(ncm->skb_tx_data); + if (ncm->skb_tx_ndp) + dev_kfree_skb_any(ncm->skb_tx_ndp); + + return NULL; +} + +/* + * This transmits the NTB if there are frames waiting. + */ +static void ncm_tx_tasklet(unsigned long data) +{ + struct f_ncm *ncm = (void *)data; + + if (ncm->timer_stopping) + return; + + /* Only send if data is available. */ + if (ncm->skb_tx_data) { + ncm->timer_force_tx = true; + + /* XXX This allowance of a NULL skb argument to ndo_start_xmit + * XXX is not sane. The gadget layer should be redesigned so + * XXX that the dev->wrap() invocations to build SKBs is transparent + * XXX and performed in some way outside of the ndo_start_xmit + * XXX interface. + */ + ncm->netdev->netdev_ops->ndo_start_xmit(NULL, ncm->netdev); + + ncm->timer_force_tx = false; + } +} + +/* + * The transmit should only be run if no skb data has been sent + * for a certain duration. 
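+ *
+ * ncm_wrap_ntb() (re)arms task_timer for TX_TIMEOUT_NSECS each time a
+ * datagram is added to the pending NTB; if nothing further arrives before
+ * the timer fires, the handler below schedules ncm_tx_tasklet(), which
+ * pushes the partially filled NTB out through ndo_start_xmit().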
+ */ +static enum hrtimer_restart ncm_tx_timeout(struct hrtimer *data) +{ + struct f_ncm *ncm = container_of(data, struct f_ncm, task_timer); + tasklet_schedule(&ncm->tx_tasklet); + return HRTIMER_NORESTART; +} + +static int ncm_unwrap_ntb(struct gether *port, + struct sk_buff *skb, + struct sk_buff_head *list) +{ + struct f_ncm *ncm = func_to_ncm(&port->func); + __le16 *tmp = (void *) skb->data; + unsigned index, index2; + int ndp_index; + unsigned dg_len, dg_len2; + unsigned ndp_len; + struct sk_buff *skb2; + int ret = -EINVAL; + unsigned max_size = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize); + const struct ndp_parser_opts *opts = ncm->parser_opts; + unsigned crc_len = ncm->is_crc ? sizeof(uint32_t) : 0; + int dgram_counter; + + /* dwSignature */ + if (get_unaligned_le32(tmp) != opts->nth_sign) { + INFO(port->func.config->cdev, "Wrong NTH SIGN, skblen %d\n", + skb->len); + print_hex_dump(KERN_INFO, "HEAD:", DUMP_PREFIX_ADDRESS, 32, 1, + skb->data, 32, false); + + goto err; + } + tmp += 2; + /* wHeaderLength */ + if (get_unaligned_le16(tmp++) != opts->nth_size) { + INFO(port->func.config->cdev, "Wrong NTB headersize\n"); + goto err; + } + tmp++; /* skip wSequence */ + + /* (d)wBlockLength */ + if (get_ncm(&tmp, opts->block_length) > max_size) { + INFO(port->func.config->cdev, "OUT size exceeded\n"); + goto err; + } + + ndp_index = get_ncm(&tmp, opts->ndp_index); + + /* Run through all the NDP's in the NTB */ + do { + /* NCM 3.2 */ + if (((ndp_index % 4) != 0) && + (ndp_index < opts->nth_size)) { + INFO(port->func.config->cdev, "Bad index: %#X\n", + ndp_index); + goto err; + } + + /* walk through NDP */ + tmp = (void *)(skb->data + ndp_index); + if (get_unaligned_le32(tmp) != ncm->ndp_sign) { + INFO(port->func.config->cdev, "Wrong NDP SIGN\n"); + goto err; + } + tmp += 2; + + ndp_len = get_unaligned_le16(tmp++); + /* + * NCM 3.3.1 + * entry is 2 items + * item size is 16/32 bits, opts->dgram_item_len * 2 bytes + * minimal: struct usb_cdc_ncm_ndpX + normal entry + zero entry + * Each entry is a dgram index and a dgram length. + */ + if ((ndp_len < opts->ndp_size + + 2 * 2 * (opts->dgram_item_len * 2)) + || (ndp_len % opts->ndplen_align != 0)) { + INFO(port->func.config->cdev, "Bad NDP length: %#X\n", + ndp_len); + goto err; + } + tmp += opts->reserved1; + /* Check for another NDP (d)wNextNdpIndex */ + ndp_index = get_ncm(&tmp, opts->next_ndp_index); + tmp += opts->reserved2; + + ndp_len -= opts->ndp_size; + index2 = get_ncm(&tmp, opts->dgram_item_len); + dg_len2 = get_ncm(&tmp, opts->dgram_item_len); + dgram_counter = 0; + + do { + index = index2; + dg_len = dg_len2; + if (dg_len < 14 + crc_len) { /* ethernet hdr + crc */ + INFO(port->func.config->cdev, + "Bad dgram length: %#X\n", dg_len); + goto err; + } + if (ncm->is_crc) { + uint32_t crc, crc2; + + crc = get_unaligned_le32(skb->data + + index + dg_len - + crc_len); + crc2 = ~crc32_le(~0, + skb->data + index, + dg_len - crc_len); + if (crc != crc2) { + INFO(port->func.config->cdev, + "Bad CRC\n"); + goto err; + } + } + + index2 = get_ncm(&tmp, opts->dgram_item_len); + dg_len2 = get_ncm(&tmp, opts->dgram_item_len); + + /* + * Copy the data into a new skb. 
+ * This ensures the truesize is correct + */ + skb2 = netdev_alloc_skb_ip_align(ncm->netdev, + dg_len - crc_len); + if (skb2 == NULL) + goto err; + memcpy(skb_put(skb2, dg_len - crc_len), + skb->data + index, dg_len - crc_len); + + skb_queue_tail(list, skb2); + + ndp_len -= 2 * (opts->dgram_item_len * 2); + + dgram_counter++; + + if (index2 == 0 || dg_len2 == 0) + break; + } while (ndp_len > 2 * (opts->dgram_item_len * 2)); + } while (ndp_index); + + dev_kfree_skb_any(skb); + + VDBG(port->func.config->cdev, + "Parsed NTB with %d frames\n", dgram_counter); + return 0; +err: + skb_queue_purge(list); + dev_kfree_skb_any(skb); + return ret; +} + +static void ncm_disable(struct usb_function *f) +{ + struct f_ncm *ncm = func_to_ncm(f); + struct usb_composite_dev *cdev = f->config->cdev; + + DBG(cdev, "ncm deactivated\n"); + + if (ncm->port.in_ep->driver_data) { + ncm->timer_stopping = true; + ncm->netdev = NULL; + gether_disconnect(&ncm->port); + } + + if (ncm->notify->driver_data) { + usb_ep_disable(ncm->notify); + ncm->notify->driver_data = NULL; + ncm->notify->desc = NULL; + } +} + +/*-------------------------------------------------------------------------*/ + +/* + * Callbacks let us notify the host about connect/disconnect when the + * net device is opened or closed. + * + * For testing, note that link states on this side include both opened + * and closed variants of: + * + * - disconnected/unconfigured + * - configured but inactive (data alt 0) + * - configured and active (data alt 1) + * + * Each needs to be tested with unplug, rmmod, SET_CONFIGURATION, and + * SET_INTERFACE (altsetting). Remember also that "configured" doesn't + * imply the host is actually polling the notification endpoint, and + * likewise that "active" doesn't imply it's actually using the data + * endpoints for traffic. 
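+ *
+ * ncm_open() and ncm_close() below only record the new link state under
+ * ncm->lock and call ncm_notify(); the SPEED and CONNECT notifications
+ * themselves are queued on the notification endpoint by ncm_do_notify()
+ * and chained from ncm_notify_complete().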
+ */ + +static void ncm_open(struct gether *geth) +{ + struct f_ncm *ncm = func_to_ncm(&geth->func); + + DBG(ncm->port.func.config->cdev, "%s\n", __func__); + + spin_lock(&ncm->lock); + ncm->is_open = true; + ncm_notify(ncm); + spin_unlock(&ncm->lock); +} + +static void ncm_close(struct gether *geth) +{ + struct f_ncm *ncm = func_to_ncm(&geth->func); + + DBG(ncm->port.func.config->cdev, "%s\n", __func__); + + spin_lock(&ncm->lock); + ncm->is_open = false; + ncm_notify(ncm); + spin_unlock(&ncm->lock); +} + +/*-------------------------------------------------------------------------*/ + +/* ethernet function driver setup/binding */ + +static int ncm_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct f_ncm *ncm = func_to_ncm(f); + struct usb_string *us; + int status; + struct usb_ep *ep; + struct f_ncm_opts *ncm_opts; + + if (!can_support_ecm(cdev->gadget)) + return -EINVAL; + + ncm_opts = container_of(f->fi, struct f_ncm_opts, func_inst); + /* + * in drivers/usb/gadget/configfs.c:configfs_composite_bind() + * configurations are bound in sequence with list_for_each_entry, + * in each configuration its functions are bound in sequence + * with list_for_each_entry, so we assume no race condition + * with regard to ncm_opts->bound access + */ + if (!ncm_opts->bound) { + mutex_lock(&ncm_opts->lock); + gether_set_gadget(ncm_opts->net, cdev->gadget); + status = gether_register_netdev(ncm_opts->net); + mutex_unlock(&ncm_opts->lock); + if (status) + return status; + ncm_opts->bound = true; + } + us = usb_gstrings_attach(cdev, ncm_strings, + ARRAY_SIZE(ncm_string_defs)); + if (IS_ERR(us)) + return PTR_ERR(us); + ncm_control_intf.iInterface = us[STRING_CTRL_IDX].id; + ncm_data_nop_intf.iInterface = us[STRING_DATA_IDX].id; + ncm_data_intf.iInterface = us[STRING_DATA_IDX].id; + ecm_desc.iMACAddress = us[STRING_MAC_IDX].id; + ncm_iad_desc.iFunction = us[STRING_IAD_IDX].id; + + /* allocate instance-specific interface IDs */ + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + ncm->ctrl_id = status; + ncm_iad_desc.bFirstInterface = status; + + ncm_control_intf.bInterfaceNumber = status; + ncm_union_desc.bMasterInterface0 = status; + + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + ncm->data_id = status; + + ncm_data_nop_intf.bInterfaceNumber = status; + ncm_data_intf.bInterfaceNumber = status; + ncm_union_desc.bSlaveInterface0 = status; + + status = -ENODEV; + + /* allocate instance-specific endpoints */ + ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_in_desc); + if (!ep) + goto fail; + ncm->port.in_ep = ep; + ep->driver_data = cdev; /* claim */ + + ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_out_desc); + if (!ep) + goto fail; + ncm->port.out_ep = ep; + ep->driver_data = cdev; /* claim */ + + ep = usb_ep_autoconfig(cdev->gadget, &fs_ncm_notify_desc); + if (!ep) + goto fail; + ncm->notify = ep; + ep->driver_data = cdev; /* claim */ + + status = -ENOMEM; + + /* allocate notification request and buffer */ + ncm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL); + if (!ncm->notify_req) + goto fail; + ncm->notify_req->buf = kmalloc(NCM_STATUS_BYTECOUNT, GFP_KERNEL); + if (!ncm->notify_req->buf) + goto fail; + ncm->notify_req->context = ncm; + ncm->notify_req->complete = ncm_notify_complete; + + /* + * support all relevant hardware speeds... 
we expect that when + * hardware is dual speed, all bulk-capable endpoints work at + * both speeds + */ + hs_ncm_in_desc.bEndpointAddress = fs_ncm_in_desc.bEndpointAddress; + hs_ncm_out_desc.bEndpointAddress = fs_ncm_out_desc.bEndpointAddress; + hs_ncm_notify_desc.bEndpointAddress = + fs_ncm_notify_desc.bEndpointAddress; + + status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function, + NULL); + if (status) + goto fail; + + /* + * NOTE: all that is done without knowing or caring about + * the network link ... which is unavailable to this code + * until we're activated via set_alt(). + */ + + ncm->port.open = ncm_open; + ncm->port.close = ncm_close; + + tasklet_init(&ncm->tx_tasklet, ncm_tx_tasklet, (unsigned long) ncm); + hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + ncm->task_timer.function = ncm_tx_timeout; + + DBG(cdev, "CDC Network: %s speed IN/%s OUT/%s NOTIFY/%s\n", + gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", + ncm->port.in_ep->name, ncm->port.out_ep->name, + ncm->notify->name); + return 0; + +fail: + if (ncm->notify_req) { + kfree(ncm->notify_req->buf); + usb_ep_free_request(ncm->notify, ncm->notify_req); + } + + /* we might as well release our claims on endpoints */ + if (ncm->notify) + ncm->notify->driver_data = NULL; + if (ncm->port.out_ep) + ncm->port.out_ep->driver_data = NULL; + if (ncm->port.in_ep) + ncm->port.in_ep->driver_data = NULL; + + ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); + + return status; +} + +static inline struct f_ncm_opts *to_f_ncm_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_ncm_opts, + func_inst.group); +} + +/* f_ncm_item_ops */ +USB_ETHERNET_CONFIGFS_ITEM(ncm); + +/* f_ncm_opts_dev_addr */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(ncm); + +/* f_ncm_opts_host_addr */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(ncm); + +/* f_ncm_opts_qmult */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(ncm); + +/* f_ncm_opts_ifname */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(ncm); + +static struct configfs_attribute *ncm_attrs[] = { + &f_ncm_opts_dev_addr.attr, + &f_ncm_opts_host_addr.attr, + &f_ncm_opts_qmult.attr, + &f_ncm_opts_ifname.attr, + NULL, +}; + +static struct config_item_type ncm_func_type = { + .ct_item_ops = &ncm_item_ops, + .ct_attrs = ncm_attrs, + .ct_owner = THIS_MODULE, +}; + +static void ncm_free_inst(struct usb_function_instance *f) +{ + struct f_ncm_opts *opts; + + opts = container_of(f, struct f_ncm_opts, func_inst); + if (opts->bound) + gether_cleanup(netdev_priv(opts->net)); + else + free_netdev(opts->net); + kfree(opts); +} + +static struct usb_function_instance *ncm_alloc_inst(void) +{ + struct f_ncm_opts *opts; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return ERR_PTR(-ENOMEM); + mutex_init(&opts->lock); + opts->func_inst.free_func_inst = ncm_free_inst; + opts->net = gether_setup_default(); + if (IS_ERR(opts->net)) { + struct net_device *net = opts->net; + kfree(opts); + return ERR_CAST(net); + } + + config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type); + + return &opts->func_inst; +} + +static void ncm_free(struct usb_function *f) +{ + struct f_ncm *ncm; + struct f_ncm_opts *opts; + + ncm = func_to_ncm(f); + opts = container_of(f->fi, struct f_ncm_opts, func_inst); + kfree(ncm); + mutex_lock(&opts->lock); + opts->refcnt--; + mutex_unlock(&opts->lock); +} + +static void ncm_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct f_ncm *ncm = func_to_ncm(f); + + DBG(c->cdev, "ncm unbind\n"); + + 
hrtimer_cancel(&ncm->task_timer); + tasklet_kill(&ncm->tx_tasklet); + + ncm_string_defs[0].id = 0; + usb_free_all_descriptors(f); + + kfree(ncm->notify_req->buf); + usb_ep_free_request(ncm->notify, ncm->notify_req); +} + +static struct usb_function *ncm_alloc(struct usb_function_instance *fi) +{ + struct f_ncm *ncm; + struct f_ncm_opts *opts; + int status; + + /* allocate and initialize one new instance */ + ncm = kzalloc(sizeof(*ncm), GFP_KERNEL); + if (!ncm) + return ERR_PTR(-ENOMEM); + + opts = container_of(fi, struct f_ncm_opts, func_inst); + mutex_lock(&opts->lock); + opts->refcnt++; + + /* export host's Ethernet address in CDC format */ + status = gether_get_host_addr_cdc(opts->net, ncm->ethaddr, + sizeof(ncm->ethaddr)); + if (status < 12) { /* strlen("01234567890a") */ + kfree(ncm); + mutex_unlock(&opts->lock); + return ERR_PTR(-EINVAL); + } + ncm_string_defs[STRING_MAC_IDX].s = ncm->ethaddr; + + spin_lock_init(&ncm->lock); + ncm_reset_values(ncm); + ncm->port.ioport = netdev_priv(opts->net); + mutex_unlock(&opts->lock); + ncm->port.is_fixed = true; + ncm->port.supports_multi_frame = true; + + ncm->port.func.name = "cdc_network"; + /* descriptors are per-instance copies */ + ncm->port.func.bind = ncm_bind; + ncm->port.func.unbind = ncm_unbind; + ncm->port.func.set_alt = ncm_set_alt; + ncm->port.func.get_alt = ncm_get_alt; + ncm->port.func.setup = ncm_setup; + ncm->port.func.disable = ncm_disable; + ncm->port.func.free_func = ncm_free; + + ncm->port.wrap = ncm_wrap_ntb; + ncm->port.unwrap = ncm_unwrap_ntb; + + return &ncm->port.func; +} + +DECLARE_USB_FUNCTION_INIT(ncm, ncm_alloc_inst, ncm_alloc); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Yauheni Kaliuta"); diff --git a/drivers/usb/gadget/function/f_obex.c b/drivers/usb/gadget/function/f_obex.c new file mode 100644 index 000000000..a1b79c534 --- /dev/null +++ b/drivers/usb/gadget/function/f_obex.c @@ -0,0 +1,538 @@ +/* + * f_obex.c -- USB CDC OBEX function driver + * + * Copyright (C) 2008 Nokia Corporation + * Contact: Felipe Balbi <felipe.balbi@nokia.com> + * + * Based on f_acm.c by Al Borchers and David Brownell. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +/* #define VERBOSE_DEBUG */ + +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/module.h> + +#include "u_serial.h" +#include "gadget_chips.h" + + +/* + * This CDC OBEX function support just packages a TTY-ish byte stream. + * A user mode server will put it into "raw" mode and handle all the + * relevant protocol details ... this is just a kernel passthrough. + * When possible, we prevent gadget enumeration until that server is + * ready to handle the commands. 
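+ *
+ * To that end obex_bind() calls usb_function_deactivate(), and the
+ * gserial connect/disconnect callbacks re-activate or deactivate the
+ * function as the port is opened or closed.  The configfs instance
+ * exposes a read-only port_num attribute telling that server which
+ * /dev/ttyGS<n> to open.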
+ */ + +struct f_obex { + struct gserial port; + u8 ctrl_id; + u8 data_id; + u8 cur_alt; + u8 port_num; + u8 can_activate; +}; + +static inline struct f_obex *func_to_obex(struct usb_function *f) +{ + return container_of(f, struct f_obex, port.func); +} + +static inline struct f_obex *port_to_obex(struct gserial *p) +{ + return container_of(p, struct f_obex, port); +} + +/*-------------------------------------------------------------------------*/ + +#define OBEX_CTRL_IDX 0 +#define OBEX_DATA_IDX 1 + +static struct usb_string obex_string_defs[] = { + [OBEX_CTRL_IDX].s = "CDC Object Exchange (OBEX)", + [OBEX_DATA_IDX].s = "CDC OBEX Data", + { }, /* end of list */ +}; + +static struct usb_gadget_strings obex_string_table = { + .language = 0x0409, /* en-US */ + .strings = obex_string_defs, +}; + +static struct usb_gadget_strings *obex_strings[] = { + &obex_string_table, + NULL, +}; + +/*-------------------------------------------------------------------------*/ + +static struct usb_interface_descriptor obex_control_intf = { + .bLength = sizeof(obex_control_intf), + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = 0, + + .bAlternateSetting = 0, + .bNumEndpoints = 0, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_OBEX, +}; + +static struct usb_interface_descriptor obex_data_nop_intf = { + .bLength = sizeof(obex_data_nop_intf), + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = 1, + + .bAlternateSetting = 0, + .bNumEndpoints = 0, + .bInterfaceClass = USB_CLASS_CDC_DATA, +}; + +static struct usb_interface_descriptor obex_data_intf = { + .bLength = sizeof(obex_data_intf), + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = 2, + + .bAlternateSetting = 1, + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_CDC_DATA, +}; + +static struct usb_cdc_header_desc obex_cdc_header_desc = { + .bLength = sizeof(obex_cdc_header_desc), + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_HEADER_TYPE, + .bcdCDC = cpu_to_le16(0x0120), +}; + +static struct usb_cdc_union_desc obex_cdc_union_desc = { + .bLength = sizeof(obex_cdc_union_desc), + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_UNION_TYPE, + .bMasterInterface0 = 1, + .bSlaveInterface0 = 2, +}; + +static struct usb_cdc_obex_desc obex_desc = { + .bLength = sizeof(obex_desc), + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_OBEX_TYPE, + .bcdVersion = cpu_to_le16(0x0100), +}; + +/* High-Speed Support */ + +static struct usb_endpoint_descriptor obex_hs_ep_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor obex_hs_ep_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), +}; + +static struct usb_descriptor_header *hs_function[] = { + (struct usb_descriptor_header *) &obex_control_intf, + (struct usb_descriptor_header *) &obex_cdc_header_desc, + (struct usb_descriptor_header *) &obex_desc, + (struct usb_descriptor_header *) &obex_cdc_union_desc, + + (struct usb_descriptor_header *) &obex_data_nop_intf, + (struct usb_descriptor_header *) &obex_data_intf, + (struct usb_descriptor_header *) &obex_hs_ep_in_desc, + (struct usb_descriptor_header *) &obex_hs_ep_out_desc, + NULL, +}; + +/* Full-Speed 
Support */ + +static struct usb_endpoint_descriptor obex_fs_ep_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor obex_fs_ep_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_descriptor_header *fs_function[] = { + (struct usb_descriptor_header *) &obex_control_intf, + (struct usb_descriptor_header *) &obex_cdc_header_desc, + (struct usb_descriptor_header *) &obex_desc, + (struct usb_descriptor_header *) &obex_cdc_union_desc, + + (struct usb_descriptor_header *) &obex_data_nop_intf, + (struct usb_descriptor_header *) &obex_data_intf, + (struct usb_descriptor_header *) &obex_fs_ep_in_desc, + (struct usb_descriptor_header *) &obex_fs_ep_out_desc, + NULL, +}; + +/*-------------------------------------------------------------------------*/ + +static int obex_set_alt(struct usb_function *f, unsigned intf, unsigned alt) +{ + struct f_obex *obex = func_to_obex(f); + struct usb_composite_dev *cdev = f->config->cdev; + + if (intf == obex->ctrl_id) { + if (alt != 0) + goto fail; + /* NOP */ + dev_dbg(&cdev->gadget->dev, + "reset obex ttyGS%d control\n", obex->port_num); + + } else if (intf == obex->data_id) { + if (alt > 1) + goto fail; + + if (obex->port.in->driver_data) { + dev_dbg(&cdev->gadget->dev, + "reset obex ttyGS%d\n", obex->port_num); + gserial_disconnect(&obex->port); + } + + if (!obex->port.in->desc || !obex->port.out->desc) { + dev_dbg(&cdev->gadget->dev, + "init obex ttyGS%d\n", obex->port_num); + if (config_ep_by_speed(cdev->gadget, f, + obex->port.in) || + config_ep_by_speed(cdev->gadget, f, + obex->port.out)) { + obex->port.out->desc = NULL; + obex->port.in->desc = NULL; + goto fail; + } + } + + if (alt == 1) { + dev_dbg(&cdev->gadget->dev, + "activate obex ttyGS%d\n", obex->port_num); + gserial_connect(&obex->port, obex->port_num); + } + + } else + goto fail; + + obex->cur_alt = alt; + + return 0; + +fail: + return -EINVAL; +} + +static int obex_get_alt(struct usb_function *f, unsigned intf) +{ + struct f_obex *obex = func_to_obex(f); + + return obex->cur_alt; +} + +static void obex_disable(struct usb_function *f) +{ + struct f_obex *obex = func_to_obex(f); + struct usb_composite_dev *cdev = f->config->cdev; + + dev_dbg(&cdev->gadget->dev, "obex ttyGS%d disable\n", obex->port_num); + gserial_disconnect(&obex->port); +} + +/*-------------------------------------------------------------------------*/ + +static void obex_connect(struct gserial *g) +{ + struct f_obex *obex = port_to_obex(g); + struct usb_composite_dev *cdev = g->func.config->cdev; + int status; + + if (!obex->can_activate) + return; + + status = usb_function_activate(&g->func); + if (status) + dev_dbg(&cdev->gadget->dev, + "obex ttyGS%d function activate --> %d\n", + obex->port_num, status); +} + +static void obex_disconnect(struct gserial *g) +{ + struct f_obex *obex = port_to_obex(g); + struct usb_composite_dev *cdev = g->func.config->cdev; + int status; + + if (!obex->can_activate) + return; + + status = usb_function_deactivate(&g->func); + if (status) + dev_dbg(&cdev->gadget->dev, + "obex ttyGS%d function deactivate --> %d\n", + obex->port_num, status); +} + +/*-------------------------------------------------------------------------*/ + +/* Some controllers can't support CDC OBEX ... 
*/ +static inline bool can_support_obex(struct usb_configuration *c) +{ + /* Since the first interface is a NOP, we can ignore the + * issue of multi-interface support on most controllers. + * + * Altsettings are mandatory, however... + */ + if (!gadget_supports_altsettings(c->cdev->gadget)) + return false; + + /* everything else is *probably* fine ... */ + return true; +} + +static int obex_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct f_obex *obex = func_to_obex(f); + struct usb_string *us; + int status; + struct usb_ep *ep; + + if (!can_support_obex(c)) + return -EINVAL; + + us = usb_gstrings_attach(cdev, obex_strings, + ARRAY_SIZE(obex_string_defs)); + if (IS_ERR(us)) + return PTR_ERR(us); + obex_control_intf.iInterface = us[OBEX_CTRL_IDX].id; + obex_data_nop_intf.iInterface = us[OBEX_DATA_IDX].id; + obex_data_intf.iInterface = us[OBEX_DATA_IDX].id; + + /* allocate instance-specific interface IDs, and patch descriptors */ + + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + obex->ctrl_id = status; + + obex_control_intf.bInterfaceNumber = status; + obex_cdc_union_desc.bMasterInterface0 = status; + + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + obex->data_id = status; + + obex_data_nop_intf.bInterfaceNumber = status; + obex_data_intf.bInterfaceNumber = status; + obex_cdc_union_desc.bSlaveInterface0 = status; + + /* allocate instance-specific endpoints */ + + status = -ENODEV; + ep = usb_ep_autoconfig(cdev->gadget, &obex_fs_ep_in_desc); + if (!ep) + goto fail; + obex->port.in = ep; + ep->driver_data = cdev; /* claim */ + + ep = usb_ep_autoconfig(cdev->gadget, &obex_fs_ep_out_desc); + if (!ep) + goto fail; + obex->port.out = ep; + ep->driver_data = cdev; /* claim */ + + /* support all relevant hardware speeds... we expect that when + * hardware is dual speed, all bulk-capable endpoints work at + * both speeds + */ + + obex_hs_ep_in_desc.bEndpointAddress = + obex_fs_ep_in_desc.bEndpointAddress; + obex_hs_ep_out_desc.bEndpointAddress = + obex_fs_ep_out_desc.bEndpointAddress; + + status = usb_assign_descriptors(f, fs_function, hs_function, NULL); + if (status) + goto fail; + + /* Avoid letting this gadget enumerate until the userspace + * OBEX server is active. + */ + status = usb_function_deactivate(f); + if (status < 0) + WARNING(cdev, "obex ttyGS%d: can't prevent enumeration, %d\n", + obex->port_num, status); + else + obex->can_activate = true; + + + dev_dbg(&cdev->gadget->dev, "obex ttyGS%d: %s speed IN/%s OUT/%s\n", + obex->port_num, + gadget_is_dualspeed(c->cdev->gadget) ? 
"dual" : "full", + obex->port.in->name, obex->port.out->name); + + return 0; + +fail: + /* we might as well release our claims on endpoints */ + if (obex->port.out) + obex->port.out->driver_data = NULL; + if (obex->port.in) + obex->port.in->driver_data = NULL; + + ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status); + + return status; +} + +static inline struct f_serial_opts *to_f_serial_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_serial_opts, + func_inst.group); +} + +CONFIGFS_ATTR_STRUCT(f_serial_opts); +static ssize_t f_obex_attr_show(struct config_item *item, + struct configfs_attribute *attr, + char *page) +{ + struct f_serial_opts *opts = to_f_serial_opts(item); + struct f_serial_opts_attribute *f_serial_opts_attr = + container_of(attr, struct f_serial_opts_attribute, attr); + ssize_t ret = 0; + + if (f_serial_opts_attr->show) + ret = f_serial_opts_attr->show(opts, page); + + return ret; +} + +static void obex_attr_release(struct config_item *item) +{ + struct f_serial_opts *opts = to_f_serial_opts(item); + + usb_put_function_instance(&opts->func_inst); +} + +static struct configfs_item_operations obex_item_ops = { + .release = obex_attr_release, + .show_attribute = f_obex_attr_show, +}; + +static ssize_t f_obex_port_num_show(struct f_serial_opts *opts, char *page) +{ + return sprintf(page, "%u\n", opts->port_num); +} + +static struct f_serial_opts_attribute f_obex_port_num = + __CONFIGFS_ATTR_RO(port_num, f_obex_port_num_show); + +static struct configfs_attribute *acm_attrs[] = { + &f_obex_port_num.attr, + NULL, +}; + +static struct config_item_type obex_func_type = { + .ct_item_ops = &obex_item_ops, + .ct_attrs = acm_attrs, + .ct_owner = THIS_MODULE, +}; + +static void obex_free_inst(struct usb_function_instance *f) +{ + struct f_serial_opts *opts; + + opts = container_of(f, struct f_serial_opts, func_inst); + gserial_free_line(opts->port_num); + kfree(opts); +} + +static struct usb_function_instance *obex_alloc_inst(void) +{ + struct f_serial_opts *opts; + int ret; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return ERR_PTR(-ENOMEM); + + opts->func_inst.free_func_inst = obex_free_inst; + ret = gserial_alloc_line(&opts->port_num); + if (ret) { + kfree(opts); + return ERR_PTR(ret); + } + config_group_init_type_name(&opts->func_inst.group, "", + &obex_func_type); + + return &opts->func_inst; +} + +static void obex_free(struct usb_function *f) +{ + struct f_obex *obex; + + obex = func_to_obex(f); + kfree(obex); +} + +static void obex_unbind(struct usb_configuration *c, struct usb_function *f) +{ + usb_free_all_descriptors(f); +} + +static struct usb_function *obex_alloc(struct usb_function_instance *fi) +{ + struct f_obex *obex; + struct f_serial_opts *opts; + + /* allocate and initialize one new instance */ + obex = kzalloc(sizeof(*obex), GFP_KERNEL); + if (!obex) + return ERR_PTR(-ENOMEM); + + opts = container_of(fi, struct f_serial_opts, func_inst); + + obex->port_num = opts->port_num; + + obex->port.connect = obex_connect; + obex->port.disconnect = obex_disconnect; + + obex->port.func.name = "obex"; + /* descriptors are per-instance copies */ + obex->port.func.bind = obex_bind; + obex->port.func.unbind = obex_unbind; + obex->port.func.set_alt = obex_set_alt; + obex->port.func.get_alt = obex_get_alt; + obex->port.func.disable = obex_disable; + obex->port.func.free_func = obex_free; + + return &obex->port.func; +} + +DECLARE_USB_FUNCTION_INIT(obex, obex_alloc_inst, obex_alloc); +MODULE_AUTHOR("Felipe Balbi"); 
+MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c new file mode 100644 index 000000000..c0c3ef272 --- /dev/null +++ b/drivers/usb/gadget/function/f_phonet.c @@ -0,0 +1,762 @@ +/* + * f_phonet.c -- USB CDC Phonet function + * + * Copyright (C) 2007-2008 Nokia Corporation. All rights reserved. + * + * Author: Rémi Denis-Courmont + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + */ + +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> + +#include <linux/netdevice.h> +#include <linux/if_ether.h> +#include <linux/if_phonet.h> +#include <linux/if_arp.h> + +#include <linux/usb/ch9.h> +#include <linux/usb/cdc.h> +#include <linux/usb/composite.h> + +#include "u_phonet.h" +#include "u_ether.h" + +#define PN_MEDIA_USB 0x1B +#define MAXPACKET 512 +#if (PAGE_SIZE % MAXPACKET) +#error MAXPACKET must divide PAGE_SIZE! +#endif + +/*-------------------------------------------------------------------------*/ + +struct phonet_port { + struct f_phonet *usb; + spinlock_t lock; +}; + +struct f_phonet { + struct usb_function function; + struct { + struct sk_buff *skb; + spinlock_t lock; + } rx; + struct net_device *dev; + struct usb_ep *in_ep, *out_ep; + + struct usb_request *in_req; + struct usb_request *out_reqv[0]; +}; + +static int phonet_rxq_size = 17; + +static inline struct f_phonet *func_to_pn(struct usb_function *f) +{ + return container_of(f, struct f_phonet, function); +} + +/*-------------------------------------------------------------------------*/ + +#define USB_CDC_SUBCLASS_PHONET 0xfe +#define USB_CDC_PHONET_TYPE 0xab + +static struct usb_interface_descriptor +pn_control_intf_desc = { + .bLength = sizeof pn_control_intf_desc, + .bDescriptorType = USB_DT_INTERFACE, + + /* .bInterfaceNumber = DYNAMIC, */ + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_PHONET, +}; + +static const struct usb_cdc_header_desc +pn_header_desc = { + .bLength = sizeof pn_header_desc, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_HEADER_TYPE, + .bcdCDC = cpu_to_le16(0x0110), +}; + +static const struct usb_cdc_header_desc +pn_phonet_desc = { + .bLength = sizeof pn_phonet_desc, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_PHONET_TYPE, + .bcdCDC = cpu_to_le16(0x1505), /* ??? 
*/ +}; + +static struct usb_cdc_union_desc +pn_union_desc = { + .bLength = sizeof pn_union_desc, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_UNION_TYPE, + + /* .bMasterInterface0 = DYNAMIC, */ + /* .bSlaveInterface0 = DYNAMIC, */ +}; + +static struct usb_interface_descriptor +pn_data_nop_intf_desc = { + .bLength = sizeof pn_data_nop_intf_desc, + .bDescriptorType = USB_DT_INTERFACE, + + /* .bInterfaceNumber = DYNAMIC, */ + .bAlternateSetting = 0, + .bNumEndpoints = 0, + .bInterfaceClass = USB_CLASS_CDC_DATA, +}; + +static struct usb_interface_descriptor +pn_data_intf_desc = { + .bLength = sizeof pn_data_intf_desc, + .bDescriptorType = USB_DT_INTERFACE, + + /* .bInterfaceNumber = DYNAMIC, */ + .bAlternateSetting = 1, + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_CDC_DATA, +}; + +static struct usb_endpoint_descriptor +pn_fs_sink_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor +pn_hs_sink_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(MAXPACKET), +}; + +static struct usb_endpoint_descriptor +pn_fs_source_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor +pn_hs_source_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), +}; + +static struct usb_descriptor_header *fs_pn_function[] = { + (struct usb_descriptor_header *) &pn_control_intf_desc, + (struct usb_descriptor_header *) &pn_header_desc, + (struct usb_descriptor_header *) &pn_phonet_desc, + (struct usb_descriptor_header *) &pn_union_desc, + (struct usb_descriptor_header *) &pn_data_nop_intf_desc, + (struct usb_descriptor_header *) &pn_data_intf_desc, + (struct usb_descriptor_header *) &pn_fs_sink_desc, + (struct usb_descriptor_header *) &pn_fs_source_desc, + NULL, +}; + +static struct usb_descriptor_header *hs_pn_function[] = { + (struct usb_descriptor_header *) &pn_control_intf_desc, + (struct usb_descriptor_header *) &pn_header_desc, + (struct usb_descriptor_header *) &pn_phonet_desc, + (struct usb_descriptor_header *) &pn_union_desc, + (struct usb_descriptor_header *) &pn_data_nop_intf_desc, + (struct usb_descriptor_header *) &pn_data_intf_desc, + (struct usb_descriptor_header *) &pn_hs_sink_desc, + (struct usb_descriptor_header *) &pn_hs_source_desc, + NULL, +}; + +/*-------------------------------------------------------------------------*/ + +static int pn_net_open(struct net_device *dev) +{ + netif_wake_queue(dev); + return 0; +} + +static int pn_net_close(struct net_device *dev) +{ + netif_stop_queue(dev); + return 0; +} + +static void pn_tx_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct f_phonet *fp = ep->driver_data; + struct net_device *dev = fp->dev; + struct sk_buff *skb = req->context; + + switch (req->status) { + case 0: + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + break; + + case -ESHUTDOWN: /* disconnected */ + case -ECONNRESET: /* disabled */ + dev->stats.tx_aborted_errors++; + default: + dev->stats.tx_errors++; + } + + dev_kfree_skb_any(skb); + 
netif_wake_queue(dev); +} + +static int pn_net_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct phonet_port *port = netdev_priv(dev); + struct f_phonet *fp; + struct usb_request *req; + unsigned long flags; + + if (skb->protocol != htons(ETH_P_PHONET)) + goto out; + + spin_lock_irqsave(&port->lock, flags); + fp = port->usb; + if (unlikely(!fp)) /* race with carrier loss */ + goto out_unlock; + + req = fp->in_req; + req->buf = skb->data; + req->length = skb->len; + req->complete = pn_tx_complete; + req->zero = 1; + req->context = skb; + + if (unlikely(usb_ep_queue(fp->in_ep, req, GFP_ATOMIC))) + goto out_unlock; + + netif_stop_queue(dev); + skb = NULL; + +out_unlock: + spin_unlock_irqrestore(&port->lock, flags); +out: + if (unlikely(skb)) { + dev_kfree_skb(skb); + dev->stats.tx_dropped++; + } + return NETDEV_TX_OK; +} + +static int pn_net_mtu(struct net_device *dev, int new_mtu) +{ + if ((new_mtu < PHONET_MIN_MTU) || (new_mtu > PHONET_MAX_MTU)) + return -EINVAL; + dev->mtu = new_mtu; + return 0; +} + +static const struct net_device_ops pn_netdev_ops = { + .ndo_open = pn_net_open, + .ndo_stop = pn_net_close, + .ndo_start_xmit = pn_net_xmit, + .ndo_change_mtu = pn_net_mtu, +}; + +static void pn_net_setup(struct net_device *dev) +{ + dev->features = 0; + dev->type = ARPHRD_PHONET; + dev->flags = IFF_POINTOPOINT | IFF_NOARP; + dev->mtu = PHONET_DEV_MTU; + dev->hard_header_len = 1; + dev->dev_addr[0] = PN_MEDIA_USB; + dev->addr_len = 1; + dev->tx_queue_len = 1; + + dev->netdev_ops = &pn_netdev_ops; + dev->destructor = free_netdev; + dev->header_ops = &phonet_header_ops; +} + +/*-------------------------------------------------------------------------*/ + +/* + * Queue buffer for data from the host + */ +static int +pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags) +{ + struct page *page; + int err; + + page = __dev_alloc_page(gfp_flags | __GFP_NOMEMALLOC); + if (!page) + return -ENOMEM; + + req->buf = page_address(page); + req->length = PAGE_SIZE; + req->context = page; + + err = usb_ep_queue(fp->out_ep, req, gfp_flags); + if (unlikely(err)) + put_page(page); + return err; +} + +static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct f_phonet *fp = ep->driver_data; + struct net_device *dev = fp->dev; + struct page *page = req->context; + struct sk_buff *skb; + unsigned long flags; + int status = req->status; + + switch (status) { + case 0: + spin_lock_irqsave(&fp->rx.lock, flags); + skb = fp->rx.skb; + if (!skb) + skb = fp->rx.skb = netdev_alloc_skb(dev, 12); + if (req->actual < req->length) /* Last fragment */ + fp->rx.skb = NULL; + spin_unlock_irqrestore(&fp->rx.lock, flags); + + if (unlikely(!skb)) + break; + + if (skb->len == 0) { /* First fragment */ + skb->protocol = htons(ETH_P_PHONET); + skb_reset_mac_header(skb); + /* Can't use pskb_pull() on page in IRQ */ + memcpy(skb_put(skb, 1), page_address(page), 1); + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, + skb->len <= 1, req->actual, PAGE_SIZE); + page = NULL; + + if (req->actual < req->length) { /* Last fragment */ + skb->dev = dev; + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + + netif_rx(skb); + } + break; + + /* Do not resubmit in these cases: */ + case -ESHUTDOWN: /* disconnect */ + case -ECONNABORTED: /* hw reset */ + case -ECONNRESET: /* dequeued (unlink or netif down) */ + req = NULL; + break; + + /* Do resubmit in these cases: */ + case -EOVERFLOW: /* request buffer overflow */ + dev->stats.rx_over_errors++; + default: + 
dev->stats.rx_errors++; + break; + } + + if (page) + put_page(page); + if (req) + pn_rx_submit(fp, req, GFP_ATOMIC); +} + +/*-------------------------------------------------------------------------*/ + +static void __pn_reset(struct usb_function *f) +{ + struct f_phonet *fp = func_to_pn(f); + struct net_device *dev = fp->dev; + struct phonet_port *port = netdev_priv(dev); + + netif_carrier_off(dev); + port->usb = NULL; + + usb_ep_disable(fp->out_ep); + usb_ep_disable(fp->in_ep); + if (fp->rx.skb) { + dev_kfree_skb_irq(fp->rx.skb); + fp->rx.skb = NULL; + } +} + +static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt) +{ + struct f_phonet *fp = func_to_pn(f); + struct usb_gadget *gadget = fp->function.config->cdev->gadget; + + if (intf == pn_control_intf_desc.bInterfaceNumber) + /* control interface, no altsetting */ + return (alt > 0) ? -EINVAL : 0; + + if (intf == pn_data_intf_desc.bInterfaceNumber) { + struct net_device *dev = fp->dev; + struct phonet_port *port = netdev_priv(dev); + + /* data intf (0: inactive, 1: active) */ + if (alt > 1) + return -EINVAL; + + spin_lock(&port->lock); + + if (fp->in_ep->driver_data) + __pn_reset(f); + + if (alt == 1) { + int i; + + if (config_ep_by_speed(gadget, f, fp->in_ep) || + config_ep_by_speed(gadget, f, fp->out_ep)) { + fp->in_ep->desc = NULL; + fp->out_ep->desc = NULL; + spin_unlock(&port->lock); + return -EINVAL; + } + usb_ep_enable(fp->out_ep); + usb_ep_enable(fp->in_ep); + + port->usb = fp; + fp->out_ep->driver_data = fp; + fp->in_ep->driver_data = fp; + + netif_carrier_on(dev); + for (i = 0; i < phonet_rxq_size; i++) + pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC); + } + spin_unlock(&port->lock); + return 0; + } + + return -EINVAL; +} + +static int pn_get_alt(struct usb_function *f, unsigned intf) +{ + struct f_phonet *fp = func_to_pn(f); + + if (intf == pn_control_intf_desc.bInterfaceNumber) + return 0; + + if (intf == pn_data_intf_desc.bInterfaceNumber) { + struct phonet_port *port = netdev_priv(fp->dev); + u8 alt; + + spin_lock(&port->lock); + alt = port->usb != NULL; + spin_unlock(&port->lock); + return alt; + } + + return -EINVAL; +} + +static void pn_disconnect(struct usb_function *f) +{ + struct f_phonet *fp = func_to_pn(f); + struct phonet_port *port = netdev_priv(fp->dev); + unsigned long flags; + + /* remain disabled until set_alt */ + spin_lock_irqsave(&port->lock, flags); + __pn_reset(f); + spin_unlock_irqrestore(&port->lock, flags); +} + +/*-------------------------------------------------------------------------*/ + +static int pn_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct usb_gadget *gadget = cdev->gadget; + struct f_phonet *fp = func_to_pn(f); + struct usb_ep *ep; + int status, i; + + struct f_phonet_opts *phonet_opts; + + phonet_opts = container_of(f->fi, struct f_phonet_opts, func_inst); + + /* + * in drivers/usb/gadget/configfs.c:configfs_composite_bind() + * configurations are bound in sequence with list_for_each_entry, + * in each configuration its functions are bound in sequence + * with list_for_each_entry, so we assume no race condition + * with regard to phonet_opts->bound access + */ + if (!phonet_opts->bound) { + gphonet_set_gadget(phonet_opts->net, gadget); + status = gphonet_register_netdev(phonet_opts->net); + if (status) + return status; + phonet_opts->bound = true; + } + + /* Reserve interface IDs */ + status = usb_interface_id(c, f); + if (status < 0) + goto err; + pn_control_intf_desc.bInterfaceNumber = status; + 
pn_union_desc.bMasterInterface0 = status; + + status = usb_interface_id(c, f); + if (status < 0) + goto err; + pn_data_nop_intf_desc.bInterfaceNumber = status; + pn_data_intf_desc.bInterfaceNumber = status; + pn_union_desc.bSlaveInterface0 = status; + + /* Reserve endpoints */ + status = -ENODEV; + ep = usb_ep_autoconfig(gadget, &pn_fs_sink_desc); + if (!ep) + goto err; + fp->out_ep = ep; + ep->driver_data = fp; /* Claim */ + + ep = usb_ep_autoconfig(gadget, &pn_fs_source_desc); + if (!ep) + goto err; + fp->in_ep = ep; + ep->driver_data = fp; /* Claim */ + + pn_hs_sink_desc.bEndpointAddress = pn_fs_sink_desc.bEndpointAddress; + pn_hs_source_desc.bEndpointAddress = pn_fs_source_desc.bEndpointAddress; + + /* Do not try to bind Phonet twice... */ + status = usb_assign_descriptors(f, fs_pn_function, hs_pn_function, + NULL); + if (status) + goto err; + + /* Incoming USB requests */ + status = -ENOMEM; + for (i = 0; i < phonet_rxq_size; i++) { + struct usb_request *req; + + req = usb_ep_alloc_request(fp->out_ep, GFP_KERNEL); + if (!req) + goto err_req; + + req->complete = pn_rx_complete; + fp->out_reqv[i] = req; + } + + /* Outgoing USB requests */ + fp->in_req = usb_ep_alloc_request(fp->in_ep, GFP_KERNEL); + if (!fp->in_req) + goto err_req; + + INFO(cdev, "USB CDC Phonet function\n"); + INFO(cdev, "using %s, OUT %s, IN %s\n", cdev->gadget->name, + fp->out_ep->name, fp->in_ep->name); + return 0; + +err_req: + for (i = 0; i < phonet_rxq_size && fp->out_reqv[i]; i++) + usb_ep_free_request(fp->out_ep, fp->out_reqv[i]); + usb_free_all_descriptors(f); +err: + if (fp->out_ep) + fp->out_ep->driver_data = NULL; + if (fp->in_ep) + fp->in_ep->driver_data = NULL; + ERROR(cdev, "USB CDC Phonet: cannot autoconfigure\n"); + return status; +} + +static inline struct f_phonet_opts *to_f_phonet_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_phonet_opts, + func_inst.group); +} + +CONFIGFS_ATTR_STRUCT(f_phonet_opts); +static ssize_t f_phonet_attr_show(struct config_item *item, + struct configfs_attribute *attr, + char *page) +{ + struct f_phonet_opts *opts = to_f_phonet_opts(item); + struct f_phonet_opts_attribute *f_phonet_opts_attr = + container_of(attr, struct f_phonet_opts_attribute, attr); + ssize_t ret = 0; + + if (f_phonet_opts_attr->show) + ret = f_phonet_opts_attr->show(opts, page); + return ret; +} + +static void phonet_attr_release(struct config_item *item) +{ + struct f_phonet_opts *opts = to_f_phonet_opts(item); + + usb_put_function_instance(&opts->func_inst); +} + +static struct configfs_item_operations phonet_item_ops = { + .release = phonet_attr_release, + .show_attribute = f_phonet_attr_show, +}; + +static ssize_t f_phonet_ifname_show(struct f_phonet_opts *opts, char *page) +{ + return gether_get_ifname(opts->net, page, PAGE_SIZE); +} + +static struct f_phonet_opts_attribute f_phonet_ifname = + __CONFIGFS_ATTR_RO(ifname, f_phonet_ifname_show); + +static struct configfs_attribute *phonet_attrs[] = { + &f_phonet_ifname.attr, + NULL, +}; + +static struct config_item_type phonet_func_type = { + .ct_item_ops = &phonet_item_ops, + .ct_attrs = phonet_attrs, + .ct_owner = THIS_MODULE, +}; + +static void phonet_free_inst(struct usb_function_instance *f) +{ + struct f_phonet_opts *opts; + + opts = container_of(f, struct f_phonet_opts, func_inst); + if (opts->bound) + gphonet_cleanup(opts->net); + else + free_netdev(opts->net); + kfree(opts); +} + +static struct usb_function_instance *phonet_alloc_inst(void) +{ + struct f_phonet_opts *opts; + + opts = 
kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return ERR_PTR(-ENOMEM); + + opts->func_inst.free_func_inst = phonet_free_inst; + opts->net = gphonet_setup_default(); + if (IS_ERR(opts->net)) { + struct net_device *net = opts->net; + kfree(opts); + return ERR_CAST(net); + } + + config_group_init_type_name(&opts->func_inst.group, "", + &phonet_func_type); + + return &opts->func_inst; +} + +static void phonet_free(struct usb_function *f) +{ + struct f_phonet *phonet; + + phonet = func_to_pn(f); + kfree(phonet); +} + +static void pn_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct f_phonet *fp = func_to_pn(f); + int i; + + /* We are already disconnected */ + if (fp->in_req) + usb_ep_free_request(fp->in_ep, fp->in_req); + for (i = 0; i < phonet_rxq_size; i++) + if (fp->out_reqv[i]) + usb_ep_free_request(fp->out_ep, fp->out_reqv[i]); + + usb_free_all_descriptors(f); +} + +static struct usb_function *phonet_alloc(struct usb_function_instance *fi) +{ + struct f_phonet *fp; + struct f_phonet_opts *opts; + int size; + + size = sizeof(*fp) + (phonet_rxq_size * sizeof(struct usb_request *)); + fp = kzalloc(size, GFP_KERNEL); + if (!fp) + return ERR_PTR(-ENOMEM); + + opts = container_of(fi, struct f_phonet_opts, func_inst); + + fp->dev = opts->net; + fp->function.name = "phonet"; + fp->function.bind = pn_bind; + fp->function.unbind = pn_unbind; + fp->function.set_alt = pn_set_alt; + fp->function.get_alt = pn_get_alt; + fp->function.disable = pn_disconnect; + fp->function.free_func = phonet_free; + spin_lock_init(&fp->rx.lock); + + return &fp->function; +} + +struct net_device *gphonet_setup_default(void) +{ + struct net_device *dev; + struct phonet_port *port; + + /* Create net device */ + dev = alloc_netdev(sizeof(*port), "upnlink%d", NET_NAME_UNKNOWN, + pn_net_setup); + if (!dev) + return ERR_PTR(-ENOMEM); + + port = netdev_priv(dev); + spin_lock_init(&port->lock); + netif_carrier_off(dev); + + return dev; +} + +void gphonet_set_gadget(struct net_device *net, struct usb_gadget *g) +{ + SET_NETDEV_DEV(net, &g->dev); +} + +int gphonet_register_netdev(struct net_device *net) +{ + int status; + + status = register_netdev(net); + if (status) + free_netdev(net); + + return status; +} + +void gphonet_cleanup(struct net_device *dev) +{ + unregister_netdev(dev); +} + +DECLARE_USB_FUNCTION_INIT(phonet, phonet_alloc_inst, phonet_alloc); +MODULE_AUTHOR("Rémi Denis-Courmont"); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c new file mode 100644 index 000000000..44173df27 --- /dev/null +++ b/drivers/usb/gadget/function/f_printer.c @@ -0,0 +1,1471 @@ +/* + * f_printer.c - USB printer function driver + * + * Copied from drivers/usb/gadget/legacy/printer.c, + * which was: + * + * printer.c -- Printer gadget driver + * + * Copyright (C) 2003-2005 David Brownell + * Copyright (C) 2006 Craig W. Nadler + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/ioport.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/idr.h> +#include <linux/timer.h> +#include <linux/list.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/moduleparam.h> +#include <linux/fs.h> +#include <linux/poll.h> +#include <linux/types.h> +#include <linux/ctype.h> +#include <linux/cdev.h> + +#include <asm/byteorder.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/uaccess.h> +#include <asm/unaligned.h> + +#include <linux/usb/ch9.h> +#include <linux/usb/composite.h> +#include <linux/usb/gadget.h> +#include <linux/usb/g_printer.h> + +#include "u_printer.h" + +#define PNP_STRING_LEN 1024 +#define PRINTER_MINORS 4 +#define GET_DEVICE_ID 0 +#define GET_PORT_STATUS 1 +#define SOFT_RESET 2 + +static int major, minors; +static struct class *usb_gadget_class; +static DEFINE_IDA(printer_ida); +static DEFINE_MUTEX(printer_ida_lock); /* protects access do printer_ida */ + +/*-------------------------------------------------------------------------*/ + +struct printer_dev { + spinlock_t lock; /* lock this structure */ + /* lock buffer lists during read/write calls */ + struct mutex lock_printer_io; + struct usb_gadget *gadget; + s8 interface; + struct usb_ep *in_ep, *out_ep; + + struct list_head rx_reqs; /* List of free RX structs */ + struct list_head rx_reqs_active; /* List of Active RX xfers */ + struct list_head rx_buffers; /* List of completed xfers */ + /* wait until there is data to be read. */ + wait_queue_head_t rx_wait; + struct list_head tx_reqs; /* List of free TX structs */ + struct list_head tx_reqs_active; /* List of Active TX xfers */ + /* Wait until there are write buffers available to use. */ + wait_queue_head_t tx_wait; + /* Wait until all write buffers have been sent. */ + wait_queue_head_t tx_flush_wait; + struct usb_request *current_rx_req; + size_t current_rx_bytes; + u8 *current_rx_buf; + u8 printer_status; + u8 reset_printer; + int minor; + struct cdev printer_cdev; + u8 printer_cdev_open; + wait_queue_head_t wait; + unsigned q_len; + char *pnp_string; /* We don't own memory! */ + struct usb_function function; +}; + +static inline struct printer_dev *func_to_printer(struct usb_function *f) +{ + return container_of(f, struct printer_dev, function); +} + +/*-------------------------------------------------------------------------*/ + +/* + * DESCRIPTORS ... most are static, but strings and (full) configuration + * descriptors are built on demand. 
+ */ + +/* holds our biggest descriptor */ +#define USB_DESC_BUFSIZE 256 +#define USB_BUFSIZE 8192 + +static struct usb_interface_descriptor intf_desc = { + .bLength = sizeof(intf_desc), + .bDescriptorType = USB_DT_INTERFACE, + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_PRINTER, + .bInterfaceSubClass = 1, /* Printer Sub-Class */ + .bInterfaceProtocol = 2, /* Bi-Directional */ + .iInterface = 0 +}; + +static struct usb_endpoint_descriptor fs_ep_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK +}; + +static struct usb_endpoint_descriptor fs_ep_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK +}; + +static struct usb_descriptor_header *fs_printer_function[] = { + (struct usb_descriptor_header *) &intf_desc, + (struct usb_descriptor_header *) &fs_ep_in_desc, + (struct usb_descriptor_header *) &fs_ep_out_desc, + NULL +}; + +/* + * usb 2.0 devices need to expose both high speed and full speed + * descriptors, unless they only run at full speed. + */ + +static struct usb_endpoint_descriptor hs_ep_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512) +}; + +static struct usb_endpoint_descriptor hs_ep_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512) +}; + +static struct usb_qualifier_descriptor dev_qualifier = { + .bLength = sizeof(dev_qualifier), + .bDescriptorType = USB_DT_DEVICE_QUALIFIER, + .bcdUSB = cpu_to_le16(0x0200), + .bDeviceClass = USB_CLASS_PRINTER, + .bNumConfigurations = 1 +}; + +static struct usb_descriptor_header *hs_printer_function[] = { + (struct usb_descriptor_header *) &intf_desc, + (struct usb_descriptor_header *) &hs_ep_in_desc, + (struct usb_descriptor_header *) &hs_ep_out_desc, + NULL +}; + +/* + * Added endpoint descriptors for 3.0 devices + */ + +static struct usb_endpoint_descriptor ss_ep_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(1024), +}; + +static struct usb_ss_ep_comp_descriptor ss_ep_in_comp_desc = { + .bLength = sizeof(ss_ep_in_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, +}; + +static struct usb_endpoint_descriptor ss_ep_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(1024), +}; + +static struct usb_ss_ep_comp_descriptor ss_ep_out_comp_desc = { + .bLength = sizeof(ss_ep_out_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, +}; + +static struct usb_descriptor_header *ss_printer_function[] = { + (struct usb_descriptor_header *) &intf_desc, + (struct usb_descriptor_header *) &ss_ep_in_desc, + (struct usb_descriptor_header *) &ss_ep_in_comp_desc, + (struct usb_descriptor_header *) &ss_ep_out_desc, + (struct usb_descriptor_header *) &ss_ep_out_comp_desc, + NULL +}; + +/* maxpacket and other transfer characteristics vary by speed. 
*/ +static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget, + struct usb_endpoint_descriptor *fs, + struct usb_endpoint_descriptor *hs, + struct usb_endpoint_descriptor *ss) +{ + switch (gadget->speed) { + case USB_SPEED_SUPER: + return ss; + case USB_SPEED_HIGH: + return hs; + default: + return fs; + } +} + +/*-------------------------------------------------------------------------*/ + +static struct usb_request * +printer_req_alloc(struct usb_ep *ep, unsigned len, gfp_t gfp_flags) +{ + struct usb_request *req; + + req = usb_ep_alloc_request(ep, gfp_flags); + + if (req != NULL) { + req->length = len; + req->buf = kmalloc(len, gfp_flags); + if (req->buf == NULL) { + usb_ep_free_request(ep, req); + return NULL; + } + } + + return req; +} + +static void +printer_req_free(struct usb_ep *ep, struct usb_request *req) +{ + if (ep != NULL && req != NULL) { + kfree(req->buf); + usb_ep_free_request(ep, req); + } +} + +/*-------------------------------------------------------------------------*/ + +static void rx_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct printer_dev *dev = ep->driver_data; + int status = req->status; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + + list_del_init(&req->list); /* Remode from Active List */ + + switch (status) { + + /* normal completion */ + case 0: + if (req->actual > 0) { + list_add_tail(&req->list, &dev->rx_buffers); + DBG(dev, "G_Printer : rx length %d\n", req->actual); + } else { + list_add(&req->list, &dev->rx_reqs); + } + break; + + /* software-driven interface shutdown */ + case -ECONNRESET: /* unlink */ + case -ESHUTDOWN: /* disconnect etc */ + VDBG(dev, "rx shutdown, code %d\n", status); + list_add(&req->list, &dev->rx_reqs); + break; + + /* for hardware automagic (such as pxa) */ + case -ECONNABORTED: /* endpoint reset */ + DBG(dev, "rx %s reset\n", ep->name); + list_add(&req->list, &dev->rx_reqs); + break; + + /* data overrun */ + case -EOVERFLOW: + /* FALLTHROUGH */ + + default: + DBG(dev, "rx status %d\n", status); + list_add(&req->list, &dev->rx_reqs); + break; + } + + wake_up_interruptible(&dev->rx_wait); + spin_unlock_irqrestore(&dev->lock, flags); +} + +static void tx_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct printer_dev *dev = ep->driver_data; + + switch (req->status) { + default: + VDBG(dev, "tx err %d\n", req->status); + /* FALLTHROUGH */ + case -ECONNRESET: /* unlink */ + case -ESHUTDOWN: /* disconnect etc */ + break; + case 0: + break; + } + + spin_lock(&dev->lock); + /* Take the request struct off the active list and put it on the + * free list. + */ + list_del_init(&req->list); + list_add(&req->list, &dev->tx_reqs); + wake_up_interruptible(&dev->tx_wait); + if (likely(list_empty(&dev->tx_reqs_active))) + wake_up_interruptible(&dev->tx_flush_wait); + + spin_unlock(&dev->lock); +} + +/*-------------------------------------------------------------------------*/ + +static int +printer_open(struct inode *inode, struct file *fd) +{ + struct printer_dev *dev; + unsigned long flags; + int ret = -EBUSY; + + dev = container_of(inode->i_cdev, struct printer_dev, printer_cdev); + + spin_lock_irqsave(&dev->lock, flags); + + if (!dev->printer_cdev_open) { + dev->printer_cdev_open = 1; + fd->private_data = dev; + ret = 0; + /* Change the printer status to show that it's on-line. 
*/ + dev->printer_status |= PRINTER_SELECTED; + } + + spin_unlock_irqrestore(&dev->lock, flags); + + DBG(dev, "printer_open returned %x\n", ret); + return ret; +} + +static int +printer_close(struct inode *inode, struct file *fd) +{ + struct printer_dev *dev = fd->private_data; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + dev->printer_cdev_open = 0; + fd->private_data = NULL; + /* Change printer status to show that the printer is off-line. */ + dev->printer_status &= ~PRINTER_SELECTED; + spin_unlock_irqrestore(&dev->lock, flags); + + DBG(dev, "printer_close\n"); + + return 0; +} + +/* This function must be called with interrupts turned off. */ +static void +setup_rx_reqs(struct printer_dev *dev) +{ + struct usb_request *req; + + while (likely(!list_empty(&dev->rx_reqs))) { + int error; + + req = container_of(dev->rx_reqs.next, + struct usb_request, list); + list_del_init(&req->list); + + /* The USB Host sends us whatever amount of data it wants to + * so we always set the length field to the full USB_BUFSIZE. + * If the amount of data is more than the read() caller asked + * for it will be stored in the request buffer until it is + * asked for by read(). + */ + req->length = USB_BUFSIZE; + req->complete = rx_complete; + + /* here, we unlock, and only unlock, to avoid deadlock. */ + spin_unlock(&dev->lock); + error = usb_ep_queue(dev->out_ep, req, GFP_ATOMIC); + spin_lock(&dev->lock); + if (error) { + DBG(dev, "rx submit --> %d\n", error); + list_add(&req->list, &dev->rx_reqs); + break; + } + /* if the req is empty, then add it into dev->rx_reqs_active. */ + else if (list_empty(&req->list)) + list_add(&req->list, &dev->rx_reqs_active); + } +} + +static ssize_t +printer_read(struct file *fd, char __user *buf, size_t len, loff_t *ptr) +{ + struct printer_dev *dev = fd->private_data; + unsigned long flags; + size_t size; + size_t bytes_copied; + struct usb_request *req; + /* This is a pointer to the current USB rx request. */ + struct usb_request *current_rx_req; + /* This is the number of bytes in the current rx buffer. */ + size_t current_rx_bytes; + /* This is a pointer to the current rx buffer. */ + u8 *current_rx_buf; + + if (len == 0) + return -EINVAL; + + DBG(dev, "printer_read trying to read %d bytes\n", (int)len); + + mutex_lock(&dev->lock_printer_io); + spin_lock_irqsave(&dev->lock, flags); + + /* We will use this flag later to check if a printer reset happened + * after we turn interrupts back on. + */ + dev->reset_printer = 0; + + setup_rx_reqs(dev); + + bytes_copied = 0; + current_rx_req = dev->current_rx_req; + current_rx_bytes = dev->current_rx_bytes; + current_rx_buf = dev->current_rx_buf; + dev->current_rx_req = NULL; + dev->current_rx_bytes = 0; + dev->current_rx_buf = NULL; + + /* Check if there is any data in the read buffers. Please note that + * current_rx_bytes is the number of bytes in the current rx buffer. + * If it is zero then check if there are any other rx_buffers that + * are on the completed list. We are only out of data if all rx + * buffers are empty. + */ + if ((current_rx_bytes == 0) && + (likely(list_empty(&dev->rx_buffers)))) { + /* Turn interrupts back on before sleeping. */ + spin_unlock_irqrestore(&dev->lock, flags); + + /* + * If no data is available check if this is a NON-Blocking + * call or not. 
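For context, a minimal sketch of the user-space side of this read path (not part of the patch): it assumes the default device node created later in printer_func_bind(), typically /dev/g_printer0, and uses poll() so the O_NONBLOCK branch never has to spin.

#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[8192];			/* matches USB_BUFSIZE */
	struct pollfd pfd;
	ssize_t n;
	int fd = open("/dev/g_printer0", O_RDWR | O_NONBLOCK); /* node name is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	pfd.fd = fd;
	pfd.events = POLLIN | POLLRDNORM;

	for (;;) {
		if (poll(&pfd, 1, -1) < 0)
			break;
		n = read(fd, buf, sizeof(buf));		/* drains dev->rx_buffers */
		if (n > 0)
			fwrite(buf, 1, n, stdout);	/* hand the job to a filter/spooler */
		else if (n < 0 && errno != EAGAIN)
			break;
	}
	close(fd);
	return 0;
}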
+ */ + if (fd->f_flags & (O_NONBLOCK|O_NDELAY)) { + mutex_unlock(&dev->lock_printer_io); + return -EAGAIN; + } + + /* Sleep until data is available */ + wait_event_interruptible(dev->rx_wait, + (likely(!list_empty(&dev->rx_buffers)))); + spin_lock_irqsave(&dev->lock, flags); + } + + /* We have data to return then copy it to the caller's buffer.*/ + while ((current_rx_bytes || likely(!list_empty(&dev->rx_buffers))) + && len) { + if (current_rx_bytes == 0) { + req = container_of(dev->rx_buffers.next, + struct usb_request, list); + list_del_init(&req->list); + + if (req->actual && req->buf) { + current_rx_req = req; + current_rx_bytes = req->actual; + current_rx_buf = req->buf; + } else { + list_add(&req->list, &dev->rx_reqs); + continue; + } + } + + /* Don't leave irqs off while doing memory copies */ + spin_unlock_irqrestore(&dev->lock, flags); + + if (len > current_rx_bytes) + size = current_rx_bytes; + else + size = len; + + size -= copy_to_user(buf, current_rx_buf, size); + bytes_copied += size; + len -= size; + buf += size; + + spin_lock_irqsave(&dev->lock, flags); + + /* We've disconnected or reset so return. */ + if (dev->reset_printer) { + list_add(¤t_rx_req->list, &dev->rx_reqs); + spin_unlock_irqrestore(&dev->lock, flags); + mutex_unlock(&dev->lock_printer_io); + return -EAGAIN; + } + + /* If we not returning all the data left in this RX request + * buffer then adjust the amount of data left in the buffer. + * Othewise if we are done with this RX request buffer then + * requeue it to get any incoming data from the USB host. + */ + if (size < current_rx_bytes) { + current_rx_bytes -= size; + current_rx_buf += size; + } else { + list_add(¤t_rx_req->list, &dev->rx_reqs); + current_rx_bytes = 0; + current_rx_buf = NULL; + current_rx_req = NULL; + } + } + + dev->current_rx_req = current_rx_req; + dev->current_rx_bytes = current_rx_bytes; + dev->current_rx_buf = current_rx_buf; + + spin_unlock_irqrestore(&dev->lock, flags); + mutex_unlock(&dev->lock_printer_io); + + DBG(dev, "printer_read returned %d bytes\n", (int)bytes_copied); + + if (bytes_copied) + return bytes_copied; + else + return -EAGAIN; +} + +static ssize_t +printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr) +{ + struct printer_dev *dev = fd->private_data; + unsigned long flags; + size_t size; /* Amount of data in a TX request. */ + size_t bytes_copied = 0; + struct usb_request *req; + + DBG(dev, "printer_write trying to send %d bytes\n", (int)len); + + if (len == 0) + return -EINVAL; + + mutex_lock(&dev->lock_printer_io); + spin_lock_irqsave(&dev->lock, flags); + + /* Check if a printer reset happens while we have interrupts on */ + dev->reset_printer = 0; + + /* Check if there is any available write buffers */ + if (likely(list_empty(&dev->tx_reqs))) { + /* Turn interrupts back on before sleeping. */ + spin_unlock_irqrestore(&dev->lock, flags); + + /* + * If write buffers are available check if this is + * a NON-Blocking call or not. 
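The mirror image for the IN direction, again sketched from user space (device node name and payload are assumptions): data written here lands in printer_write(), and fsync() maps onto printer_fsync() further down to wait until tx_reqs_active has drained.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char reply[] = "IEEE1284-style back-channel data";	/* example payload */
	int fd = open("/dev/g_printer0", O_RDWR);			/* blocking writes */
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = write(fd, reply, sizeof(reply) - 1);	/* queued on dev->tx_reqs */
	if (n < 0)
		perror("write");
	else if (fsync(fd))				/* block until the host has read it all */
		perror("fsync");
	close(fd);
	return 0;
}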
+ */ + if (fd->f_flags & (O_NONBLOCK|O_NDELAY)) { + mutex_unlock(&dev->lock_printer_io); + return -EAGAIN; + } + + /* Sleep until a write buffer is available */ + wait_event_interruptible(dev->tx_wait, + (likely(!list_empty(&dev->tx_reqs)))); + spin_lock_irqsave(&dev->lock, flags); + } + + while (likely(!list_empty(&dev->tx_reqs)) && len) { + + if (len > USB_BUFSIZE) + size = USB_BUFSIZE; + else + size = len; + + req = container_of(dev->tx_reqs.next, struct usb_request, + list); + list_del_init(&req->list); + + req->complete = tx_complete; + req->length = size; + + /* Check if we need to send a zero length packet. */ + if (len > size) + /* They will be more TX requests so no yet. */ + req->zero = 0; + else + /* If the data amount is not a multiple of the + * maxpacket size then send a zero length packet. + */ + req->zero = ((len % dev->in_ep->maxpacket) == 0); + + /* Don't leave irqs off while doing memory copies */ + spin_unlock_irqrestore(&dev->lock, flags); + + if (copy_from_user(req->buf, buf, size)) { + list_add(&req->list, &dev->tx_reqs); + mutex_unlock(&dev->lock_printer_io); + return bytes_copied; + } + + bytes_copied += size; + len -= size; + buf += size; + + spin_lock_irqsave(&dev->lock, flags); + + /* We've disconnected or reset so free the req and buffer */ + if (dev->reset_printer) { + list_add(&req->list, &dev->tx_reqs); + spin_unlock_irqrestore(&dev->lock, flags); + mutex_unlock(&dev->lock_printer_io); + return -EAGAIN; + } + + if (usb_ep_queue(dev->in_ep, req, GFP_ATOMIC)) { + list_add(&req->list, &dev->tx_reqs); + spin_unlock_irqrestore(&dev->lock, flags); + mutex_unlock(&dev->lock_printer_io); + return -EAGAIN; + } + + list_add(&req->list, &dev->tx_reqs_active); + + } + + spin_unlock_irqrestore(&dev->lock, flags); + mutex_unlock(&dev->lock_printer_io); + + DBG(dev, "printer_write sent %d bytes\n", (int)bytes_copied); + + if (bytes_copied) + return bytes_copied; + else + return -EAGAIN; +} + +static int +printer_fsync(struct file *fd, loff_t start, loff_t end, int datasync) +{ + struct printer_dev *dev = fd->private_data; + struct inode *inode = file_inode(fd); + unsigned long flags; + int tx_list_empty; + + mutex_lock(&inode->i_mutex); + spin_lock_irqsave(&dev->lock, flags); + tx_list_empty = (likely(list_empty(&dev->tx_reqs))); + spin_unlock_irqrestore(&dev->lock, flags); + + if (!tx_list_empty) { + /* Sleep until all data has been sent */ + wait_event_interruptible(dev->tx_flush_wait, + (likely(list_empty(&dev->tx_reqs_active)))); + } + mutex_unlock(&inode->i_mutex); + + return 0; +} + +static unsigned int +printer_poll(struct file *fd, poll_table *wait) +{ + struct printer_dev *dev = fd->private_data; + unsigned long flags; + int status = 0; + + mutex_lock(&dev->lock_printer_io); + spin_lock_irqsave(&dev->lock, flags); + setup_rx_reqs(dev); + spin_unlock_irqrestore(&dev->lock, flags); + mutex_unlock(&dev->lock_printer_io); + + poll_wait(fd, &dev->rx_wait, wait); + poll_wait(fd, &dev->tx_wait, wait); + + spin_lock_irqsave(&dev->lock, flags); + if (likely(!list_empty(&dev->tx_reqs))) + status |= POLLOUT | POLLWRNORM; + + if (likely(dev->current_rx_bytes) || + likely(!list_empty(&dev->rx_buffers))) + status |= POLLIN | POLLRDNORM; + + spin_unlock_irqrestore(&dev->lock, flags); + + return status; +} + +static long +printer_ioctl(struct file *fd, unsigned int code, unsigned long arg) +{ + struct printer_dev *dev = fd->private_data; + unsigned long flags; + int status = 0; + + DBG(dev, "printer_ioctl: cmd=0x%4.4x, arg=%lu\n", code, arg); + + /* handle ioctls */ + + 
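The switch just below handles exactly two ioctls. A user-space sketch of both, using the codes from <linux/usb/g_printer.h>; the device node name is an assumption:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/usb/g_printer.h>	/* GADGET_*_PRINTER_STATUS, PRINTER_* bits */

int main(void)
{
	int fd = open("/dev/g_printer0", O_RDWR);
	int status;

	if (fd < 0)
		return 1;

	status = ioctl(fd, GADGET_GET_PRINTER_STATUS);	/* status comes back in the return value */
	if (status < 0)
		return 1;
	printf("selected=%d error=%d paper_empty=%d\n",
	       !!(status & PRINTER_SELECTED),
	       !(status & PRINTER_NOT_ERROR),
	       !!(status & PRINTER_PAPER_EMPTY));

	/* report "paper out" to the host on its next GET_PORT_STATUS */
	status |= PRINTER_PAPER_EMPTY;
	if (ioctl(fd, GADGET_SET_PRINTER_STATUS, status) < 0)
		return 1;

	close(fd);
	return 0;
}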
spin_lock_irqsave(&dev->lock, flags); + + switch (code) { + case GADGET_GET_PRINTER_STATUS: + status = (int)dev->printer_status; + break; + case GADGET_SET_PRINTER_STATUS: + dev->printer_status = (u8)arg; + break; + default: + /* could not handle ioctl */ + DBG(dev, "printer_ioctl: ERROR cmd=0x%4.4xis not supported\n", + code); + status = -ENOTTY; + } + + spin_unlock_irqrestore(&dev->lock, flags); + + return status; +} + +/* used after endpoint configuration */ +static const struct file_operations printer_io_operations = { + .owner = THIS_MODULE, + .open = printer_open, + .read = printer_read, + .write = printer_write, + .fsync = printer_fsync, + .poll = printer_poll, + .unlocked_ioctl = printer_ioctl, + .release = printer_close, + .llseek = noop_llseek, +}; + +/*-------------------------------------------------------------------------*/ + +static int +set_printer_interface(struct printer_dev *dev) +{ + int result = 0; + + dev->in_ep->desc = ep_desc(dev->gadget, &fs_ep_in_desc, &hs_ep_in_desc, + &ss_ep_in_desc); + dev->in_ep->driver_data = dev; + + dev->out_ep->desc = ep_desc(dev->gadget, &fs_ep_out_desc, + &hs_ep_out_desc, &ss_ep_out_desc); + dev->out_ep->driver_data = dev; + + result = usb_ep_enable(dev->in_ep); + if (result != 0) { + DBG(dev, "enable %s --> %d\n", dev->in_ep->name, result); + goto done; + } + + result = usb_ep_enable(dev->out_ep); + if (result != 0) { + DBG(dev, "enable %s --> %d\n", dev->in_ep->name, result); + goto done; + } + +done: + /* on error, disable any endpoints */ + if (result != 0) { + (void) usb_ep_disable(dev->in_ep); + (void) usb_ep_disable(dev->out_ep); + dev->in_ep->desc = NULL; + dev->out_ep->desc = NULL; + } + + /* caller is responsible for cleanup on error */ + return result; +} + +static void printer_reset_interface(struct printer_dev *dev) +{ + if (dev->interface < 0) + return; + + DBG(dev, "%s\n", __func__); + + if (dev->in_ep->desc) + usb_ep_disable(dev->in_ep); + + if (dev->out_ep->desc) + usb_ep_disable(dev->out_ep); + + dev->in_ep->desc = NULL; + dev->out_ep->desc = NULL; + dev->interface = -1; +} + +/* Change our operational Interface. 
*/ +static int set_interface(struct printer_dev *dev, unsigned number) +{ + int result = 0; + + /* Free the current interface */ + printer_reset_interface(dev); + + result = set_printer_interface(dev); + if (result) + printer_reset_interface(dev); + else + dev->interface = number; + + if (!result) + INFO(dev, "Using interface %x\n", number); + + return result; +} + +static void printer_soft_reset(struct printer_dev *dev) +{ + struct usb_request *req; + + INFO(dev, "Received Printer Reset Request\n"); + + if (usb_ep_disable(dev->in_ep)) + DBG(dev, "Failed to disable USB in_ep\n"); + if (usb_ep_disable(dev->out_ep)) + DBG(dev, "Failed to disable USB out_ep\n"); + + if (dev->current_rx_req != NULL) { + list_add(&dev->current_rx_req->list, &dev->rx_reqs); + dev->current_rx_req = NULL; + } + dev->current_rx_bytes = 0; + dev->current_rx_buf = NULL; + dev->reset_printer = 1; + + while (likely(!(list_empty(&dev->rx_buffers)))) { + req = container_of(dev->rx_buffers.next, struct usb_request, + list); + list_del_init(&req->list); + list_add(&req->list, &dev->rx_reqs); + } + + while (likely(!(list_empty(&dev->rx_reqs_active)))) { + req = container_of(dev->rx_buffers.next, struct usb_request, + list); + list_del_init(&req->list); + list_add(&req->list, &dev->rx_reqs); + } + + while (likely(!(list_empty(&dev->tx_reqs_active)))) { + req = container_of(dev->tx_reqs_active.next, + struct usb_request, list); + list_del_init(&req->list); + list_add(&req->list, &dev->tx_reqs); + } + + if (usb_ep_enable(dev->in_ep)) + DBG(dev, "Failed to enable USB in_ep\n"); + if (usb_ep_enable(dev->out_ep)) + DBG(dev, "Failed to enable USB out_ep\n"); + + wake_up_interruptible(&dev->rx_wait); + wake_up_interruptible(&dev->tx_wait); + wake_up_interruptible(&dev->tx_flush_wait); +} + +/*-------------------------------------------------------------------------*/ + +static bool gprinter_req_match(struct usb_function *f, + const struct usb_ctrlrequest *ctrl) +{ + struct printer_dev *dev = func_to_printer(f); + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + + if ((ctrl->bRequestType & USB_RECIP_MASK) != USB_RECIP_INTERFACE || + (ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_CLASS) + return false; + + switch (ctrl->bRequest) { + case GET_DEVICE_ID: + w_index >>= 8; + if (w_length <= PNP_STRING_LEN && + (USB_DIR_IN & ctrl->bRequestType)) + break; + return false; + case GET_PORT_STATUS: + if (!w_value && w_length == 1 && + (USB_DIR_IN & ctrl->bRequestType)) + break; + return false; + case SOFT_RESET: + if (!w_value && !w_length && + !(USB_DIR_IN & ctrl->bRequestType)) + break; + /* fall through */ + default: + return false; + } + return w_index == dev->interface; +} + +/* + * The setup() callback implements all the ep0 functionality that's not + * handled lower down. 
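As a companion to the request matching above and the setup() handler that follows, a hedged host-side sketch (libusb-1.0; the vendor/product IDs and interface number are placeholders) issuing the class-specific GET_PORT_STATUS request that printer_func_setup() answers with dev->printer_status:

#include <stdio.h>
#include <libusb-1.0/libusb.h>

#define GET_PORT_STATUS		1	/* printer class request, as defined above */

int main(void)
{
	libusb_device_handle *h;
	unsigned char status = 0;
	int ret, intf = 0;		/* assumed interface number */

	if (libusb_init(NULL))
		return 1;
	h = libusb_open_device_with_vid_pid(NULL, 0x1d6b, 0x0104);	/* placeholder IDs */
	if (!h)
		return 1;

	/* bmRequestType = IN | Class | Interface, wValue = 0, wIndex = interface */
	ret = libusb_control_transfer(h,
			LIBUSB_ENDPOINT_IN | LIBUSB_REQUEST_TYPE_CLASS |
			LIBUSB_RECIPIENT_INTERFACE,
			GET_PORT_STATUS, 0, intf, &status, 1, 1000);
	if (ret == 1)
		printf("port status: 0x%02x\n", status);

	libusb_close(h);
	libusb_exit(NULL);
	return ret == 1 ? 0 : 1;
}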
+ */ +static int printer_func_setup(struct usb_function *f, + const struct usb_ctrlrequest *ctrl) +{ + struct printer_dev *dev = func_to_printer(f); + struct usb_composite_dev *cdev = f->config->cdev; + struct usb_request *req = cdev->req; + int value = -EOPNOTSUPP; + u16 wIndex = le16_to_cpu(ctrl->wIndex); + u16 wValue = le16_to_cpu(ctrl->wValue); + u16 wLength = le16_to_cpu(ctrl->wLength); + + DBG(dev, "ctrl req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, wValue, wIndex, wLength); + + switch (ctrl->bRequestType&USB_TYPE_MASK) { + case USB_TYPE_CLASS: + switch (ctrl->bRequest) { + case GET_DEVICE_ID: /* Get the IEEE-1284 PNP String */ + /* Only one printer interface is supported. */ + if ((wIndex>>8) != dev->interface) + break; + + value = (dev->pnp_string[0] << 8) | dev->pnp_string[1]; + memcpy(req->buf, dev->pnp_string, value); + DBG(dev, "1284 PNP String: %x %s\n", value, + &dev->pnp_string[2]); + break; + + case GET_PORT_STATUS: /* Get Port Status */ + /* Only one printer interface is supported. */ + if (wIndex != dev->interface) + break; + + *(u8 *)req->buf = dev->printer_status; + value = min_t(u16, wLength, 1); + break; + + case SOFT_RESET: /* Soft Reset */ + /* Only one printer interface is supported. */ + if (wIndex != dev->interface) + break; + + printer_soft_reset(dev); + + value = 0; + break; + + default: + goto unknown; + } + break; + + default: +unknown: + VDBG(dev, + "unknown ctrl req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + wValue, wIndex, wLength); + break; + } + /* host either stalls (value < 0) or reports success */ + if (value >= 0) { + req->length = value; + req->zero = value < wLength; + value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + if (value < 0) { + ERROR(dev, "%s:%d Error!\n", __func__, __LINE__); + req->status = 0; + } + } + return value; +} + +static int printer_func_bind(struct usb_configuration *c, + struct usb_function *f) +{ + struct usb_gadget *gadget = c->cdev->gadget; + struct printer_dev *dev = func_to_printer(f); + struct device *pdev; + struct usb_composite_dev *cdev = c->cdev; + struct usb_ep *in_ep; + struct usb_ep *out_ep = NULL; + struct usb_request *req; + dev_t devt; + int id; + int ret; + u32 i; + + id = usb_interface_id(c, f); + if (id < 0) + return id; + intf_desc.bInterfaceNumber = id; + + /* finish hookup to lower layer ... 
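The GET_DEVICE_ID case handled above returns the IEEE 1284 device ID exactly as stored in pnp_string: a two-byte big-endian length (which counts the two length bytes themselves) followed by the key:value text. A small, self-contained sketch of how a receiver might unpack such a buffer (illustrative, not taken from the patch):

#include <stdio.h>

/* Print the key:value portion of an IEEE 1284 device ID buffer as sent by
 * printer_func_setup(): buf[0..1] hold the big-endian total length, including
 * those two bytes, and the text itself is not NUL-terminated.
 */
static int print_1284_id(const unsigned char *buf, size_t buflen)
{
	size_t total = ((size_t)buf[0] << 8) | buf[1];

	if (buflen < 2 || total < 2 || total > buflen)
		return -1;
	printf("%.*s\n", (int)(total - 2), (const char *)(buf + 2));
	return 0;
}

int main(void)
{
	/* example reply: length 0x0026 = 38 bytes total */
	static const unsigned char id[] =
		"\x00\x26" "MFG:linux;MDL:g_printer;CLS:PRINTER;";
	return print_1284_id(id, sizeof(id) - 1);
}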
*/ + dev->gadget = gadget; + + /* all we really need is bulk IN/OUT */ + in_ep = usb_ep_autoconfig(cdev->gadget, &fs_ep_in_desc); + if (!in_ep) { +autoconf_fail: + dev_err(&cdev->gadget->dev, "can't autoconfigure on %s\n", + cdev->gadget->name); + return -ENODEV; + } + in_ep->driver_data = in_ep; /* claim */ + + out_ep = usb_ep_autoconfig(cdev->gadget, &fs_ep_out_desc); + if (!out_ep) + goto autoconf_fail; + out_ep->driver_data = out_ep; /* claim */ + + /* assumes that all endpoints are dual-speed */ + hs_ep_in_desc.bEndpointAddress = fs_ep_in_desc.bEndpointAddress; + hs_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress; + ss_ep_in_desc.bEndpointAddress = fs_ep_in_desc.bEndpointAddress; + ss_ep_out_desc.bEndpointAddress = fs_ep_out_desc.bEndpointAddress; + + ret = usb_assign_descriptors(f, fs_printer_function, + hs_printer_function, ss_printer_function); + if (ret) + return ret; + + dev->in_ep = in_ep; + dev->out_ep = out_ep; + + ret = -ENOMEM; + for (i = 0; i < dev->q_len; i++) { + req = printer_req_alloc(dev->in_ep, USB_BUFSIZE, GFP_KERNEL); + if (!req) + goto fail_tx_reqs; + list_add(&req->list, &dev->tx_reqs); + } + + for (i = 0; i < dev->q_len; i++) { + req = printer_req_alloc(dev->out_ep, USB_BUFSIZE, GFP_KERNEL); + if (!req) + goto fail_rx_reqs; + list_add(&req->list, &dev->rx_reqs); + } + + /* Setup the sysfs files for the printer gadget. */ + devt = MKDEV(major, dev->minor); + pdev = device_create(usb_gadget_class, NULL, devt, + NULL, "g_printer%d", dev->minor); + if (IS_ERR(pdev)) { + ERROR(dev, "Failed to create device: g_printer\n"); + ret = PTR_ERR(pdev); + goto fail_rx_reqs; + } + + /* + * Register a character device as an interface to a user mode + * program that handles the printer specific functionality. + */ + cdev_init(&dev->printer_cdev, &printer_io_operations); + dev->printer_cdev.owner = THIS_MODULE; + ret = cdev_add(&dev->printer_cdev, devt, 1); + if (ret) { + ERROR(dev, "Failed to open char device\n"); + goto fail_cdev_add; + } + + return 0; + +fail_cdev_add: + device_destroy(usb_gadget_class, devt); + +fail_rx_reqs: + while (!list_empty(&dev->rx_reqs)) { + req = container_of(dev->rx_reqs.next, struct usb_request, list); + list_del(&req->list); + printer_req_free(dev->out_ep, req); + } + +fail_tx_reqs: + while (!list_empty(&dev->tx_reqs)) { + req = container_of(dev->tx_reqs.next, struct usb_request, list); + list_del(&req->list); + printer_req_free(dev->in_ep, req); + } + + return ret; + +} + +static int printer_func_set_alt(struct usb_function *f, + unsigned intf, unsigned alt) +{ + struct printer_dev *dev = func_to_printer(f); + int ret = -ENOTSUPP; + + if (!alt) + ret = set_interface(dev, intf); + + return ret; +} + +static void printer_func_disable(struct usb_function *f) +{ + struct printer_dev *dev = func_to_printer(f); + unsigned long flags; + + DBG(dev, "%s\n", __func__); + + spin_lock_irqsave(&dev->lock, flags); + printer_reset_interface(dev); + spin_unlock_irqrestore(&dev->lock, flags); +} + +static inline struct f_printer_opts +*to_f_printer_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_printer_opts, + func_inst.group); +} + +CONFIGFS_ATTR_STRUCT(f_printer_opts); +CONFIGFS_ATTR_OPS(f_printer_opts); + +static void printer_attr_release(struct config_item *item) +{ + struct f_printer_opts *opts = to_f_printer_opts(item); + + usb_put_function_instance(&opts->func_inst); +} + +static struct configfs_item_operations printer_item_ops = { + .release = printer_attr_release, + .show_attribute = 
f_printer_opts_attr_show, + .store_attribute = f_printer_opts_attr_store, +}; + +static ssize_t f_printer_opts_pnp_string_show(struct f_printer_opts *opts, + char *page) +{ + int result; + + mutex_lock(&opts->lock); + result = strlcpy(page, opts->pnp_string + 2, PNP_STRING_LEN - 2); + mutex_unlock(&opts->lock); + + return result; +} + +static ssize_t f_printer_opts_pnp_string_store(struct f_printer_opts *opts, + const char *page, size_t len) +{ + int result, l; + + mutex_lock(&opts->lock); + result = strlcpy(opts->pnp_string + 2, page, PNP_STRING_LEN - 2); + l = strlen(opts->pnp_string + 2) + 2; + opts->pnp_string[0] = (l >> 8) & 0xFF; + opts->pnp_string[1] = l & 0xFF; + mutex_unlock(&opts->lock); + + return result; +} + +static struct f_printer_opts_attribute f_printer_opts_pnp_string = + __CONFIGFS_ATTR(pnp_string, S_IRUGO | S_IWUSR, + f_printer_opts_pnp_string_show, + f_printer_opts_pnp_string_store); + +static ssize_t f_printer_opts_q_len_show(struct f_printer_opts *opts, + char *page) +{ + int result; + + mutex_lock(&opts->lock); + result = sprintf(page, "%d\n", opts->q_len); + mutex_unlock(&opts->lock); + + return result; +} + +static ssize_t f_printer_opts_q_len_store(struct f_printer_opts *opts, + const char *page, size_t len) +{ + int ret; + u16 num; + + mutex_lock(&opts->lock); + if (opts->refcnt) { + ret = -EBUSY; + goto end; + } + + ret = kstrtou16(page, 0, &num); + if (ret) + goto end; + + opts->q_len = (unsigned)num; + ret = len; +end: + mutex_unlock(&opts->lock); + return ret; +} + +static struct f_printer_opts_attribute f_printer_opts_q_len = + __CONFIGFS_ATTR(q_len, S_IRUGO | S_IWUSR, f_printer_opts_q_len_show, + f_printer_opts_q_len_store); + +static struct configfs_attribute *printer_attrs[] = { + &f_printer_opts_pnp_string.attr, + &f_printer_opts_q_len.attr, + NULL, +}; + +static struct config_item_type printer_func_type = { + .ct_item_ops = &printer_item_ops, + .ct_attrs = printer_attrs, + .ct_owner = THIS_MODULE, +}; + +static inline int gprinter_get_minor(void) +{ + return ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL); +} + +static inline void gprinter_put_minor(int minor) +{ + ida_simple_remove(&printer_ida, minor); +} + +static int gprinter_setup(int); +static void gprinter_cleanup(void); + +static void gprinter_free_inst(struct usb_function_instance *f) +{ + struct f_printer_opts *opts; + + opts = container_of(f, struct f_printer_opts, func_inst); + + mutex_lock(&printer_ida_lock); + + gprinter_put_minor(opts->minor); + if (idr_is_empty(&printer_ida.idr)) + gprinter_cleanup(); + + mutex_unlock(&printer_ida_lock); + + kfree(opts); +} + +static struct usb_function_instance *gprinter_alloc_inst(void) +{ + struct f_printer_opts *opts; + struct usb_function_instance *ret; + int status = 0; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return ERR_PTR(-ENOMEM); + + mutex_init(&opts->lock); + opts->func_inst.free_func_inst = gprinter_free_inst; + ret = &opts->func_inst; + + mutex_lock(&printer_ida_lock); + + if (idr_is_empty(&printer_ida.idr)) { + status = gprinter_setup(PRINTER_MINORS); + if (status) { + ret = ERR_PTR(status); + kfree(opts); + goto unlock; + } + } + + opts->minor = gprinter_get_minor(); + if (opts->minor < 0) { + ret = ERR_PTR(opts->minor); + kfree(opts); + if (idr_is_empty(&printer_ida.idr)) + gprinter_cleanup(); + goto unlock; + } + config_group_init_type_name(&opts->func_inst.group, "", + &printer_func_type); + +unlock: + mutex_unlock(&printer_ida_lock); + return ret; +} + +static void gprinter_free(struct usb_function *f) +{ + struct 
printer_dev *dev = func_to_printer(f); + struct f_printer_opts *opts; + + opts = container_of(f->fi, struct f_printer_opts, func_inst); + kfree(dev); + mutex_lock(&opts->lock); + --opts->refcnt; + mutex_unlock(&opts->lock); +} + +static void printer_func_unbind(struct usb_configuration *c, + struct usb_function *f) +{ + struct printer_dev *dev; + struct usb_request *req; + + dev = func_to_printer(f); + + device_destroy(usb_gadget_class, MKDEV(major, dev->minor)); + + /* Remove Character Device */ + cdev_del(&dev->printer_cdev); + + /* we must already have been disconnected ... no i/o may be active */ + WARN_ON(!list_empty(&dev->tx_reqs_active)); + WARN_ON(!list_empty(&dev->rx_reqs_active)); + + /* Free all memory for this driver. */ + while (!list_empty(&dev->tx_reqs)) { + req = container_of(dev->tx_reqs.next, struct usb_request, + list); + list_del(&req->list); + printer_req_free(dev->in_ep, req); + } + + if (dev->current_rx_req != NULL) + printer_req_free(dev->out_ep, dev->current_rx_req); + + while (!list_empty(&dev->rx_reqs)) { + req = container_of(dev->rx_reqs.next, + struct usb_request, list); + list_del(&req->list); + printer_req_free(dev->out_ep, req); + } + + while (!list_empty(&dev->rx_buffers)) { + req = container_of(dev->rx_buffers.next, + struct usb_request, list); + list_del(&req->list); + printer_req_free(dev->out_ep, req); + } + usb_free_all_descriptors(f); +} + +static struct usb_function *gprinter_alloc(struct usb_function_instance *fi) +{ + struct printer_dev *dev; + struct f_printer_opts *opts; + + opts = container_of(fi, struct f_printer_opts, func_inst); + + mutex_lock(&opts->lock); + if (opts->minor >= minors) { + mutex_unlock(&opts->lock); + return ERR_PTR(-ENOENT); + } + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) { + mutex_unlock(&opts->lock); + return ERR_PTR(-ENOMEM); + } + + ++opts->refcnt; + dev->minor = opts->minor; + dev->pnp_string = opts->pnp_string; + dev->q_len = opts->q_len; + mutex_unlock(&opts->lock); + + dev->function.name = "printer"; + dev->function.bind = printer_func_bind; + dev->function.setup = printer_func_setup; + dev->function.unbind = printer_func_unbind; + dev->function.set_alt = printer_func_set_alt; + dev->function.disable = printer_func_disable; + dev->function.req_match = gprinter_req_match; + dev->function.free_func = gprinter_free; + + INIT_LIST_HEAD(&dev->tx_reqs); + INIT_LIST_HEAD(&dev->rx_reqs); + INIT_LIST_HEAD(&dev->rx_buffers); + INIT_LIST_HEAD(&dev->tx_reqs_active); + INIT_LIST_HEAD(&dev->rx_reqs_active); + + spin_lock_init(&dev->lock); + mutex_init(&dev->lock_printer_io); + init_waitqueue_head(&dev->rx_wait); + init_waitqueue_head(&dev->tx_wait); + init_waitqueue_head(&dev->tx_flush_wait); + + dev->interface = -1; + dev->printer_cdev_open = 0; + dev->printer_status = PRINTER_NOT_ERROR; + dev->current_rx_req = NULL; + dev->current_rx_bytes = 0; + dev->current_rx_buf = NULL; + + return &dev->function; +} + +DECLARE_USB_FUNCTION_INIT(printer, gprinter_alloc_inst, gprinter_alloc); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Craig Nadler"); + +static int gprinter_setup(int count) +{ + int status; + dev_t devt; + + usb_gadget_class = class_create(THIS_MODULE, "usb_printer_gadget"); + if (IS_ERR(usb_gadget_class)) { + status = PTR_ERR(usb_gadget_class); + usb_gadget_class = NULL; + pr_err("unable to create usb_gadget class %d\n", status); + return status; + } + + status = alloc_chrdev_region(&devt, 0, count, "USB printer gadget"); + if (status) { + pr_err("alloc_chrdev_region %d\n", status); + class_destroy(usb_gadget_class); 
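For completeness, the configfs attributes defined earlier (pnp_string and q_len) are normally written from user space before the function is linked into a configuration. A hedged sketch, assuming configfs is mounted at /sys/kernel/config and a gadget directory g1 with a printer.usb0 instance already exists:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_attr(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* paths are assumptions about an already-created gadget */
	const char *base = "/sys/kernel/config/usb_gadget/g1/functions/printer.usb0";
	char path[256];

	/* q_len must be set before the function is bound (see the refcnt check above) */
	snprintf(path, sizeof(path), "%s/q_len", base);
	if (write_attr(path, "10"))
		return 1;

	snprintf(path, sizeof(path), "%s/pnp_string", base);
	return write_attr(path, "MFG:linux;MDL:g_printer;CLS:PRINTER;") ? 1 : 0;
}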
+ usb_gadget_class = NULL; + return status; + } + + major = MAJOR(devt); + minors = count; + + return status; +} + +static void gprinter_cleanup(void) +{ + if (major) { + unregister_chrdev_region(MKDEV(major, 0), minors); + major = minors = 0; + } + class_destroy(usb_gadget_class); + usb_gadget_class = NULL; +} diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c new file mode 100644 index 000000000..829edf878 --- /dev/null +++ b/drivers/usb/gadget/function/f_rndis.c @@ -0,0 +1,1037 @@ +/* + * f_rndis.c -- RNDIS link function driver + * + * Copyright (C) 2003-2005,2008 David Brownell + * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger + * Copyright (C) 2008 Nokia Corporation + * Copyright (C) 2009 Samsung Electronics + * Author: Michal Nazarewicz (mina86@mina86.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +/* #define VERBOSE_DEBUG */ + +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/etherdevice.h> + +#include <linux/atomic.h> + +#include "u_ether.h" +#include "u_ether_configfs.h" +#include "u_rndis.h" +#include "rndis.h" +#include "configfs.h" + +/* + * This function is an RNDIS Ethernet port -- a Microsoft protocol that's + * been promoted instead of the standard CDC Ethernet. The published RNDIS + * spec is ambiguous, incomplete, and needlessly complex. Variants such as + * ActiveSync have even worse status in terms of specification. + * + * In short: it's a protocol controlled by (and for) Microsoft, not for an + * Open ecosystem or markets. Linux supports it *only* because Microsoft + * doesn't support the CDC Ethernet standard. + * + * The RNDIS data transfer model is complex, with multiple Ethernet packets + * per USB message, and out of band data. The control model is built around + * what's essentially an "RNDIS RPC" protocol. It's all wrapped in a CDC ACM + * (modem, not Ethernet) veneer, with those ACM descriptors being entirely + * useless (they're ignored). RNDIS expects to be the only function in its + * configuration, so it's no real help if you need composite devices; and + * it expects to be the first configuration too. + * + * There is a single technical advantage of RNDIS over CDC Ethernet, if you + * discount the fluff that its RPC can be made to deliver: it doesn't need + * a NOP altsetting for the data interface. That lets it work on some of the + * "so smart it's stupid" hardware which takes over configuration changes + * from the software, and adds restrictions like "no altsettings". + * + * Unfortunately MSFT's RNDIS drivers are buggy. They hang or oops, and + * have all sorts of contrary-to-specification oddities that can prevent + * them from working sanely. Since bugfixes (or accurate specs, letting + * Linux work around those bugs) are unlikely to ever come from MSFT, you + * may want to avoid using RNDIS on purely operational grounds. + * + * Omissions from the RNDIS 1.0 specification include: + * + * - Power management ... references data that's scattered around lots + * of other documentation, which is incorrect/incomplete there too. + * + * - There are various undocumented protocol requirements, like the need + * to send garbage in some control-OUT messages. 
+ * + * - MS-Windows drivers sometimes emit undocumented requests. + */ + +struct f_rndis { + struct gether port; + u8 ctrl_id, data_id; + u8 ethaddr[ETH_ALEN]; + u32 vendorID; + const char *manufacturer; + int config; + + struct usb_ep *notify; + struct usb_request *notify_req; + atomic_t notify_count; +}; + +static inline struct f_rndis *func_to_rndis(struct usb_function *f) +{ + return container_of(f, struct f_rndis, port.func); +} + +/* peak (theoretical) bulk transfer rate in bits-per-second */ +static unsigned int bitrate(struct usb_gadget *g) +{ + if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER) + return 13 * 1024 * 8 * 1000 * 8; + else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) + return 13 * 512 * 8 * 1000 * 8; + else + return 19 * 64 * 1 * 1000 * 8; +} + +/*-------------------------------------------------------------------------*/ + +/* + */ + +#define RNDIS_STATUS_INTERVAL_MS 32 +#define STATUS_BYTECOUNT 8 /* 8 bytes data */ + + +/* interface descriptor: */ + +static struct usb_interface_descriptor rndis_control_intf = { + .bLength = sizeof rndis_control_intf, + .bDescriptorType = USB_DT_INTERFACE, + + /* .bInterfaceNumber = DYNAMIC */ + /* status endpoint is optional; this could be patched later */ + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_ACM, + .bInterfaceProtocol = USB_CDC_ACM_PROTO_VENDOR, + /* .iInterface = DYNAMIC */ +}; + +static struct usb_cdc_header_desc header_desc = { + .bLength = sizeof header_desc, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_HEADER_TYPE, + + .bcdCDC = cpu_to_le16(0x0110), +}; + +static struct usb_cdc_call_mgmt_descriptor call_mgmt_descriptor = { + .bLength = sizeof call_mgmt_descriptor, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_CALL_MANAGEMENT_TYPE, + + .bmCapabilities = 0x00, + .bDataInterface = 0x01, +}; + +static struct usb_cdc_acm_descriptor rndis_acm_descriptor = { + .bLength = sizeof rndis_acm_descriptor, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_ACM_TYPE, + + .bmCapabilities = 0x00, +}; + +static struct usb_cdc_union_desc rndis_union_desc = { + .bLength = sizeof(rndis_union_desc), + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_UNION_TYPE, + /* .bMasterInterface0 = DYNAMIC */ + /* .bSlaveInterface0 = DYNAMIC */ +}; + +/* the data interface has two bulk endpoints */ + +static struct usb_interface_descriptor rndis_data_intf = { + .bLength = sizeof rndis_data_intf, + .bDescriptorType = USB_DT_INTERFACE, + + /* .bInterfaceNumber = DYNAMIC */ + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_CDC_DATA, + .bInterfaceSubClass = 0, + .bInterfaceProtocol = 0, + /* .iInterface = DYNAMIC */ +}; + + +static struct usb_interface_assoc_descriptor +rndis_iad_descriptor = { + .bLength = sizeof rndis_iad_descriptor, + .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION, + + .bFirstInterface = 0, /* XXX, hardcoded */ + .bInterfaceCount = 2, // control + data + .bFunctionClass = USB_CLASS_COMM, + .bFunctionSubClass = USB_CDC_SUBCLASS_ETHERNET, + .bFunctionProtocol = USB_CDC_PROTO_NONE, + /* .iFunction = DYNAMIC */ +}; + +/* full speed support: */ + +static struct usb_endpoint_descriptor fs_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = cpu_to_le16(STATUS_BYTECOUNT), + .bInterval = RNDIS_STATUS_INTERVAL_MS, +}; + +static 
struct usb_endpoint_descriptor fs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor fs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_descriptor_header *eth_fs_function[] = { + (struct usb_descriptor_header *) &rndis_iad_descriptor, + + /* control interface matches ACM, not Ethernet */ + (struct usb_descriptor_header *) &rndis_control_intf, + (struct usb_descriptor_header *) &header_desc, + (struct usb_descriptor_header *) &call_mgmt_descriptor, + (struct usb_descriptor_header *) &rndis_acm_descriptor, + (struct usb_descriptor_header *) &rndis_union_desc, + (struct usb_descriptor_header *) &fs_notify_desc, + + /* data interface has no altsetting */ + (struct usb_descriptor_header *) &rndis_data_intf, + (struct usb_descriptor_header *) &fs_in_desc, + (struct usb_descriptor_header *) &fs_out_desc, + NULL, +}; + +/* high speed support: */ + +static struct usb_endpoint_descriptor hs_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = cpu_to_le16(STATUS_BYTECOUNT), + .bInterval = USB_MS_TO_HS_INTERVAL(RNDIS_STATUS_INTERVAL_MS) +}; + +static struct usb_endpoint_descriptor hs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor hs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), +}; + +static struct usb_descriptor_header *eth_hs_function[] = { + (struct usb_descriptor_header *) &rndis_iad_descriptor, + + /* control interface matches ACM, not Ethernet */ + (struct usb_descriptor_header *) &rndis_control_intf, + (struct usb_descriptor_header *) &header_desc, + (struct usb_descriptor_header *) &call_mgmt_descriptor, + (struct usb_descriptor_header *) &rndis_acm_descriptor, + (struct usb_descriptor_header *) &rndis_union_desc, + (struct usb_descriptor_header *) &hs_notify_desc, + + /* data interface has no altsetting */ + (struct usb_descriptor_header *) &rndis_data_intf, + (struct usb_descriptor_header *) &hs_in_desc, + (struct usb_descriptor_header *) &hs_out_desc, + NULL, +}; + +/* super speed support: */ + +static struct usb_endpoint_descriptor ss_notify_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = cpu_to_le16(STATUS_BYTECOUNT), + .bInterval = USB_MS_TO_HS_INTERVAL(RNDIS_STATUS_INTERVAL_MS) +}; + +static struct usb_ss_ep_comp_descriptor ss_intr_comp_desc = { + .bLength = sizeof ss_intr_comp_desc, + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + + /* the following 3 values can be tweaked if necessary */ + /* .bMaxBurst = 0, */ + /* .bmAttributes = 0, */ + .wBytesPerInterval = cpu_to_le16(STATUS_BYTECOUNT), +}; + +static struct usb_endpoint_descriptor ss_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = 
USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(1024), +}; + +static struct usb_endpoint_descriptor ss_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(1024), +}; + +static struct usb_ss_ep_comp_descriptor ss_bulk_comp_desc = { + .bLength = sizeof ss_bulk_comp_desc, + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + + /* the following 2 values can be tweaked if necessary */ + /* .bMaxBurst = 0, */ + /* .bmAttributes = 0, */ +}; + +static struct usb_descriptor_header *eth_ss_function[] = { + (struct usb_descriptor_header *) &rndis_iad_descriptor, + + /* control interface matches ACM, not Ethernet */ + (struct usb_descriptor_header *) &rndis_control_intf, + (struct usb_descriptor_header *) &header_desc, + (struct usb_descriptor_header *) &call_mgmt_descriptor, + (struct usb_descriptor_header *) &rndis_acm_descriptor, + (struct usb_descriptor_header *) &rndis_union_desc, + (struct usb_descriptor_header *) &ss_notify_desc, + (struct usb_descriptor_header *) &ss_intr_comp_desc, + + /* data interface has no altsetting */ + (struct usb_descriptor_header *) &rndis_data_intf, + (struct usb_descriptor_header *) &ss_in_desc, + (struct usb_descriptor_header *) &ss_bulk_comp_desc, + (struct usb_descriptor_header *) &ss_out_desc, + (struct usb_descriptor_header *) &ss_bulk_comp_desc, + NULL, +}; + +/* string descriptors: */ + +static struct usb_string rndis_string_defs[] = { + [0].s = "RNDIS Communications Control", + [1].s = "RNDIS Ethernet Data", + [2].s = "RNDIS", + { } /* end of list */ +}; + +static struct usb_gadget_strings rndis_string_table = { + .language = 0x0409, /* en-us */ + .strings = rndis_string_defs, +}; + +static struct usb_gadget_strings *rndis_strings[] = { + &rndis_string_table, + NULL, +}; + +/*-------------------------------------------------------------------------*/ + +static struct sk_buff *rndis_add_header(struct gether *port, + struct sk_buff *skb) +{ + struct sk_buff *skb2; + + skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type)); + rndis_add_hdr(skb2); + + dev_kfree_skb(skb); + return skb2; +} + +static void rndis_response_available(void *_rndis) +{ + struct f_rndis *rndis = _rndis; + struct usb_request *req = rndis->notify_req; + struct usb_composite_dev *cdev = rndis->port.func.config->cdev; + __le32 *data = req->buf; + int status; + + if (atomic_inc_return(&rndis->notify_count) != 1) + return; + + /* Send RNDIS RESPONSE_AVAILABLE notification; a + * USB_CDC_NOTIFY_RESPONSE_AVAILABLE "should" work too + * + * This is the only notification defined by RNDIS. 
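The notification queued just below is always the same 8-byte RESPONSE_AVAILABLE message; sketched here as a host-visible structure for reference (the struct name is illustrative):

#include <stdint.h>

/* What arrives on the interrupt endpoint: both fields little-endian,
 * matching data[0] = 1 and data[1] = 0 in rndis_response_available().
 */
struct rndis_response_available_msg {
	uint32_t notification;	/* 0x00000001: RESPONSE_AVAILABLE */
	uint32_t reserved;	/* always 0 */
};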
+ */ + data[0] = cpu_to_le32(1); + data[1] = cpu_to_le32(0); + + status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC); + if (status) { + atomic_dec(&rndis->notify_count); + DBG(cdev, "notify/0 --> %d\n", status); + } +} + +static void rndis_response_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct f_rndis *rndis = req->context; + struct usb_composite_dev *cdev = rndis->port.func.config->cdev; + int status = req->status; + + /* after TX: + * - USB_CDC_GET_ENCAPSULATED_RESPONSE (ep0/control) + * - RNDIS_RESPONSE_AVAILABLE (status/irq) + */ + switch (status) { + case -ECONNRESET: + case -ESHUTDOWN: + /* connection gone */ + atomic_set(&rndis->notify_count, 0); + break; + default: + DBG(cdev, "RNDIS %s response error %d, %d/%d\n", + ep->name, status, + req->actual, req->length); + /* FALLTHROUGH */ + case 0: + if (ep != rndis->notify) + break; + + /* handle multiple pending RNDIS_RESPONSE_AVAILABLE + * notifications by resending until we're done + */ + if (atomic_dec_and_test(&rndis->notify_count)) + break; + status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC); + if (status) { + atomic_dec(&rndis->notify_count); + DBG(cdev, "notify/1 --> %d\n", status); + } + break; + } +} + +static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct f_rndis *rndis = req->context; + int status; + + /* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */ +// spin_lock(&dev->lock); + status = rndis_msg_parser(rndis->config, (u8 *) req->buf); + if (status < 0) + pr_err("RNDIS command error %d, %d/%d\n", + status, req->actual, req->length); +// spin_unlock(&dev->lock); +} + +static int +rndis_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) +{ + struct f_rndis *rndis = func_to_rndis(f); + struct usb_composite_dev *cdev = f->config->cdev; + struct usb_request *req = cdev->req; + int value = -EOPNOTSUPP; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + + /* composite driver infrastructure handles everything except + * CDC class messages; interface activation uses set_alt(). + */ + switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { + + /* RNDIS uses the CDC command encapsulation mechanism to implement + * an RPC scheme, with much getting/setting of attributes by OID. + */ + case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_SEND_ENCAPSULATED_COMMAND: + if (w_value || w_index != rndis->ctrl_id) + goto invalid; + /* read the request; process it later */ + value = w_length; + req->complete = rndis_command_complete; + req->context = rndis; + /* later, rndis_response_available() sends a notification */ + break; + + case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) + | USB_CDC_GET_ENCAPSULATED_RESPONSE: + if (w_value || w_index != rndis->ctrl_id) + goto invalid; + else { + u8 *buf; + u32 n; + + /* return the result */ + buf = rndis_get_next_response(rndis->config, &n); + if (buf) { + memcpy(req->buf, buf, n); + req->complete = rndis_response_complete; + req->context = rndis; + rndis_free_response(rndis->config, buf); + value = n; + } + /* else stalls ... spec says to avoid that */ + } + break; + + default: +invalid: + VDBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + } + + /* respond with data transfer or status phase? 
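To make the control flow concrete: the two class requests handled above carry RNDIS messages such as REMOTE_NDIS_INITIALIZE_MSG. A hedged host-side sketch with libusb-1.0 (interface number, max transfer size and timeouts are placeholders; it also assumes a little-endian host) that sends an INITIALIZE command and later fetches the completion after RESPONSE_AVAILABLE:

#include <stdint.h>
#include <string.h>
#include <libusb-1.0/libusb.h>

#define CDC_SEND_ENCAPSULATED_COMMAND	0x00
#define CDC_GET_ENCAPSULATED_RESPONSE	0x01

/* 24-byte REMOTE_NDIS_INITIALIZE_MSG; fields are little-endian on the wire,
 * so this layout assumes a little-endian host. */
static const uint32_t init_msg[6] = {
	0x00000002,	/* MessageType: INITIALIZE */
	24,		/* MessageLength */
	1,		/* RequestId */
	1, 0,		/* Major/MinorVersion */
	0x4000		/* MaxTransferSize (placeholder) */
};

int rndis_initialize(libusb_device_handle *h, int ctrl_intf)
{
	unsigned char cmd[24];
	unsigned char resp[64];
	int ret;

	memcpy(cmd, init_msg, sizeof(cmd));
	ret = libusb_control_transfer(h,
			LIBUSB_ENDPOINT_OUT | LIBUSB_REQUEST_TYPE_CLASS |
			LIBUSB_RECIPIENT_INTERFACE,
			CDC_SEND_ENCAPSULATED_COMMAND, 0, ctrl_intf,
			cmd, sizeof(cmd), 1000);
	if (ret < 0)
		return ret;

	/* a real driver waits for RESPONSE_AVAILABLE on the interrupt endpoint here */

	return libusb_control_transfer(h,
			LIBUSB_ENDPOINT_IN | LIBUSB_REQUEST_TYPE_CLASS |
			LIBUSB_RECIPIENT_INTERFACE,
			CDC_GET_ENCAPSULATED_RESPONSE, 0, ctrl_intf,
			resp, sizeof(resp), 1000);
}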
*/ + if (value >= 0) { + DBG(cdev, "rndis req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + req->zero = (value < w_length); + req->length = value; + value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + if (value < 0) + ERROR(cdev, "rndis response on err %d\n", value); + } + + /* device either stalls (value < 0) or reports success */ + return value; +} + + +static int rndis_set_alt(struct usb_function *f, unsigned intf, unsigned alt) +{ + struct f_rndis *rndis = func_to_rndis(f); + struct usb_composite_dev *cdev = f->config->cdev; + + /* we know alt == 0 */ + + if (intf == rndis->ctrl_id) { + if (rndis->notify->driver_data) { + VDBG(cdev, "reset rndis control %d\n", intf); + usb_ep_disable(rndis->notify); + } + if (!rndis->notify->desc) { + VDBG(cdev, "init rndis ctrl %d\n", intf); + if (config_ep_by_speed(cdev->gadget, f, rndis->notify)) + goto fail; + } + usb_ep_enable(rndis->notify); + rndis->notify->driver_data = rndis; + + } else if (intf == rndis->data_id) { + struct net_device *net; + + if (rndis->port.in_ep->driver_data) { + DBG(cdev, "reset rndis\n"); + gether_disconnect(&rndis->port); + } + + if (!rndis->port.in_ep->desc || !rndis->port.out_ep->desc) { + DBG(cdev, "init rndis\n"); + if (config_ep_by_speed(cdev->gadget, f, + rndis->port.in_ep) || + config_ep_by_speed(cdev->gadget, f, + rndis->port.out_ep)) { + rndis->port.in_ep->desc = NULL; + rndis->port.out_ep->desc = NULL; + goto fail; + } + } + + /* Avoid ZLPs; they can be troublesome. */ + rndis->port.is_zlp_ok = false; + + /* RNDIS should be in the "RNDIS uninitialized" state, + * either never activated or after rndis_uninit(). + * + * We don't want data to flow here until a nonzero packet + * filter is set, at which point it enters "RNDIS data + * initialized" state ... but we do want the endpoints + * to be activated. It's a strange little state. + * + * REVISIT the RNDIS gadget code has done this wrong for a + * very long time. We need another call to the link layer + * code -- gether_updown(...bool) maybe -- to do it right. + */ + rndis->port.cdc_filter = 0; + + DBG(cdev, "RNDIS RX/TX early activation ... \n"); + net = gether_connect(&rndis->port); + if (IS_ERR(net)) + return PTR_ERR(net); + + rndis_set_param_dev(rndis->config, net, + &rndis->port.cdc_filter); + } else + goto fail; + + return 0; +fail: + return -EINVAL; +} + +static void rndis_disable(struct usb_function *f) +{ + struct f_rndis *rndis = func_to_rndis(f); + struct usb_composite_dev *cdev = f->config->cdev; + + if (!rndis->notify->driver_data) + return; + + DBG(cdev, "rndis deactivated\n"); + + rndis_uninit(rndis->config); + gether_disconnect(&rndis->port); + + usb_ep_disable(rndis->notify); + rndis->notify->driver_data = NULL; +} + +/*-------------------------------------------------------------------------*/ + +/* + * This isn't quite the same mechanism as CDC Ethernet, since the + * notification scheme passes less data, but the same set of link + * states must be tested. A key difference is that altsettings are + * not used to tell whether the link should send packets or not. 
+ */ + +static void rndis_open(struct gether *geth) +{ + struct f_rndis *rndis = func_to_rndis(&geth->func); + struct usb_composite_dev *cdev = geth->func.config->cdev; + + DBG(cdev, "%s\n", __func__); + + rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, + bitrate(cdev->gadget) / 100); + rndis_signal_connect(rndis->config); +} + +static void rndis_close(struct gether *geth) +{ + struct f_rndis *rndis = func_to_rndis(&geth->func); + + DBG(geth->func.config->cdev, "%s\n", __func__); + + rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0); + rndis_signal_disconnect(rndis->config); +} + +/*-------------------------------------------------------------------------*/ + +/* Some controllers can't support RNDIS ... */ +static inline bool can_support_rndis(struct usb_configuration *c) +{ + /* everything else is *presumably* fine */ + return true; +} + +/* ethernet function driver setup/binding */ + +static int +rndis_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct f_rndis *rndis = func_to_rndis(f); + struct usb_string *us; + int status; + struct usb_ep *ep; + + struct f_rndis_opts *rndis_opts; + + if (!can_support_rndis(c)) + return -EINVAL; + + rndis_opts = container_of(f->fi, struct f_rndis_opts, func_inst); + + if (cdev->use_os_string) { + f->os_desc_table = kzalloc(sizeof(*f->os_desc_table), + GFP_KERNEL); + if (!f->os_desc_table) + return -ENOMEM; + f->os_desc_n = 1; + f->os_desc_table[0].os_desc = &rndis_opts->rndis_os_desc; + } + + /* + * in drivers/usb/gadget/configfs.c:configfs_composite_bind() + * configurations are bound in sequence with list_for_each_entry, + * in each configuration its functions are bound in sequence + * with list_for_each_entry, so we assume no race condition + * with regard to rndis_opts->bound access + */ + if (!rndis_opts->bound) { + gether_set_gadget(rndis_opts->net, cdev->gadget); + status = gether_register_netdev(rndis_opts->net); + if (status) + goto fail; + rndis_opts->bound = true; + } + + us = usb_gstrings_attach(cdev, rndis_strings, + ARRAY_SIZE(rndis_string_defs)); + if (IS_ERR(us)) { + status = PTR_ERR(us); + goto fail; + } + rndis_control_intf.iInterface = us[0].id; + rndis_data_intf.iInterface = us[1].id; + rndis_iad_descriptor.iFunction = us[2].id; + + /* allocate instance-specific interface IDs */ + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + rndis->ctrl_id = status; + rndis_iad_descriptor.bFirstInterface = status; + + rndis_control_intf.bInterfaceNumber = status; + rndis_union_desc.bMasterInterface0 = status; + + if (cdev->use_os_string) + f->os_desc_table[0].if_id = + rndis_iad_descriptor.bFirstInterface; + + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + rndis->data_id = status; + + rndis_data_intf.bInterfaceNumber = status; + rndis_union_desc.bSlaveInterface0 = status; + + status = -ENODEV; + + /* allocate instance-specific endpoints */ + ep = usb_ep_autoconfig(cdev->gadget, &fs_in_desc); + if (!ep) + goto fail; + rndis->port.in_ep = ep; + ep->driver_data = cdev; /* claim */ + + ep = usb_ep_autoconfig(cdev->gadget, &fs_out_desc); + if (!ep) + goto fail; + rndis->port.out_ep = ep; + ep->driver_data = cdev; /* claim */ + + /* NOTE: a status/notification endpoint is, strictly speaking, + * optional. We don't treat it that way though! It's simpler, + * and some newer profiles don't treat it as optional. 
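For reference on the status/notification endpoint allocated just below: the STATUS_BYTECOUNT buffer carries the fixed 8-byte RNDIS ResponseAvailable message that rndis_response_available() fills in earlier (data[0] = 1, data[1] = 0). A sketch of that layout, with an illustrative struct name that does not appear in the driver:

#include <linux/types.h>

/* Sketch only: the 8-byte message sent on the interrupt/status endpoint;
 * both words are little-endian, matching data[0] = cpu_to_le32(1) and
 * data[1] = cpu_to_le32(0) in rndis_response_available().
 */
struct rndis_notify_sketch {
	__le32	notification;	/* 0x00000001 == ResponseAvailable */
	__le32	reserved;	/* always zero */
};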
+ */ + ep = usb_ep_autoconfig(cdev->gadget, &fs_notify_desc); + if (!ep) + goto fail; + rndis->notify = ep; + ep->driver_data = cdev; /* claim */ + + status = -ENOMEM; + + /* allocate notification request and buffer */ + rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL); + if (!rndis->notify_req) + goto fail; + rndis->notify_req->buf = kmalloc(STATUS_BYTECOUNT, GFP_KERNEL); + if (!rndis->notify_req->buf) + goto fail; + rndis->notify_req->length = STATUS_BYTECOUNT; + rndis->notify_req->context = rndis; + rndis->notify_req->complete = rndis_response_complete; + + /* support all relevant hardware speeds... we expect that when + * hardware is dual speed, all bulk-capable endpoints work at + * both speeds + */ + hs_in_desc.bEndpointAddress = fs_in_desc.bEndpointAddress; + hs_out_desc.bEndpointAddress = fs_out_desc.bEndpointAddress; + hs_notify_desc.bEndpointAddress = fs_notify_desc.bEndpointAddress; + + ss_in_desc.bEndpointAddress = fs_in_desc.bEndpointAddress; + ss_out_desc.bEndpointAddress = fs_out_desc.bEndpointAddress; + ss_notify_desc.bEndpointAddress = fs_notify_desc.bEndpointAddress; + + status = usb_assign_descriptors(f, eth_fs_function, eth_hs_function, + eth_ss_function); + if (status) + goto fail; + + rndis->port.open = rndis_open; + rndis->port.close = rndis_close; + + rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0); + rndis_set_host_mac(rndis->config, rndis->ethaddr); + + if (rndis->manufacturer && rndis->vendorID && + rndis_set_param_vendor(rndis->config, rndis->vendorID, + rndis->manufacturer)) { + status = -EINVAL; + goto fail_free_descs; + } + + /* NOTE: all that is done without knowing or caring about + * the network link ... which is unavailable to this code + * until we're activated via set_alt(). + */ + + DBG(cdev, "RNDIS: %s speed IN/%s OUT/%s NOTIFY/%s\n", + gadget_is_superspeed(c->cdev->gadget) ? "super" : + gadget_is_dualspeed(c->cdev->gadget) ? 
"dual" : "full", + rndis->port.in_ep->name, rndis->port.out_ep->name, + rndis->notify->name); + return 0; + +fail_free_descs: + usb_free_all_descriptors(f); +fail: + kfree(f->os_desc_table); + f->os_desc_n = 0; + + if (rndis->notify_req) { + kfree(rndis->notify_req->buf); + usb_ep_free_request(rndis->notify, rndis->notify_req); + } + + /* we might as well release our claims on endpoints */ + if (rndis->notify) + rndis->notify->driver_data = NULL; + if (rndis->port.out_ep) + rndis->port.out_ep->driver_data = NULL; + if (rndis->port.in_ep) + rndis->port.in_ep->driver_data = NULL; + + ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); + + return status; +} + +void rndis_borrow_net(struct usb_function_instance *f, struct net_device *net) +{ + struct f_rndis_opts *opts; + + opts = container_of(f, struct f_rndis_opts, func_inst); + if (opts->bound) + gether_cleanup(netdev_priv(opts->net)); + else + free_netdev(opts->net); + opts->borrowed_net = opts->bound = true; + opts->net = net; +} +EXPORT_SYMBOL_GPL(rndis_borrow_net); + +static inline struct f_rndis_opts *to_f_rndis_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_rndis_opts, + func_inst.group); +} + +/* f_rndis_item_ops */ +USB_ETHERNET_CONFIGFS_ITEM(rndis); + +/* f_rndis_opts_dev_addr */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(rndis); + +/* f_rndis_opts_host_addr */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(rndis); + +/* f_rndis_opts_qmult */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(rndis); + +/* f_rndis_opts_ifname */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(rndis); + +static struct configfs_attribute *rndis_attrs[] = { + &f_rndis_opts_dev_addr.attr, + &f_rndis_opts_host_addr.attr, + &f_rndis_opts_qmult.attr, + &f_rndis_opts_ifname.attr, + NULL, +}; + +static struct config_item_type rndis_func_type = { + .ct_item_ops = &rndis_item_ops, + .ct_attrs = rndis_attrs, + .ct_owner = THIS_MODULE, +}; + +static void rndis_free_inst(struct usb_function_instance *f) +{ + struct f_rndis_opts *opts; + + opts = container_of(f, struct f_rndis_opts, func_inst); + if (!opts->borrowed_net) { + if (opts->bound) + gether_cleanup(netdev_priv(opts->net)); + else + free_netdev(opts->net); + } + + kfree(opts->rndis_os_desc.group.default_groups); /* single VLA chunk */ + kfree(opts); +} + +static struct usb_function_instance *rndis_alloc_inst(void) +{ + struct f_rndis_opts *opts; + struct usb_os_desc *descs[1]; + char *names[1]; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return ERR_PTR(-ENOMEM); + opts->rndis_os_desc.ext_compat_id = opts->rndis_ext_compat_id; + + mutex_init(&opts->lock); + opts->func_inst.free_func_inst = rndis_free_inst; + opts->net = gether_setup_default(); + if (IS_ERR(opts->net)) { + struct net_device *net = opts->net; + kfree(opts); + return ERR_CAST(net); + } + INIT_LIST_HEAD(&opts->rndis_os_desc.ext_prop); + + descs[0] = &opts->rndis_os_desc; + names[0] = "rndis"; + usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs, + names, THIS_MODULE); + config_group_init_type_name(&opts->func_inst.group, "", + &rndis_func_type); + + return &opts->func_inst; +} + +static void rndis_free(struct usb_function *f) +{ + struct f_rndis *rndis; + struct f_rndis_opts *opts; + + rndis = func_to_rndis(f); + rndis_deregister(rndis->config); + opts = container_of(f->fi, struct f_rndis_opts, func_inst); + kfree(rndis); + mutex_lock(&opts->lock); + opts->refcnt--; + mutex_unlock(&opts->lock); +} + +static void rndis_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct 
f_rndis *rndis = func_to_rndis(f); + + kfree(f->os_desc_table); + f->os_desc_n = 0; + usb_free_all_descriptors(f); + + kfree(rndis->notify_req->buf); + usb_ep_free_request(rndis->notify, rndis->notify_req); +} + +static struct usb_function *rndis_alloc(struct usb_function_instance *fi) +{ + struct f_rndis *rndis; + struct f_rndis_opts *opts; + int status; + + /* allocate and initialize one new instance */ + rndis = kzalloc(sizeof(*rndis), GFP_KERNEL); + if (!rndis) + return ERR_PTR(-ENOMEM); + + opts = container_of(fi, struct f_rndis_opts, func_inst); + mutex_lock(&opts->lock); + opts->refcnt++; + + gether_get_host_addr_u8(opts->net, rndis->ethaddr); + rndis->vendorID = opts->vendor_id; + rndis->manufacturer = opts->manufacturer; + + rndis->port.ioport = netdev_priv(opts->net); + mutex_unlock(&opts->lock); + /* RNDIS activates when the host changes this filter */ + rndis->port.cdc_filter = 0; + + /* RNDIS has special (and complex) framing */ + rndis->port.header_len = sizeof(struct rndis_packet_msg_type); + rndis->port.wrap = rndis_add_header; + rndis->port.unwrap = rndis_rm_hdr; + + rndis->port.func.name = "rndis"; + /* descriptors are per-instance copies */ + rndis->port.func.bind = rndis_bind; + rndis->port.func.unbind = rndis_unbind; + rndis->port.func.set_alt = rndis_set_alt; + rndis->port.func.setup = rndis_setup; + rndis->port.func.disable = rndis_disable; + rndis->port.func.free_func = rndis_free; + + status = rndis_register(rndis_response_available, rndis); + if (status < 0) { + kfree(rndis); + return ERR_PTR(status); + } + rndis->config = status; + + return &rndis->port.func; +} + +DECLARE_USB_FUNCTION(rndis, rndis_alloc_inst, rndis_alloc); + +static int __init rndis_mod_init(void) +{ + int ret; + + ret = rndis_init(); + if (ret) + return ret; + + return usb_function_register(&rndisusb_func); +} +module_init(rndis_mod_init); + +static void __exit rndis_mod_exit(void) +{ + usb_function_unregister(&rndisusb_func); + rndis_exit(); +} +module_exit(rndis_mod_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("David Brownell"); diff --git a/drivers/usb/gadget/function/f_serial.c b/drivers/usb/gadget/function/f_serial.c new file mode 100644 index 000000000..2e02dfabc --- /dev/null +++ b/drivers/usb/gadget/function/f_serial.c @@ -0,0 +1,388 @@ +/* + * f_serial.c - generic USB serial function driver + * + * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com) + * Copyright (C) 2008 by David Brownell + * Copyright (C) 2008 by Nokia Corporation + * + * This software is distributed under the terms of the GNU General + * Public License ("GPL") as published by the Free Software Foundation, + * either version 2 of that License or (at your option) any later version. + */ + +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> + +#include "u_serial.h" +#include "gadget_chips.h" + + +/* + * This function packages a simple "generic serial" port with no real + * control mechanisms, just raw data transfer over two bulk endpoints. + * + * Because it's not standardized, this isn't as interoperable as the + * CDC ACM driver. However, for many purposes it's just as functional + * if you can arrange appropriate host side drivers. 
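Once this function registers itself as "gser" (see DECLARE_USB_FUNCTION_INIT near the end of the file), a legacy gadget driver can pull it into a configuration through the composite framework. A minimal sketch under that assumption; the function name bind_one_gser() is illustrative and error handling is reduced to the essentials:

#include <linux/err.h>
#include <linux/usb/composite.h>

/* Sketch: attach one "gser" instance to an existing configuration. */
static int bind_one_gser(struct usb_configuration *c)
{
	struct usb_function_instance *fi;
	struct usb_function *f;
	int ret;

	fi = usb_get_function_instance("gser");	/* may load usb_f_serial */
	if (IS_ERR(fi))
		return PTR_ERR(fi);

	f = usb_get_function(fi);		/* ends up in gser_alloc() */
	if (IS_ERR(f)) {
		usb_put_function_instance(fi);
		return PTR_ERR(f);
	}

	ret = usb_add_function(c, f);		/* binds via gser_bind() */
	if (ret) {
		usb_put_function(f);
		usb_put_function_instance(fi);
	}
	return ret;
}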
+ */ + +struct f_gser { + struct gserial port; + u8 data_id; + u8 port_num; +}; + +static inline struct f_gser *func_to_gser(struct usb_function *f) +{ + return container_of(f, struct f_gser, port.func); +} + +/*-------------------------------------------------------------------------*/ + +/* interface descriptor: */ + +static struct usb_interface_descriptor gser_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + /* .bInterfaceNumber = DYNAMIC */ + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, + .bInterfaceSubClass = 0, + .bInterfaceProtocol = 0, + /* .iInterface = DYNAMIC */ +}; + +/* full speed support: */ + +static struct usb_endpoint_descriptor gser_fs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor gser_fs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_descriptor_header *gser_fs_function[] = { + (struct usb_descriptor_header *) &gser_interface_desc, + (struct usb_descriptor_header *) &gser_fs_in_desc, + (struct usb_descriptor_header *) &gser_fs_out_desc, + NULL, +}; + +/* high speed support: */ + +static struct usb_endpoint_descriptor gser_hs_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor gser_hs_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), +}; + +static struct usb_descriptor_header *gser_hs_function[] = { + (struct usb_descriptor_header *) &gser_interface_desc, + (struct usb_descriptor_header *) &gser_hs_in_desc, + (struct usb_descriptor_header *) &gser_hs_out_desc, + NULL, +}; + +static struct usb_endpoint_descriptor gser_ss_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(1024), +}; + +static struct usb_endpoint_descriptor gser_ss_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(1024), +}; + +static struct usb_ss_ep_comp_descriptor gser_ss_bulk_comp_desc = { + .bLength = sizeof gser_ss_bulk_comp_desc, + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, +}; + +static struct usb_descriptor_header *gser_ss_function[] = { + (struct usb_descriptor_header *) &gser_interface_desc, + (struct usb_descriptor_header *) &gser_ss_in_desc, + (struct usb_descriptor_header *) &gser_ss_bulk_comp_desc, + (struct usb_descriptor_header *) &gser_ss_out_desc, + (struct usb_descriptor_header *) &gser_ss_bulk_comp_desc, + NULL, +}; + +/* string descriptors: */ + +static struct usb_string gser_string_defs[] = { + [0].s = "Generic Serial", + { } /* end of list */ +}; + +static struct usb_gadget_strings gser_string_table = { + .language = 0x0409, /* en-us */ + .strings = gser_string_defs, +}; + +static struct usb_gadget_strings *gser_strings[] = { + &gser_string_table, + NULL, +}; + +/*-------------------------------------------------------------------------*/ + +static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt) +{ + struct f_gser *gser 
= func_to_gser(f); + struct usb_composite_dev *cdev = f->config->cdev; + + /* we know alt == 0, so this is an activation or a reset */ + + if (gser->port.in->driver_data) { + dev_dbg(&cdev->gadget->dev, + "reset generic ttyGS%d\n", gser->port_num); + gserial_disconnect(&gser->port); + } + if (!gser->port.in->desc || !gser->port.out->desc) { + dev_dbg(&cdev->gadget->dev, + "activate generic ttyGS%d\n", gser->port_num); + if (config_ep_by_speed(cdev->gadget, f, gser->port.in) || + config_ep_by_speed(cdev->gadget, f, gser->port.out)) { + gser->port.in->desc = NULL; + gser->port.out->desc = NULL; + return -EINVAL; + } + } + gserial_connect(&gser->port, gser->port_num); + return 0; +} + +static void gser_disable(struct usb_function *f) +{ + struct f_gser *gser = func_to_gser(f); + struct usb_composite_dev *cdev = f->config->cdev; + + dev_dbg(&cdev->gadget->dev, + "generic ttyGS%d deactivated\n", gser->port_num); + gserial_disconnect(&gser->port); +} + +/*-------------------------------------------------------------------------*/ + +/* serial function driver setup/binding */ + +static int gser_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct f_gser *gser = func_to_gser(f); + int status; + struct usb_ep *ep; + + /* REVISIT might want instance-specific strings to help + * distinguish instances ... + */ + + /* maybe allocate device-global string ID */ + if (gser_string_defs[0].id == 0) { + status = usb_string_id(c->cdev); + if (status < 0) + return status; + gser_string_defs[0].id = status; + } + + /* allocate instance-specific interface IDs */ + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + gser->data_id = status; + gser_interface_desc.bInterfaceNumber = status; + + status = -ENODEV; + + /* allocate instance-specific endpoints */ + ep = usb_ep_autoconfig(cdev->gadget, &gser_fs_in_desc); + if (!ep) + goto fail; + gser->port.in = ep; + ep->driver_data = cdev; /* claim */ + + ep = usb_ep_autoconfig(cdev->gadget, &gser_fs_out_desc); + if (!ep) + goto fail; + gser->port.out = ep; + ep->driver_data = cdev; /* claim */ + + /* support all relevant hardware speeds... we expect that when + * hardware is dual speed, all bulk-capable endpoints work at + * both speeds + */ + gser_hs_in_desc.bEndpointAddress = gser_fs_in_desc.bEndpointAddress; + gser_hs_out_desc.bEndpointAddress = gser_fs_out_desc.bEndpointAddress; + + gser_ss_in_desc.bEndpointAddress = gser_fs_in_desc.bEndpointAddress; + gser_ss_out_desc.bEndpointAddress = gser_fs_out_desc.bEndpointAddress; + + status = usb_assign_descriptors(f, gser_fs_function, gser_hs_function, + gser_ss_function); + if (status) + goto fail; + dev_dbg(&cdev->gadget->dev, "generic ttyGS%d: %s speed IN/%s OUT/%s\n", + gser->port_num, + gadget_is_superspeed(c->cdev->gadget) ? "super" : + gadget_is_dualspeed(c->cdev->gadget) ? 
"dual" : "full", + gser->port.in->name, gser->port.out->name); + return 0; + +fail: + /* we might as well release our claims on endpoints */ + if (gser->port.out) + gser->port.out->driver_data = NULL; + if (gser->port.in) + gser->port.in->driver_data = NULL; + + ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); + + return status; +} + +static inline struct f_serial_opts *to_f_serial_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_serial_opts, + func_inst.group); +} + +CONFIGFS_ATTR_STRUCT(f_serial_opts); +static ssize_t f_serial_attr_show(struct config_item *item, + struct configfs_attribute *attr, + char *page) +{ + struct f_serial_opts *opts = to_f_serial_opts(item); + struct f_serial_opts_attribute *f_serial_opts_attr = + container_of(attr, struct f_serial_opts_attribute, attr); + ssize_t ret = 0; + + if (f_serial_opts_attr->show) + ret = f_serial_opts_attr->show(opts, page); + + return ret; +} + +static void serial_attr_release(struct config_item *item) +{ + struct f_serial_opts *opts = to_f_serial_opts(item); + + usb_put_function_instance(&opts->func_inst); +} + +static struct configfs_item_operations serial_item_ops = { + .release = serial_attr_release, + .show_attribute = f_serial_attr_show, +}; + +static ssize_t f_serial_port_num_show(struct f_serial_opts *opts, char *page) +{ + return sprintf(page, "%u\n", opts->port_num); +} + +static struct f_serial_opts_attribute f_serial_port_num = + __CONFIGFS_ATTR_RO(port_num, f_serial_port_num_show); + +static struct configfs_attribute *acm_attrs[] = { + &f_serial_port_num.attr, + NULL, +}; + +static struct config_item_type serial_func_type = { + .ct_item_ops = &serial_item_ops, + .ct_attrs = acm_attrs, + .ct_owner = THIS_MODULE, +}; + +static void gser_free_inst(struct usb_function_instance *f) +{ + struct f_serial_opts *opts; + + opts = container_of(f, struct f_serial_opts, func_inst); + gserial_free_line(opts->port_num); + kfree(opts); +} + +static struct usb_function_instance *gser_alloc_inst(void) +{ + struct f_serial_opts *opts; + int ret; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return ERR_PTR(-ENOMEM); + + opts->func_inst.free_func_inst = gser_free_inst; + ret = gserial_alloc_line(&opts->port_num); + if (ret) { + kfree(opts); + return ERR_PTR(ret); + } + config_group_init_type_name(&opts->func_inst.group, "", + &serial_func_type); + + return &opts->func_inst; +} + +static void gser_free(struct usb_function *f) +{ + struct f_gser *serial; + + serial = func_to_gser(f); + kfree(serial); +} + +static void gser_unbind(struct usb_configuration *c, struct usb_function *f) +{ + usb_free_all_descriptors(f); +} + +static struct usb_function *gser_alloc(struct usb_function_instance *fi) +{ + struct f_gser *gser; + struct f_serial_opts *opts; + + /* allocate and initialize one new instance */ + gser = kzalloc(sizeof(*gser), GFP_KERNEL); + if (!gser) + return ERR_PTR(-ENOMEM); + + opts = container_of(fi, struct f_serial_opts, func_inst); + + gser->port_num = opts->port_num; + + gser->port.func.name = "gser"; + gser->port.func.strings = gser_strings; + gser->port.func.bind = gser_bind; + gser->port.func.unbind = gser_unbind; + gser->port.func.set_alt = gser_set_alt; + gser->port.func.disable = gser_disable; + gser->port.func.free_func = gser_free; + + return &gser->port.func; +} + +DECLARE_USB_FUNCTION_INIT(gser, gser_alloc_inst, gser_alloc); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Al Borchers"); +MODULE_AUTHOR("David Brownell"); diff --git 
a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c new file mode 100644 index 000000000..3a5ae9900 --- /dev/null +++ b/drivers/usb/gadget/function/f_sourcesink.c @@ -0,0 +1,1247 @@ +/* + * f_sourcesink.c - USB peripheral source/sink configuration driver + * + * Copyright (C) 2003-2008 David Brownell + * Copyright (C) 2008 by Nokia Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +/* #define VERBOSE_DEBUG */ + +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/usb/composite.h> +#include <linux/err.h> + +#include "g_zero.h" +#include "gadget_chips.h" +#include "u_f.h" + +/* + * SOURCE/SINK FUNCTION ... a primary testing vehicle for USB peripheral + * controller drivers. + * + * This just sinks bulk packets OUT to the peripheral and sources them IN + * to the host, optionally with specific data patterns for integrity tests. + * As such it supports basic functionality and load tests. + * + * In terms of control messaging, this supports all the standard requests + * plus two that support control-OUT tests. If the optional "autoresume" + * mode is enabled, it provides good functional coverage for the "USBCV" + * test harness from USB-IF. + * + * Note that because this doesn't queue more than one request at a time, + * some other function must be used to test queueing logic. The network + * link (g_ether) is the best overall option for that, since its TX and RX + * queues are relatively independent, will receive a range of packet sizes, + * and can often be made to run out completely. Those issues are important + * when stress testing peripheral controller drivers. + * + * + * This is currently packaged as a configuration driver, which can't be + * combined with other functions to make composite devices. However, it + * can be combined with other independent configurations. 
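The "two that support control-OUT tests" mentioned above are the vendor requests 0x5b (control WRITE, fill the ep0 buffer) and 0x5c (control READ, return it), handled in sourcesink_setup() further down. A host-side sketch using libusb-1.0; the device handle, transfer length and lack of retry logic are assumptions for illustration, and, as the driver's own comment warns, an intervening control request can clobber the shared ep0 buffer:

#include <libusb.h>	/* build with: pkg-config --cflags --libs libusb-1.0 */
#include <string.h>

#define TEST_LEN 256	/* arbitrary; multi-packet control transfers are allowed */

/* Write a buffer with vendor request 0x5b, then read it back with 0x5c. */
static int control_loopback(libusb_device_handle *h)
{
	unsigned char out[TEST_LEN], in[TEST_LEN];
	int ret;

	memset(out, 0x5a, sizeof(out));

	/* bmRequestType 0x40: Host-to-Device | Vendor | Device */
	ret = libusb_control_transfer(h, 0x40, 0x5b, 0, 0,
				      out, sizeof(out), 1000);
	if (ret < 0)
		return ret;

	/* bmRequestType 0xc0: Device-to-Host | Vendor | Device */
	ret = libusb_control_transfer(h, 0xc0, 0x5c, 0, 0,
				      in, sizeof(in), 1000);
	if (ret < 0)
		return ret;

	return memcmp(out, in, sizeof(in)) ? -1 : 0;
}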
+ */ +struct f_sourcesink { + struct usb_function function; + + struct usb_ep *in_ep; + struct usb_ep *out_ep; + struct usb_ep *iso_in_ep; + struct usb_ep *iso_out_ep; + int cur_alt; +}; + +static inline struct f_sourcesink *func_to_ss(struct usb_function *f) +{ + return container_of(f, struct f_sourcesink, function); +} + +static unsigned pattern; +static unsigned isoc_interval; +static unsigned isoc_maxpacket; +static unsigned isoc_mult; +static unsigned isoc_maxburst; +static unsigned buflen; + +/*-------------------------------------------------------------------------*/ + +static struct usb_interface_descriptor source_sink_intf_alt0 = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + + .bAlternateSetting = 0, + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, + /* .iInterface = DYNAMIC */ +}; + +static struct usb_interface_descriptor source_sink_intf_alt1 = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + + .bAlternateSetting = 1, + .bNumEndpoints = 4, + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, + /* .iInterface = DYNAMIC */ +}; + +/* full speed support: */ + +static struct usb_endpoint_descriptor fs_source_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor fs_sink_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor fs_iso_source_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_ISOC, + .wMaxPacketSize = cpu_to_le16(1023), + .bInterval = 4, +}; + +static struct usb_endpoint_descriptor fs_iso_sink_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_ISOC, + .wMaxPacketSize = cpu_to_le16(1023), + .bInterval = 4, +}; + +static struct usb_descriptor_header *fs_source_sink_descs[] = { + (struct usb_descriptor_header *) &source_sink_intf_alt0, + (struct usb_descriptor_header *) &fs_sink_desc, + (struct usb_descriptor_header *) &fs_source_desc, + (struct usb_descriptor_header *) &source_sink_intf_alt1, +#define FS_ALT_IFC_1_OFFSET 3 + (struct usb_descriptor_header *) &fs_sink_desc, + (struct usb_descriptor_header *) &fs_source_desc, + (struct usb_descriptor_header *) &fs_iso_sink_desc, + (struct usb_descriptor_header *) &fs_iso_source_desc, + NULL, +}; + +/* high speed support: */ + +static struct usb_endpoint_descriptor hs_source_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor hs_sink_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor hs_iso_source_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bmAttributes = USB_ENDPOINT_XFER_ISOC, + .wMaxPacketSize = cpu_to_le16(1024), + .bInterval = 4, +}; + +static struct usb_endpoint_descriptor hs_iso_sink_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bmAttributes = 
USB_ENDPOINT_XFER_ISOC, + .wMaxPacketSize = cpu_to_le16(1024), + .bInterval = 4, +}; + +static struct usb_descriptor_header *hs_source_sink_descs[] = { + (struct usb_descriptor_header *) &source_sink_intf_alt0, + (struct usb_descriptor_header *) &hs_source_desc, + (struct usb_descriptor_header *) &hs_sink_desc, + (struct usb_descriptor_header *) &source_sink_intf_alt1, +#define HS_ALT_IFC_1_OFFSET 3 + (struct usb_descriptor_header *) &hs_source_desc, + (struct usb_descriptor_header *) &hs_sink_desc, + (struct usb_descriptor_header *) &hs_iso_source_desc, + (struct usb_descriptor_header *) &hs_iso_sink_desc, + NULL, +}; + +/* super speed support: */ + +static struct usb_endpoint_descriptor ss_source_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(1024), +}; + +static struct usb_ss_ep_comp_descriptor ss_source_comp_desc = { + .bLength = USB_DT_SS_EP_COMP_SIZE, + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + + .bMaxBurst = 0, + .bmAttributes = 0, + .wBytesPerInterval = 0, +}; + +static struct usb_endpoint_descriptor ss_sink_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(1024), +}; + +static struct usb_ss_ep_comp_descriptor ss_sink_comp_desc = { + .bLength = USB_DT_SS_EP_COMP_SIZE, + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + + .bMaxBurst = 0, + .bmAttributes = 0, + .wBytesPerInterval = 0, +}; + +static struct usb_endpoint_descriptor ss_iso_source_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bmAttributes = USB_ENDPOINT_XFER_ISOC, + .wMaxPacketSize = cpu_to_le16(1024), + .bInterval = 4, +}; + +static struct usb_ss_ep_comp_descriptor ss_iso_source_comp_desc = { + .bLength = USB_DT_SS_EP_COMP_SIZE, + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + + .bMaxBurst = 0, + .bmAttributes = 0, + .wBytesPerInterval = cpu_to_le16(1024), +}; + +static struct usb_endpoint_descriptor ss_iso_sink_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bmAttributes = USB_ENDPOINT_XFER_ISOC, + .wMaxPacketSize = cpu_to_le16(1024), + .bInterval = 4, +}; + +static struct usb_ss_ep_comp_descriptor ss_iso_sink_comp_desc = { + .bLength = USB_DT_SS_EP_COMP_SIZE, + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + + .bMaxBurst = 0, + .bmAttributes = 0, + .wBytesPerInterval = cpu_to_le16(1024), +}; + +static struct usb_descriptor_header *ss_source_sink_descs[] = { + (struct usb_descriptor_header *) &source_sink_intf_alt0, + (struct usb_descriptor_header *) &ss_source_desc, + (struct usb_descriptor_header *) &ss_source_comp_desc, + (struct usb_descriptor_header *) &ss_sink_desc, + (struct usb_descriptor_header *) &ss_sink_comp_desc, + (struct usb_descriptor_header *) &source_sink_intf_alt1, +#define SS_ALT_IFC_1_OFFSET 5 + (struct usb_descriptor_header *) &ss_source_desc, + (struct usb_descriptor_header *) &ss_source_comp_desc, + (struct usb_descriptor_header *) &ss_sink_desc, + (struct usb_descriptor_header *) &ss_sink_comp_desc, + (struct usb_descriptor_header *) &ss_iso_source_desc, + (struct usb_descriptor_header *) &ss_iso_source_comp_desc, + (struct usb_descriptor_header *) &ss_iso_sink_desc, + (struct usb_descriptor_header *) &ss_iso_sink_comp_desc, + NULL, +}; + +/* function-specific strings: */ + +static struct usb_string strings_sourcesink[] = { + [0].s = "source and sink data", + { } /* end of list */ +}; + +static struct 
usb_gadget_strings stringtab_sourcesink = { + .language = 0x0409, /* en-us */ + .strings = strings_sourcesink, +}; + +static struct usb_gadget_strings *sourcesink_strings[] = { + &stringtab_sourcesink, + NULL, +}; + +/*-------------------------------------------------------------------------*/ + +static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len) +{ + return alloc_ep_req(ep, len, buflen); +} + +void free_ep_req(struct usb_ep *ep, struct usb_request *req) +{ + kfree(req->buf); + usb_ep_free_request(ep, req); +} + +static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep) +{ + int value; + + if (ep->driver_data) { + value = usb_ep_disable(ep); + if (value < 0) + DBG(cdev, "disable %s --> %d\n", + ep->name, value); + ep->driver_data = NULL; + } +} + +void disable_endpoints(struct usb_composite_dev *cdev, + struct usb_ep *in, struct usb_ep *out, + struct usb_ep *iso_in, struct usb_ep *iso_out) +{ + disable_ep(cdev, in); + disable_ep(cdev, out); + if (iso_in) + disable_ep(cdev, iso_in); + if (iso_out) + disable_ep(cdev, iso_out); +} + +static int +sourcesink_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct f_sourcesink *ss = func_to_ss(f); + int id; + int ret; + + /* allocate interface ID(s) */ + id = usb_interface_id(c, f); + if (id < 0) + return id; + source_sink_intf_alt0.bInterfaceNumber = id; + source_sink_intf_alt1.bInterfaceNumber = id; + + /* allocate bulk endpoints */ + ss->in_ep = usb_ep_autoconfig(cdev->gadget, &fs_source_desc); + if (!ss->in_ep) { +autoconf_fail: + ERROR(cdev, "%s: can't autoconfigure on %s\n", + f->name, cdev->gadget->name); + return -ENODEV; + } + ss->in_ep->driver_data = cdev; /* claim */ + + ss->out_ep = usb_ep_autoconfig(cdev->gadget, &fs_sink_desc); + if (!ss->out_ep) + goto autoconf_fail; + ss->out_ep->driver_data = cdev; /* claim */ + + /* sanity check the isoc module parameters */ + if (isoc_interval < 1) + isoc_interval = 1; + if (isoc_interval > 16) + isoc_interval = 16; + if (isoc_mult > 2) + isoc_mult = 2; + if (isoc_maxburst > 15) + isoc_maxburst = 15; + + /* fill in the FS isoc descriptors from the module parameters */ + fs_iso_source_desc.wMaxPacketSize = isoc_maxpacket > 1023 ? + 1023 : isoc_maxpacket; + fs_iso_source_desc.bInterval = isoc_interval; + fs_iso_sink_desc.wMaxPacketSize = isoc_maxpacket > 1023 ? + 1023 : isoc_maxpacket; + fs_iso_sink_desc.bInterval = isoc_interval; + + /* allocate iso endpoints */ + ss->iso_in_ep = usb_ep_autoconfig(cdev->gadget, &fs_iso_source_desc); + if (!ss->iso_in_ep) + goto no_iso; + ss->iso_in_ep->driver_data = cdev; /* claim */ + + ss->iso_out_ep = usb_ep_autoconfig(cdev->gadget, &fs_iso_sink_desc); + if (ss->iso_out_ep) { + ss->iso_out_ep->driver_data = cdev; /* claim */ + } else { + ss->iso_in_ep->driver_data = NULL; + ss->iso_in_ep = NULL; +no_iso: + /* + * We still want to work even if the UDC doesn't have isoc + * endpoints, so null out the alt interface that contains + * them and continue. + */ + fs_source_sink_descs[FS_ALT_IFC_1_OFFSET] = NULL; + hs_source_sink_descs[HS_ALT_IFC_1_OFFSET] = NULL; + ss_source_sink_descs[SS_ALT_IFC_1_OFFSET] = NULL; + } + + if (isoc_maxpacket > 1024) + isoc_maxpacket = 1024; + + /* support high speed hardware */ + hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress; + hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress; + + /* + * Fill in the HS isoc descriptors from the module parameters. 
+ * We assume that the user knows what they are doing and won't + * give parameters that their UDC doesn't support. + */ + hs_iso_source_desc.wMaxPacketSize = isoc_maxpacket; + hs_iso_source_desc.wMaxPacketSize |= isoc_mult << 11; + hs_iso_source_desc.bInterval = isoc_interval; + hs_iso_source_desc.bEndpointAddress = + fs_iso_source_desc.bEndpointAddress; + + hs_iso_sink_desc.wMaxPacketSize = isoc_maxpacket; + hs_iso_sink_desc.wMaxPacketSize |= isoc_mult << 11; + hs_iso_sink_desc.bInterval = isoc_interval; + hs_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; + + /* support super speed hardware */ + ss_source_desc.bEndpointAddress = + fs_source_desc.bEndpointAddress; + ss_sink_desc.bEndpointAddress = + fs_sink_desc.bEndpointAddress; + + /* + * Fill in the SS isoc descriptors from the module parameters. + * We assume that the user knows what they are doing and won't + * give parameters that their UDC doesn't support. + */ + ss_iso_source_desc.wMaxPacketSize = isoc_maxpacket; + ss_iso_source_desc.bInterval = isoc_interval; + ss_iso_source_comp_desc.bmAttributes = isoc_mult; + ss_iso_source_comp_desc.bMaxBurst = isoc_maxburst; + ss_iso_source_comp_desc.wBytesPerInterval = + isoc_maxpacket * (isoc_mult + 1) * (isoc_maxburst + 1); + ss_iso_source_desc.bEndpointAddress = + fs_iso_source_desc.bEndpointAddress; + + ss_iso_sink_desc.wMaxPacketSize = isoc_maxpacket; + ss_iso_sink_desc.bInterval = isoc_interval; + ss_iso_sink_comp_desc.bmAttributes = isoc_mult; + ss_iso_sink_comp_desc.bMaxBurst = isoc_maxburst; + ss_iso_sink_comp_desc.wBytesPerInterval = + isoc_maxpacket * (isoc_mult + 1) * (isoc_maxburst + 1); + ss_iso_sink_desc.bEndpointAddress = fs_iso_sink_desc.bEndpointAddress; + + ret = usb_assign_descriptors(f, fs_source_sink_descs, + hs_source_sink_descs, ss_source_sink_descs); + if (ret) + return ret; + + DBG(cdev, "%s speed %s: IN/%s, OUT/%s, ISO-IN/%s, ISO-OUT/%s\n", + (gadget_is_superspeed(c->cdev->gadget) ? "super" : + (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full")), + f->name, ss->in_ep->name, ss->out_ep->name, + ss->iso_in_ep ? ss->iso_in_ep->name : "<none>", + ss->iso_out_ep ? ss->iso_out_ep->name : "<none>"); + return 0; +} + +static void +sourcesink_free_func(struct usb_function *f) +{ + struct f_ss_opts *opts; + + opts = container_of(f->fi, struct f_ss_opts, func_inst); + + mutex_lock(&opts->lock); + opts->refcnt--; + mutex_unlock(&opts->lock); + + usb_free_all_descriptors(f); + kfree(func_to_ss(f)); +} + +/* optionally require specific source/sink data patterns */ +static int check_read_data(struct f_sourcesink *ss, struct usb_request *req) +{ + unsigned i; + u8 *buf = req->buf; + struct usb_composite_dev *cdev = ss->function.config->cdev; + + if (pattern == 2) + return 0; + + for (i = 0; i < req->actual; i++, buf++) { + switch (pattern) { + + /* all-zeroes has no synchronization issues */ + case 0: + if (*buf == 0) + continue; + break; + + /* "mod63" stays in sync with short-terminated transfers, + * OR otherwise when host and gadget agree on how large + * each usb transfer request should be. Resync is done + * with set_interface or set_config. (We *WANT* it to + * get quickly out of sync if controllers or their drivers + * stutter for any reason, including buffer duplication...) 
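As a concrete illustration of why this pattern survives packet boundaries: with 512-byte bulk packets, packet k starts at byte offset 512 * k, and since gcd(512, 63) = 1 the starting phase (512 * k) % 63 walks through all 63 residues before repeating, so a lost or duplicated packet shows up as a mismatch. A tiny sketch of that phase calculation; the helper name is illustrative:

/* Sketch: mod63 pattern phase at the start of packet number 'packet'. */
static unsigned mod63_phase(unsigned packet, unsigned maxpacket)
{
	return (packet * maxpacket) % 63;	/* 512-byte packets step by 8 */
}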
+ */ + case 1: + if (*buf == (u8)(i % 63)) + continue; + break; + } + ERROR(cdev, "bad OUT byte, buf[%d] = %d\n", i, *buf); + usb_ep_set_halt(ss->out_ep); + return -EINVAL; + } + return 0; +} + +static void reinit_write_data(struct usb_ep *ep, struct usb_request *req) +{ + unsigned i; + u8 *buf = req->buf; + + switch (pattern) { + case 0: + memset(req->buf, 0, req->length); + break; + case 1: + for (i = 0; i < req->length; i++) + *buf++ = (u8) (i % 63); + break; + case 2: + break; + } +} + +static void source_sink_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct usb_composite_dev *cdev; + struct f_sourcesink *ss = ep->driver_data; + int status = req->status; + + /* driver_data will be null if ep has been disabled */ + if (!ss) + return; + + cdev = ss->function.config->cdev; + + switch (status) { + + case 0: /* normal completion? */ + if (ep == ss->out_ep) { + check_read_data(ss, req); + if (pattern != 2) + memset(req->buf, 0x55, req->length); + } + break; + + /* this endpoint is normally active while we're configured */ + case -ECONNABORTED: /* hardware forced ep reset */ + case -ECONNRESET: /* request dequeued */ + case -ESHUTDOWN: /* disconnect from host */ + VDBG(cdev, "%s gone (%d), %d/%d\n", ep->name, status, + req->actual, req->length); + if (ep == ss->out_ep) + check_read_data(ss, req); + free_ep_req(ep, req); + return; + + case -EOVERFLOW: /* buffer overrun on read means that + * we didn't provide a big enough + * buffer. + */ + default: +#if 1 + DBG(cdev, "%s complete --> %d, %d/%d\n", ep->name, + status, req->actual, req->length); +#endif + case -EREMOTEIO: /* short read */ + break; + } + + status = usb_ep_queue(ep, req, GFP_ATOMIC); + if (status) { + ERROR(cdev, "kill %s: resubmit %d bytes --> %d\n", + ep->name, req->length, status); + usb_ep_set_halt(ep); + /* FIXME recover later ... somehow */ + } +} + +static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in, + bool is_iso, int speed) +{ + struct usb_ep *ep; + struct usb_request *req; + int i, size, status; + + for (i = 0; i < 8; i++) { + if (is_iso) { + switch (speed) { + case USB_SPEED_SUPER: + size = isoc_maxpacket * (isoc_mult + 1) * + (isoc_maxburst + 1); + break; + case USB_SPEED_HIGH: + size = isoc_maxpacket * (isoc_mult + 1); + break; + default: + size = isoc_maxpacket > 1023 ? + 1023 : isoc_maxpacket; + break; + } + ep = is_in ? ss->iso_in_ep : ss->iso_out_ep; + req = ss_alloc_ep_req(ep, size); + } else { + ep = is_in ? ss->in_ep : ss->out_ep; + req = ss_alloc_ep_req(ep, 0); + } + + if (!req) + return -ENOMEM; + + req->complete = source_sink_complete; + if (is_in) + reinit_write_data(ep, req); + else if (pattern != 2) + memset(req->buf, 0x55, req->length); + + status = usb_ep_queue(ep, req, GFP_ATOMIC); + if (status) { + struct usb_composite_dev *cdev; + + cdev = ss->function.config->cdev; + ERROR(cdev, "start %s%s %s --> %d\n", + is_iso ? "ISO-" : "", is_in ? 
"IN" : "OUT", + ep->name, status); + free_ep_req(ep, req); + } + + if (!is_iso) + break; + } + + return status; +} + +static void disable_source_sink(struct f_sourcesink *ss) +{ + struct usb_composite_dev *cdev; + + cdev = ss->function.config->cdev; + disable_endpoints(cdev, ss->in_ep, ss->out_ep, ss->iso_in_ep, + ss->iso_out_ep); + VDBG(cdev, "%s disabled\n", ss->function.name); +} + +static int +enable_source_sink(struct usb_composite_dev *cdev, struct f_sourcesink *ss, + int alt) +{ + int result = 0; + int speed = cdev->gadget->speed; + struct usb_ep *ep; + + /* one bulk endpoint writes (sources) zeroes IN (to the host) */ + ep = ss->in_ep; + result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); + if (result) + return result; + result = usb_ep_enable(ep); + if (result < 0) + return result; + ep->driver_data = ss; + + result = source_sink_start_ep(ss, true, false, speed); + if (result < 0) { +fail: + ep = ss->in_ep; + usb_ep_disable(ep); + ep->driver_data = NULL; + return result; + } + + /* one bulk endpoint reads (sinks) anything OUT (from the host) */ + ep = ss->out_ep; + result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); + if (result) + goto fail; + result = usb_ep_enable(ep); + if (result < 0) + goto fail; + ep->driver_data = ss; + + result = source_sink_start_ep(ss, false, false, speed); + if (result < 0) { +fail2: + ep = ss->out_ep; + usb_ep_disable(ep); + ep->driver_data = NULL; + goto fail; + } + + if (alt == 0) + goto out; + + /* one iso endpoint writes (sources) zeroes IN (to the host) */ + ep = ss->iso_in_ep; + if (ep) { + result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); + if (result) + goto fail2; + result = usb_ep_enable(ep); + if (result < 0) + goto fail2; + ep->driver_data = ss; + + result = source_sink_start_ep(ss, true, true, speed); + if (result < 0) { +fail3: + ep = ss->iso_in_ep; + if (ep) { + usb_ep_disable(ep); + ep->driver_data = NULL; + } + goto fail2; + } + } + + /* one iso endpoint reads (sinks) anything OUT (from the host) */ + ep = ss->iso_out_ep; + if (ep) { + result = config_ep_by_speed(cdev->gadget, &(ss->function), ep); + if (result) + goto fail3; + result = usb_ep_enable(ep); + if (result < 0) + goto fail3; + ep->driver_data = ss; + + result = source_sink_start_ep(ss, false, true, speed); + if (result < 0) { + usb_ep_disable(ep); + ep->driver_data = NULL; + goto fail3; + } + } +out: + ss->cur_alt = alt; + + DBG(cdev, "%s enabled, alt intf %d\n", ss->function.name, alt); + return result; +} + +static int sourcesink_set_alt(struct usb_function *f, + unsigned intf, unsigned alt) +{ + struct f_sourcesink *ss = func_to_ss(f); + struct usb_composite_dev *cdev = f->config->cdev; + + if (ss->in_ep->driver_data) + disable_source_sink(ss); + return enable_source_sink(cdev, ss, alt); +} + +static int sourcesink_get_alt(struct usb_function *f, unsigned intf) +{ + struct f_sourcesink *ss = func_to_ss(f); + + return ss->cur_alt; +} + +static void sourcesink_disable(struct usb_function *f) +{ + struct f_sourcesink *ss = func_to_ss(f); + + disable_source_sink(ss); +} + +/*-------------------------------------------------------------------------*/ + +static int sourcesink_setup(struct usb_function *f, + const struct usb_ctrlrequest *ctrl) +{ + struct usb_configuration *c = f->config; + struct usb_request *req = c->cdev->req; + int value = -EOPNOTSUPP; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + + req->length = USB_COMP_EP0_BUFSIZ; + + /* composite 
driver infrastructure handles everything except + * the two control test requests. + */ + switch (ctrl->bRequest) { + + /* + * These are the same vendor-specific requests supported by + * Intel's USB 2.0 compliance test devices. We exceed that + * device spec by allowing multiple-packet requests. + * + * NOTE: the Control-OUT data stays in req->buf ... better + * would be copying it into a scratch buffer, so that other + * requests may safely intervene. + */ + case 0x5b: /* control WRITE test -- fill the buffer */ + if (ctrl->bRequestType != (USB_DIR_OUT|USB_TYPE_VENDOR)) + goto unknown; + if (w_value || w_index) + break; + /* just read that many bytes into the buffer */ + if (w_length > req->length) + break; + value = w_length; + break; + case 0x5c: /* control READ test -- return the buffer */ + if (ctrl->bRequestType != (USB_DIR_IN|USB_TYPE_VENDOR)) + goto unknown; + if (w_value || w_index) + break; + /* expect those bytes are still in the buffer; send back */ + if (w_length > req->length) + break; + value = w_length; + break; + + default: +unknown: + VDBG(c->cdev, + "unknown control req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + } + + /* respond with data transfer or status phase? */ + if (value >= 0) { + VDBG(c->cdev, "source/sink req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + req->zero = 0; + req->length = value; + value = usb_ep_queue(c->cdev->gadget->ep0, req, GFP_ATOMIC); + if (value < 0) + ERROR(c->cdev, "source/sink response, err %d\n", + value); + } + + /* device either stalls (value < 0) or reports success */ + return value; +} + +static struct usb_function *source_sink_alloc_func( + struct usb_function_instance *fi) +{ + struct f_sourcesink *ss; + struct f_ss_opts *ss_opts; + + ss = kzalloc(sizeof(*ss), GFP_KERNEL); + if (!ss) + return NULL; + + ss_opts = container_of(fi, struct f_ss_opts, func_inst); + + mutex_lock(&ss_opts->lock); + ss_opts->refcnt++; + mutex_unlock(&ss_opts->lock); + + pattern = ss_opts->pattern; + isoc_interval = ss_opts->isoc_interval; + isoc_maxpacket = ss_opts->isoc_maxpacket; + isoc_mult = ss_opts->isoc_mult; + isoc_maxburst = ss_opts->isoc_maxburst; + buflen = ss_opts->bulk_buflen; + + ss->function.name = "source/sink"; + ss->function.bind = sourcesink_bind; + ss->function.set_alt = sourcesink_set_alt; + ss->function.get_alt = sourcesink_get_alt; + ss->function.disable = sourcesink_disable; + ss->function.setup = sourcesink_setup; + ss->function.strings = sourcesink_strings; + + ss->function.free_func = sourcesink_free_func; + + return &ss->function; +} + +static inline struct f_ss_opts *to_f_ss_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_ss_opts, + func_inst.group); +} + +CONFIGFS_ATTR_STRUCT(f_ss_opts); +CONFIGFS_ATTR_OPS(f_ss_opts); + +static void ss_attr_release(struct config_item *item) +{ + struct f_ss_opts *ss_opts = to_f_ss_opts(item); + + usb_put_function_instance(&ss_opts->func_inst); +} + +static struct configfs_item_operations ss_item_ops = { + .release = ss_attr_release, + .show_attribute = f_ss_opts_attr_show, + .store_attribute = f_ss_opts_attr_store, +}; + +static ssize_t f_ss_opts_pattern_show(struct f_ss_opts *opts, char *page) +{ + int result; + + mutex_lock(&opts->lock); + result = sprintf(page, "%u", opts->pattern); + mutex_unlock(&opts->lock); + + return result; +} + +static ssize_t f_ss_opts_pattern_store(struct f_ss_opts *opts, + const char *page, size_t len) +{ + int ret; 
+ u8 num; + + mutex_lock(&opts->lock); + if (opts->refcnt) { + ret = -EBUSY; + goto end; + } + + ret = kstrtou8(page, 0, &num); + if (ret) + goto end; + + if (num != 0 && num != 1 && num != 2) { + ret = -EINVAL; + goto end; + } + + opts->pattern = num; + ret = len; +end: + mutex_unlock(&opts->lock); + return ret; +} + +static struct f_ss_opts_attribute f_ss_opts_pattern = + __CONFIGFS_ATTR(pattern, S_IRUGO | S_IWUSR, + f_ss_opts_pattern_show, + f_ss_opts_pattern_store); + +static ssize_t f_ss_opts_isoc_interval_show(struct f_ss_opts *opts, char *page) +{ + int result; + + mutex_lock(&opts->lock); + result = sprintf(page, "%u", opts->isoc_interval); + mutex_unlock(&opts->lock); + + return result; +} + +static ssize_t f_ss_opts_isoc_interval_store(struct f_ss_opts *opts, + const char *page, size_t len) +{ + int ret; + u8 num; + + mutex_lock(&opts->lock); + if (opts->refcnt) { + ret = -EBUSY; + goto end; + } + + ret = kstrtou8(page, 0, &num); + if (ret) + goto end; + + if (num > 16) { + ret = -EINVAL; + goto end; + } + + opts->isoc_interval = num; + ret = len; +end: + mutex_unlock(&opts->lock); + return ret; +} + +static struct f_ss_opts_attribute f_ss_opts_isoc_interval = + __CONFIGFS_ATTR(isoc_interval, S_IRUGO | S_IWUSR, + f_ss_opts_isoc_interval_show, + f_ss_opts_isoc_interval_store); + +static ssize_t f_ss_opts_isoc_maxpacket_show(struct f_ss_opts *opts, char *page) +{ + int result; + + mutex_lock(&opts->lock); + result = sprintf(page, "%u", opts->isoc_maxpacket); + mutex_unlock(&opts->lock); + + return result; +} + +static ssize_t f_ss_opts_isoc_maxpacket_store(struct f_ss_opts *opts, + const char *page, size_t len) +{ + int ret; + u16 num; + + mutex_lock(&opts->lock); + if (opts->refcnt) { + ret = -EBUSY; + goto end; + } + + ret = kstrtou16(page, 0, &num); + if (ret) + goto end; + + if (num > 1024) { + ret = -EINVAL; + goto end; + } + + opts->isoc_maxpacket = num; + ret = len; +end: + mutex_unlock(&opts->lock); + return ret; +} + +static struct f_ss_opts_attribute f_ss_opts_isoc_maxpacket = + __CONFIGFS_ATTR(isoc_maxpacket, S_IRUGO | S_IWUSR, + f_ss_opts_isoc_maxpacket_show, + f_ss_opts_isoc_maxpacket_store); + +static ssize_t f_ss_opts_isoc_mult_show(struct f_ss_opts *opts, char *page) +{ + int result; + + mutex_lock(&opts->lock); + result = sprintf(page, "%u", opts->isoc_mult); + mutex_unlock(&opts->lock); + + return result; +} + +static ssize_t f_ss_opts_isoc_mult_store(struct f_ss_opts *opts, + const char *page, size_t len) +{ + int ret; + u8 num; + + mutex_lock(&opts->lock); + if (opts->refcnt) { + ret = -EBUSY; + goto end; + } + + ret = kstrtou8(page, 0, &num); + if (ret) + goto end; + + if (num > 2) { + ret = -EINVAL; + goto end; + } + + opts->isoc_mult = num; + ret = len; +end: + mutex_unlock(&opts->lock); + return ret; +} + +static struct f_ss_opts_attribute f_ss_opts_isoc_mult = + __CONFIGFS_ATTR(isoc_mult, S_IRUGO | S_IWUSR, + f_ss_opts_isoc_mult_show, + f_ss_opts_isoc_mult_store); + +static ssize_t f_ss_opts_isoc_maxburst_show(struct f_ss_opts *opts, char *page) +{ + int result; + + mutex_lock(&opts->lock); + result = sprintf(page, "%u", opts->isoc_maxburst); + mutex_unlock(&opts->lock); + + return result; +} + +static ssize_t f_ss_opts_isoc_maxburst_store(struct f_ss_opts *opts, + const char *page, size_t len) +{ + int ret; + u8 num; + + mutex_lock(&opts->lock); + if (opts->refcnt) { + ret = -EBUSY; + goto end; + } + + ret = kstrtou8(page, 0, &num); + if (ret) + goto end; + + if (num > 15) { + ret = -EINVAL; + goto end; + } + + opts->isoc_maxburst = num; + ret = len; +end: 
+ mutex_unlock(&opts->lock); + return ret; +} + +static struct f_ss_opts_attribute f_ss_opts_isoc_maxburst = + __CONFIGFS_ATTR(isoc_maxburst, S_IRUGO | S_IWUSR, + f_ss_opts_isoc_maxburst_show, + f_ss_opts_isoc_maxburst_store); + +static ssize_t f_ss_opts_bulk_buflen_show(struct f_ss_opts *opts, char *page) +{ + int result; + + mutex_lock(&opts->lock); + result = sprintf(page, "%u", opts->bulk_buflen); + mutex_unlock(&opts->lock); + + return result; +} + +static ssize_t f_ss_opts_bulk_buflen_store(struct f_ss_opts *opts, + const char *page, size_t len) +{ + int ret; + u32 num; + + mutex_lock(&opts->lock); + if (opts->refcnt) { + ret = -EBUSY; + goto end; + } + + ret = kstrtou32(page, 0, &num); + if (ret) + goto end; + + opts->bulk_buflen = num; + ret = len; +end: + mutex_unlock(&opts->lock); + return ret; +} + +static struct f_ss_opts_attribute f_ss_opts_bulk_buflen = + __CONFIGFS_ATTR(buflen, S_IRUGO | S_IWUSR, + f_ss_opts_bulk_buflen_show, + f_ss_opts_bulk_buflen_store); + +static struct configfs_attribute *ss_attrs[] = { + &f_ss_opts_pattern.attr, + &f_ss_opts_isoc_interval.attr, + &f_ss_opts_isoc_maxpacket.attr, + &f_ss_opts_isoc_mult.attr, + &f_ss_opts_isoc_maxburst.attr, + &f_ss_opts_bulk_buflen.attr, + NULL, +}; + +static struct config_item_type ss_func_type = { + .ct_item_ops = &ss_item_ops, + .ct_attrs = ss_attrs, + .ct_owner = THIS_MODULE, +}; + +static void source_sink_free_instance(struct usb_function_instance *fi) +{ + struct f_ss_opts *ss_opts; + + ss_opts = container_of(fi, struct f_ss_opts, func_inst); + kfree(ss_opts); +} + +static struct usb_function_instance *source_sink_alloc_inst(void) +{ + struct f_ss_opts *ss_opts; + + ss_opts = kzalloc(sizeof(*ss_opts), GFP_KERNEL); + if (!ss_opts) + return ERR_PTR(-ENOMEM); + mutex_init(&ss_opts->lock); + ss_opts->func_inst.free_func_inst = source_sink_free_instance; + ss_opts->isoc_interval = GZERO_ISOC_INTERVAL; + ss_opts->isoc_maxpacket = GZERO_ISOC_MAXPACKET; + ss_opts->bulk_buflen = GZERO_BULK_BUFLEN; + + config_group_init_type_name(&ss_opts->func_inst.group, "", + &ss_func_type); + + return &ss_opts->func_inst; +} +DECLARE_USB_FUNCTION(SourceSink, source_sink_alloc_inst, + source_sink_alloc_func); + +static int __init sslb_modinit(void) +{ + int ret; + + ret = usb_function_register(&SourceSinkusb_func); + if (ret) + return ret; + ret = lb_modinit(); + if (ret) + usb_function_unregister(&SourceSinkusb_func); + return ret; +} +static void __exit sslb_modexit(void) +{ + usb_function_unregister(&SourceSinkusb_func); + lb_modexit(); +} +module_init(sslb_modinit); +module_exit(sslb_modexit); + +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/function/f_subset.c b/drivers/usb/gadget/function/f_subset.c new file mode 100644 index 000000000..e3dfa675f --- /dev/null +++ b/drivers/usb/gadget/function/f_subset.c @@ -0,0 +1,518 @@ +/* + * f_subset.c -- "CDC Subset" Ethernet link function driver + * + * Copyright (C) 2003-2005,2008 David Brownell + * Copyright (C) 2008 Nokia Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/etherdevice.h> + +#include "u_ether.h" +#include "u_ether_configfs.h" +#include "u_gether.h" + +/* + * This function packages a simple "CDC Subset" Ethernet port with no real + * control mechanisms; just raw data transfer over two bulk endpoints. + * The data transfer model is exactly that of CDC Ethernet, which is + * why we call it the "CDC Subset". + * + * Because it's not standardized, this has some interoperability issues. + * They mostly relate to driver binding, since the data transfer model is + * so simple (CDC Ethernet). The original versions of this protocol used + * specific product/vendor IDs: byteswapped IDs for Digital Equipment's + * SA-1100 "Itsy" board, which could run Linux 2.4 kernels and supported + * daughtercards with USB peripheral connectors. (It was used more often + * with other boards, using the Itsy identifiers.) Linux hosts recognized + * this with CONFIG_USB_ARMLINUX; these devices have only one configuration + * and one interface. + * + * At some point, MCCI defined a (nonconformant) CDC MDLM variant called + * "SAFE", which happens to have a mode which is identical to the "CDC + * Subset" in terms of data transfer and lack of control model. This was + * adopted by later Sharp Zaurus models, and by some other software which + * Linux hosts recognize with CONFIG_USB_NET_ZAURUS. + * + * Because Microsoft's RNDIS drivers are far from robust, we added a few + * descriptors to the CDC Subset code, making this code look like a SAFE + * implementation. This lets you use MCCI's host side MS-Windows drivers + * if you get fed up with RNDIS. It also makes it easier for composite + * drivers to work, since they can use class based binding instead of + * caring about specific product and vendor IDs. + */ + +struct f_gether { + struct gether port; + + char ethaddr[14]; +}; + +static inline struct f_gether *func_to_geth(struct usb_function *f) +{ + return container_of(f, struct f_gether, port.func); +} + +/*-------------------------------------------------------------------------*/ + +/* + * "Simple" CDC-subset option is a simple vendor-neutral model that most + * full speed controllers can handle: one interface, two bulk endpoints. + * To assist host side drivers, we fancy it up a bit, and add descriptors so + * some host side drivers will understand it as a "SAFE" variant. + * + * "SAFE" loosely follows CDC WMC MDLM, violating the spec in various ways. + * Data endpoints live in the control interface, there's no data interface. + * And it's not used to talk to a cell phone radio. 
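Because the "SAFE" descriptors below advertise a standard communications-class triple, a host can bind by interface class instead of by vendor/product ID, which is the "class based binding" the comment above refers to. A sketch of such a match entry, illustrative rather than taken from any host driver:

#include <linux/usb.h>
#include <linux/usb/cdc.h>

/* Sketch only: match any MDLM communications interface, the way a
 * SAFE-aware host-side driver might.
 */
static const struct usb_device_id safe_match_sketch[] = {
	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM, 0) },
	{ }	/* terminating entry */
};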
+ */ + +/* interface descriptor: */ + +static struct usb_interface_descriptor subset_data_intf = { + .bLength = sizeof subset_data_intf, + .bDescriptorType = USB_DT_INTERFACE, + + /* .bInterfaceNumber = DYNAMIC */ + .bAlternateSetting = 0, + .bNumEndpoints = 2, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, + .bInterfaceProtocol = 0, + /* .iInterface = DYNAMIC */ +}; + +static struct usb_cdc_header_desc mdlm_header_desc = { + .bLength = sizeof mdlm_header_desc, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_HEADER_TYPE, + + .bcdCDC = cpu_to_le16(0x0110), +}; + +static struct usb_cdc_mdlm_desc mdlm_desc = { + .bLength = sizeof mdlm_desc, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_MDLM_TYPE, + + .bcdVersion = cpu_to_le16(0x0100), + .bGUID = { + 0x5d, 0x34, 0xcf, 0x66, 0x11, 0x18, 0x11, 0xd6, + 0xa2, 0x1a, 0x00, 0x01, 0x02, 0xca, 0x9a, 0x7f, + }, +}; + +/* since "usb_cdc_mdlm_detail_desc" is a variable length structure, we + * can't really use its struct. All we do here is say that we're using + * the submode of "SAFE" which directly matches the CDC Subset. + */ +static u8 mdlm_detail_desc[] = { + 6, + USB_DT_CS_INTERFACE, + USB_CDC_MDLM_DETAIL_TYPE, + + 0, /* "SAFE" */ + 0, /* network control capabilities (none) */ + 0, /* network data capabilities ("raw" encapsulation) */ +}; + +static struct usb_cdc_ether_desc ether_desc = { + .bLength = sizeof ether_desc, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubType = USB_CDC_ETHERNET_TYPE, + + /* this descriptor actually adds value, surprise! */ + /* .iMACAddress = DYNAMIC */ + .bmEthernetStatistics = cpu_to_le32(0), /* no statistics */ + .wMaxSegmentSize = cpu_to_le16(ETH_FRAME_LEN), + .wNumberMCFilters = cpu_to_le16(0), + .bNumberPowerFilters = 0, +}; + +/* full speed support: */ + +static struct usb_endpoint_descriptor fs_subset_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_endpoint_descriptor fs_subset_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, +}; + +static struct usb_descriptor_header *fs_eth_function[] = { + (struct usb_descriptor_header *) &subset_data_intf, + (struct usb_descriptor_header *) &mdlm_header_desc, + (struct usb_descriptor_header *) &mdlm_desc, + (struct usb_descriptor_header *) &mdlm_detail_desc, + (struct usb_descriptor_header *) ðer_desc, + (struct usb_descriptor_header *) &fs_subset_in_desc, + (struct usb_descriptor_header *) &fs_subset_out_desc, + NULL, +}; + +/* high speed support: */ + +static struct usb_endpoint_descriptor hs_subset_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), +}; + +static struct usb_endpoint_descriptor hs_subset_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), +}; + +static struct usb_descriptor_header *hs_eth_function[] = { + (struct usb_descriptor_header *) &subset_data_intf, + (struct usb_descriptor_header *) &mdlm_header_desc, + (struct usb_descriptor_header *) &mdlm_desc, + (struct usb_descriptor_header *) &mdlm_detail_desc, + (struct usb_descriptor_header *) ðer_desc, + (struct 
usb_descriptor_header *) &hs_subset_in_desc, + (struct usb_descriptor_header *) &hs_subset_out_desc, + NULL, +}; + +/* super speed support: */ + +static struct usb_endpoint_descriptor ss_subset_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(1024), +}; + +static struct usb_endpoint_descriptor ss_subset_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(1024), +}; + +static struct usb_ss_ep_comp_descriptor ss_subset_bulk_comp_desc = { + .bLength = sizeof ss_subset_bulk_comp_desc, + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + + /* the following 2 values can be tweaked if necessary */ + /* .bMaxBurst = 0, */ + /* .bmAttributes = 0, */ +}; + +static struct usb_descriptor_header *ss_eth_function[] = { + (struct usb_descriptor_header *) &subset_data_intf, + (struct usb_descriptor_header *) &mdlm_header_desc, + (struct usb_descriptor_header *) &mdlm_desc, + (struct usb_descriptor_header *) &mdlm_detail_desc, + (struct usb_descriptor_header *) ðer_desc, + (struct usb_descriptor_header *) &ss_subset_in_desc, + (struct usb_descriptor_header *) &ss_subset_bulk_comp_desc, + (struct usb_descriptor_header *) &ss_subset_out_desc, + (struct usb_descriptor_header *) &ss_subset_bulk_comp_desc, + NULL, +}; + +/* string descriptors: */ + +static struct usb_string geth_string_defs[] = { + [0].s = "CDC Ethernet Subset/SAFE", + [1].s = "", + { } /* end of list */ +}; + +static struct usb_gadget_strings geth_string_table = { + .language = 0x0409, /* en-us */ + .strings = geth_string_defs, +}; + +static struct usb_gadget_strings *geth_strings[] = { + &geth_string_table, + NULL, +}; + +/*-------------------------------------------------------------------------*/ + +static int geth_set_alt(struct usb_function *f, unsigned intf, unsigned alt) +{ + struct f_gether *geth = func_to_geth(f); + struct usb_composite_dev *cdev = f->config->cdev; + struct net_device *net; + + /* we know alt == 0, so this is an activation or a reset */ + + if (geth->port.in_ep->driver_data) { + DBG(cdev, "reset cdc subset\n"); + gether_disconnect(&geth->port); + } + + DBG(cdev, "init + activate cdc subset\n"); + if (config_ep_by_speed(cdev->gadget, f, geth->port.in_ep) || + config_ep_by_speed(cdev->gadget, f, geth->port.out_ep)) { + geth->port.in_ep->desc = NULL; + geth->port.out_ep->desc = NULL; + return -EINVAL; + } + + net = gether_connect(&geth->port); + return PTR_ERR_OR_ZERO(net); +} + +static void geth_disable(struct usb_function *f) +{ + struct f_gether *geth = func_to_geth(f); + struct usb_composite_dev *cdev = f->config->cdev; + + DBG(cdev, "net deactivated\n"); + gether_disconnect(&geth->port); +} + +/*-------------------------------------------------------------------------*/ + +/* serial function driver setup/binding */ + +static int +geth_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct f_gether *geth = func_to_geth(f); + struct usb_string *us; + int status; + struct usb_ep *ep; + + struct f_gether_opts *gether_opts; + + gether_opts = container_of(f->fi, struct f_gether_opts, func_inst); + + /* + * in drivers/usb/gadget/configfs.c:configfs_composite_bind() + * configurations are bound in sequence with list_for_each_entry, + * in each configuration its functions are bound in sequence + * with list_for_each_entry, so we assume no race condition 
+ * with regard to gether_opts->bound access + */ + if (!gether_opts->bound) { + mutex_lock(&gether_opts->lock); + gether_set_gadget(gether_opts->net, cdev->gadget); + status = gether_register_netdev(gether_opts->net); + mutex_unlock(&gether_opts->lock); + if (status) + return status; + gether_opts->bound = true; + } + + us = usb_gstrings_attach(cdev, geth_strings, + ARRAY_SIZE(geth_string_defs)); + if (IS_ERR(us)) + return PTR_ERR(us); + + subset_data_intf.iInterface = us[0].id; + ether_desc.iMACAddress = us[1].id; + + /* allocate instance-specific interface IDs */ + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + subset_data_intf.bInterfaceNumber = status; + + status = -ENODEV; + + /* allocate instance-specific endpoints */ + ep = usb_ep_autoconfig(cdev->gadget, &fs_subset_in_desc); + if (!ep) + goto fail; + geth->port.in_ep = ep; + ep->driver_data = cdev; /* claim */ + + ep = usb_ep_autoconfig(cdev->gadget, &fs_subset_out_desc); + if (!ep) + goto fail; + geth->port.out_ep = ep; + ep->driver_data = cdev; /* claim */ + + /* support all relevant hardware speeds... we expect that when + * hardware is dual speed, all bulk-capable endpoints work at + * both speeds + */ + hs_subset_in_desc.bEndpointAddress = fs_subset_in_desc.bEndpointAddress; + hs_subset_out_desc.bEndpointAddress = + fs_subset_out_desc.bEndpointAddress; + + ss_subset_in_desc.bEndpointAddress = fs_subset_in_desc.bEndpointAddress; + ss_subset_out_desc.bEndpointAddress = + fs_subset_out_desc.bEndpointAddress; + + status = usb_assign_descriptors(f, fs_eth_function, hs_eth_function, + ss_eth_function); + if (status) + goto fail; + + /* NOTE: all that is done without knowing or caring about + * the network link ... which is unavailable to this code + * until we're activated via set_alt(). + */ + + DBG(cdev, "CDC Subset: %s speed IN/%s OUT/%s\n", + gadget_is_superspeed(c->cdev->gadget) ? "super" : + gadget_is_dualspeed(c->cdev->gadget) ? 
"dual" : "full", + geth->port.in_ep->name, geth->port.out_ep->name); + return 0; + +fail: + /* we might as well release our claims on endpoints */ + if (geth->port.out_ep) + geth->port.out_ep->driver_data = NULL; + if (geth->port.in_ep) + geth->port.in_ep->driver_data = NULL; + + ERROR(cdev, "%s: can't bind, err %d\n", f->name, status); + + return status; +} + +static inline struct f_gether_opts *to_f_gether_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_gether_opts, + func_inst.group); +} + +/* f_gether_item_ops */ +USB_ETHERNET_CONFIGFS_ITEM(gether); + +/* f_gether_opts_dev_addr */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(gether); + +/* f_gether_opts_host_addr */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(gether); + +/* f_gether_opts_qmult */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(gether); + +/* f_gether_opts_ifname */ +USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(gether); + +static struct configfs_attribute *gether_attrs[] = { + &f_gether_opts_dev_addr.attr, + &f_gether_opts_host_addr.attr, + &f_gether_opts_qmult.attr, + &f_gether_opts_ifname.attr, + NULL, +}; + +static struct config_item_type gether_func_type = { + .ct_item_ops = &gether_item_ops, + .ct_attrs = gether_attrs, + .ct_owner = THIS_MODULE, +}; + +static void geth_free_inst(struct usb_function_instance *f) +{ + struct f_gether_opts *opts; + + opts = container_of(f, struct f_gether_opts, func_inst); + if (opts->bound) + gether_cleanup(netdev_priv(opts->net)); + else + free_netdev(opts->net); + kfree(opts); +} + +static struct usb_function_instance *geth_alloc_inst(void) +{ + struct f_gether_opts *opts; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return ERR_PTR(-ENOMEM); + mutex_init(&opts->lock); + opts->func_inst.free_func_inst = geth_free_inst; + opts->net = gether_setup_default(); + if (IS_ERR(opts->net)) { + struct net_device *net = opts->net; + kfree(opts); + return ERR_CAST(net); + } + + config_group_init_type_name(&opts->func_inst.group, "", + &gether_func_type); + + return &opts->func_inst; +} + +static void geth_free(struct usb_function *f) +{ + struct f_gether *eth; + + eth = func_to_geth(f); + kfree(eth); +} + +static void geth_unbind(struct usb_configuration *c, struct usb_function *f) +{ + geth_string_defs[0].id = 0; + usb_free_all_descriptors(f); +} + +static struct usb_function *geth_alloc(struct usb_function_instance *fi) +{ + struct f_gether *geth; + struct f_gether_opts *opts; + int status; + + /* allocate and initialize one new instance */ + geth = kzalloc(sizeof(*geth), GFP_KERNEL); + if (!geth) + return ERR_PTR(-ENOMEM); + + opts = container_of(fi, struct f_gether_opts, func_inst); + + mutex_lock(&opts->lock); + opts->refcnt++; + /* export host's Ethernet address in CDC format */ + status = gether_get_host_addr_cdc(opts->net, geth->ethaddr, + sizeof(geth->ethaddr)); + if (status < 12) { + kfree(geth); + mutex_unlock(&opts->lock); + return ERR_PTR(-EINVAL); + } + geth_string_defs[1].s = geth->ethaddr; + + geth->port.ioport = netdev_priv(opts->net); + mutex_unlock(&opts->lock); + geth->port.cdc_filter = DEFAULT_FILTER; + + geth->port.func.name = "cdc_subset"; + geth->port.func.bind = geth_bind; + geth->port.func.unbind = geth_unbind; + geth->port.func.set_alt = geth_set_alt; + geth->port.func.disable = geth_disable; + geth->port.func.free_func = geth_free; + + return &geth->port.func; +} + +DECLARE_USB_FUNCTION_INIT(geth, geth_alloc_inst, geth_alloc); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("David Brownell"); diff --git 
a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c new file mode 100644 index 000000000..7856b3394 --- /dev/null +++ b/drivers/usb/gadget/function/f_uac1.c @@ -0,0 +1,992 @@ +/* + * f_audio.c -- USB Audio class function driver + * + * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org> + * Copyright (C) 2008 Analog Devices, Inc + * + * Enter bugs at http://blackfin.uclinux.org/ + * + * Licensed under the GPL-2 or later. + */ + +#include <linux/slab.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/atomic.h> + +#include "u_uac1.h" + +static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value); +static int generic_get_cmd(struct usb_audio_control *con, u8 cmd); + +/* + * DESCRIPTORS ... most are static, but strings and full + * configuration descriptors are built on demand. + */ + +/* + * We have two interfaces- AudioControl and AudioStreaming + * TODO: only supcard playback currently + */ +#define F_AUDIO_AC_INTERFACE 0 +#define F_AUDIO_AS_INTERFACE 1 +#define F_AUDIO_NUM_INTERFACES 1 + +/* B.3.1 Standard AC Interface Descriptor */ +static struct usb_interface_descriptor ac_interface_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bNumEndpoints = 0, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, +}; + +/* + * The number of AudioStreaming and MIDIStreaming interfaces + * in the Audio Interface Collection + */ +DECLARE_UAC_AC_HEADER_DESCRIPTOR(1); + +#define UAC_DT_AC_HEADER_LENGTH UAC_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES) +/* 1 input terminal, 1 output terminal and 1 feature unit */ +#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH + UAC_DT_INPUT_TERMINAL_SIZE \ + + UAC_DT_OUTPUT_TERMINAL_SIZE + UAC_DT_FEATURE_UNIT_SIZE(0)) +/* B.3.2 Class-Specific AC Interface Descriptor */ +static struct uac1_ac_header_descriptor_1 ac_header_desc = { + .bLength = UAC_DT_AC_HEADER_LENGTH, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = UAC_HEADER, + .bcdADC = __constant_cpu_to_le16(0x0100), + .wTotalLength = __constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH), + .bInCollection = F_AUDIO_NUM_INTERFACES, + .baInterfaceNr = { + /* Interface number of the first AudioStream interface */ + [0] = 1, + } +}; + +#define INPUT_TERMINAL_ID 1 +static struct uac_input_terminal_descriptor input_terminal_desc = { + .bLength = UAC_DT_INPUT_TERMINAL_SIZE, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = UAC_INPUT_TERMINAL, + .bTerminalID = INPUT_TERMINAL_ID, + .wTerminalType = UAC_TERMINAL_STREAMING, + .bAssocTerminal = 0, + .wChannelConfig = 0x3, +}; + +DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0); + +#define FEATURE_UNIT_ID 2 +static struct uac_feature_unit_descriptor_0 feature_unit_desc = { + .bLength = UAC_DT_FEATURE_UNIT_SIZE(0), + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = UAC_FEATURE_UNIT, + .bUnitID = FEATURE_UNIT_ID, + .bSourceID = INPUT_TERMINAL_ID, + .bControlSize = 2, + .bmaControls[0] = (UAC_FU_MUTE | UAC_FU_VOLUME), +}; + +static struct usb_audio_control mute_control = { + .list = LIST_HEAD_INIT(mute_control.list), + .name = "Mute Control", + .type = UAC_FU_MUTE, + /* Todo: add real Mute control code */ + .set = generic_set_cmd, + .get = generic_get_cmd, +}; + +static struct usb_audio_control volume_control = { + .list = LIST_HEAD_INIT(volume_control.list), + .name = "Volume Control", + .type = UAC_FU_VOLUME, + /* Todo: add real Volume control code */ + .set = generic_set_cmd, + .get = 
generic_get_cmd, +}; + +static struct usb_audio_control_selector feature_unit = { + .list = LIST_HEAD_INIT(feature_unit.list), + .id = FEATURE_UNIT_ID, + .name = "Mute & Volume Control", + .type = UAC_FEATURE_UNIT, + .desc = (struct usb_descriptor_header *)&feature_unit_desc, +}; + +#define OUTPUT_TERMINAL_ID 3 +static struct uac1_output_terminal_descriptor output_terminal_desc = { + .bLength = UAC_DT_OUTPUT_TERMINAL_SIZE, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = UAC_OUTPUT_TERMINAL, + .bTerminalID = OUTPUT_TERMINAL_ID, + .wTerminalType = UAC_OUTPUT_TERMINAL_SPEAKER, + .bAssocTerminal = FEATURE_UNIT_ID, + .bSourceID = FEATURE_UNIT_ID, +}; + +/* B.4.1 Standard AS Interface Descriptor */ +static struct usb_interface_descriptor as_interface_alt_0_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bAlternateSetting = 0, + .bNumEndpoints = 0, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING, +}; + +static struct usb_interface_descriptor as_interface_alt_1_desc = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bAlternateSetting = 1, + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING, +}; + +/* B.4.2 Class-Specific AS Interface Descriptor */ +static struct uac1_as_header_descriptor as_header_desc = { + .bLength = UAC_DT_AS_HEADER_SIZE, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = UAC_AS_GENERAL, + .bTerminalLink = INPUT_TERMINAL_ID, + .bDelay = 1, + .wFormatTag = UAC_FORMAT_TYPE_I_PCM, +}; + +DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1); + +static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = { + .bLength = UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1), + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = UAC_FORMAT_TYPE, + .bFormatType = UAC_FORMAT_TYPE_I, + .bSubframeSize = 2, + .bBitResolution = 16, + .bSamFreqType = 1, +}; + +/* Standard ISO OUT Endpoint Descriptor */ +static struct usb_endpoint_descriptor as_out_ep_desc = { + .bLength = USB_DT_ENDPOINT_AUDIO_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_SYNC_ADAPTIVE + | USB_ENDPOINT_XFER_ISOC, + .wMaxPacketSize = cpu_to_le16(UAC1_OUT_EP_MAX_PACKET_SIZE), + .bInterval = 4, +}; + +/* Class-specific AS ISO OUT Endpoint Descriptor */ +static struct uac_iso_endpoint_descriptor as_iso_out_desc = { + .bLength = UAC_ISO_ENDPOINT_DESC_SIZE, + .bDescriptorType = USB_DT_CS_ENDPOINT, + .bDescriptorSubtype = UAC_EP_GENERAL, + .bmAttributes = 1, + .bLockDelayUnits = 1, + .wLockDelay = __constant_cpu_to_le16(1), +}; + +static struct usb_descriptor_header *f_audio_desc[] = { + (struct usb_descriptor_header *)&ac_interface_desc, + (struct usb_descriptor_header *)&ac_header_desc, + + (struct usb_descriptor_header *)&input_terminal_desc, + (struct usb_descriptor_header *)&output_terminal_desc, + (struct usb_descriptor_header *)&feature_unit_desc, + + (struct usb_descriptor_header *)&as_interface_alt_0_desc, + (struct usb_descriptor_header *)&as_interface_alt_1_desc, + (struct usb_descriptor_header *)&as_header_desc, + + (struct usb_descriptor_header *)&as_type_i_desc, + + (struct usb_descriptor_header *)&as_out_ep_desc, + (struct usb_descriptor_header *)&as_iso_out_desc, + NULL, +}; + +enum { + STR_AC_IF, + STR_INPUT_TERMINAL, + STR_INPUT_TERMINAL_CH_NAMES, + STR_FEAT_DESC_0, + STR_OUTPUT_TERMINAL, + STR_AS_IF_ALT0, + STR_AS_IF_ALT1, +}; + +static struct 
usb_string strings_uac1[] = { + [STR_AC_IF].s = "AC Interface", + [STR_INPUT_TERMINAL].s = "Input terminal", + [STR_INPUT_TERMINAL_CH_NAMES].s = "Channels", + [STR_FEAT_DESC_0].s = "Volume control & mute", + [STR_OUTPUT_TERMINAL].s = "Output terminal", + [STR_AS_IF_ALT0].s = "AS Interface", + [STR_AS_IF_ALT1].s = "AS Interface", + { }, +}; + +static struct usb_gadget_strings str_uac1 = { + .language = 0x0409, /* en-us */ + .strings = strings_uac1, +}; + +static struct usb_gadget_strings *uac1_strings[] = { + &str_uac1, + NULL, +}; + +/* + * This function is an ALSA sound card following USB Audio Class Spec 1.0. + */ + +/*-------------------------------------------------------------------------*/ +struct f_audio_buf { + u8 *buf; + int actual; + struct list_head list; +}; + +static struct f_audio_buf *f_audio_buffer_alloc(int buf_size) +{ + struct f_audio_buf *copy_buf; + + copy_buf = kzalloc(sizeof *copy_buf, GFP_ATOMIC); + if (!copy_buf) + return ERR_PTR(-ENOMEM); + + copy_buf->buf = kzalloc(buf_size, GFP_ATOMIC); + if (!copy_buf->buf) { + kfree(copy_buf); + return ERR_PTR(-ENOMEM); + } + + return copy_buf; +} + +static void f_audio_buffer_free(struct f_audio_buf *audio_buf) +{ + kfree(audio_buf->buf); + kfree(audio_buf); +} +/*-------------------------------------------------------------------------*/ + +struct f_audio { + struct gaudio card; + + /* endpoints handle full and/or high speeds */ + struct usb_ep *out_ep; + + spinlock_t lock; + struct f_audio_buf *copy_buf; + struct work_struct playback_work; + struct list_head play_queue; + + /* Control Set command */ + struct list_head cs; + u8 set_cmd; + struct usb_audio_control *set_con; +}; + +static inline struct f_audio *func_to_audio(struct usb_function *f) +{ + return container_of(f, struct f_audio, card.func); +} + +/*-------------------------------------------------------------------------*/ + +static void f_audio_playback_work(struct work_struct *data) +{ + struct f_audio *audio = container_of(data, struct f_audio, + playback_work); + struct f_audio_buf *play_buf; + + spin_lock_irq(&audio->lock); + if (list_empty(&audio->play_queue)) { + spin_unlock_irq(&audio->lock); + return; + } + play_buf = list_first_entry(&audio->play_queue, + struct f_audio_buf, list); + list_del(&play_buf->list); + spin_unlock_irq(&audio->lock); + + u_audio_playback(&audio->card, play_buf->buf, play_buf->actual); + f_audio_buffer_free(play_buf); +} + +static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct f_audio *audio = req->context; + struct usb_composite_dev *cdev = audio->card.func.config->cdev; + struct f_audio_buf *copy_buf = audio->copy_buf; + struct f_uac1_opts *opts; + int audio_buf_size; + int err; + + opts = container_of(audio->card.func.fi, struct f_uac1_opts, + func_inst); + audio_buf_size = opts->audio_buf_size; + + if (!copy_buf) + return -EINVAL; + + /* Copy buffer is full, add it to the play_queue */ + if (audio_buf_size - copy_buf->actual < req->actual) { + list_add_tail(©_buf->list, &audio->play_queue); + schedule_work(&audio->playback_work); + copy_buf = f_audio_buffer_alloc(audio_buf_size); + if (IS_ERR(copy_buf)) + return -ENOMEM; + } + + memcpy(copy_buf->buf + copy_buf->actual, req->buf, req->actual); + copy_buf->actual += req->actual; + audio->copy_buf = copy_buf; + + err = usb_ep_queue(ep, req, GFP_ATOMIC); + if (err) + ERROR(cdev, "%s queue req: %d\n", ep->name, err); + + return 0; + +} + +static void f_audio_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct f_audio *audio = 
req->context; + int status = req->status; + u32 data = 0; + struct usb_ep *out_ep = audio->out_ep; + + switch (status) { + + case 0: /* normal completion? */ + if (ep == out_ep) + f_audio_out_ep_complete(ep, req); + else if (audio->set_con) { + memcpy(&data, req->buf, req->length); + audio->set_con->set(audio->set_con, audio->set_cmd, + le16_to_cpu(data)); + audio->set_con = NULL; + } + break; + default: + break; + } +} + +static int audio_set_intf_req(struct usb_function *f, + const struct usb_ctrlrequest *ctrl) +{ + struct f_audio *audio = func_to_audio(f); + struct usb_composite_dev *cdev = f->config->cdev; + struct usb_request *req = cdev->req; + u8 id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF); + u16 len = le16_to_cpu(ctrl->wLength); + u16 w_value = le16_to_cpu(ctrl->wValue); + u8 con_sel = (w_value >> 8) & 0xFF; + u8 cmd = (ctrl->bRequest & 0x0F); + struct usb_audio_control_selector *cs; + struct usb_audio_control *con; + + DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n", + ctrl->bRequest, w_value, len, id); + + list_for_each_entry(cs, &audio->cs, list) { + if (cs->id == id) { + list_for_each_entry(con, &cs->control, list) { + if (con->type == con_sel) { + audio->set_con = con; + break; + } + } + break; + } + } + + audio->set_cmd = cmd; + req->context = audio; + req->complete = f_audio_complete; + + return len; +} + +static int audio_get_intf_req(struct usb_function *f, + const struct usb_ctrlrequest *ctrl) +{ + struct f_audio *audio = func_to_audio(f); + struct usb_composite_dev *cdev = f->config->cdev; + struct usb_request *req = cdev->req; + int value = -EOPNOTSUPP; + u8 id = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF); + u16 len = le16_to_cpu(ctrl->wLength); + u16 w_value = le16_to_cpu(ctrl->wValue); + u8 con_sel = (w_value >> 8) & 0xFF; + u8 cmd = (ctrl->bRequest & 0x0F); + struct usb_audio_control_selector *cs; + struct usb_audio_control *con; + + DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, entity %d\n", + ctrl->bRequest, w_value, len, id); + + list_for_each_entry(cs, &audio->cs, list) { + if (cs->id == id) { + list_for_each_entry(con, &cs->control, list) { + if (con->type == con_sel && con->get) { + value = con->get(con, cmd); + break; + } + } + break; + } + } + + req->context = audio; + req->complete = f_audio_complete; + len = min_t(size_t, sizeof(value), len); + memcpy(req->buf, &value, len); + + return len; +} + +static int audio_set_endpoint_req(struct usb_function *f, + const struct usb_ctrlrequest *ctrl) +{ + struct usb_composite_dev *cdev = f->config->cdev; + int value = -EOPNOTSUPP; + u16 ep = le16_to_cpu(ctrl->wIndex); + u16 len = le16_to_cpu(ctrl->wLength); + u16 w_value = le16_to_cpu(ctrl->wValue); + + DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n", + ctrl->bRequest, w_value, len, ep); + + switch (ctrl->bRequest) { + case UAC_SET_CUR: + value = len; + break; + + case UAC_SET_MIN: + break; + + case UAC_SET_MAX: + break; + + case UAC_SET_RES: + break; + + case UAC_SET_MEM: + break; + + default: + break; + } + + return value; +} + +static int audio_get_endpoint_req(struct usb_function *f, + const struct usb_ctrlrequest *ctrl) +{ + struct usb_composite_dev *cdev = f->config->cdev; + int value = -EOPNOTSUPP; + u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF); + u16 len = le16_to_cpu(ctrl->wLength); + u16 w_value = le16_to_cpu(ctrl->wValue); + + DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n", + ctrl->bRequest, w_value, len, ep); + + switch (ctrl->bRequest) { + case UAC_GET_CUR: + case UAC_GET_MIN: + case 
UAC_GET_MAX: + case UAC_GET_RES: + value = len; + break; + case UAC_GET_MEM: + break; + default: + break; + } + + return value; +} + +static int +f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) +{ + struct usb_composite_dev *cdev = f->config->cdev; + struct usb_request *req = cdev->req; + int value = -EOPNOTSUPP; + u16 w_index = le16_to_cpu(ctrl->wIndex); + u16 w_value = le16_to_cpu(ctrl->wValue); + u16 w_length = le16_to_cpu(ctrl->wLength); + + /* composite driver infrastructure handles everything; interface + * activation uses set_alt(). + */ + switch (ctrl->bRequestType) { + case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE: + value = audio_set_intf_req(f, ctrl); + break; + + case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE: + value = audio_get_intf_req(f, ctrl); + break; + + case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT: + value = audio_set_endpoint_req(f, ctrl); + break; + + case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT: + value = audio_get_endpoint_req(f, ctrl); + break; + + default: + ERROR(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + } + + /* respond with data transfer or status phase? */ + if (value >= 0) { + DBG(cdev, "audio req%02x.%02x v%04x i%04x l%d\n", + ctrl->bRequestType, ctrl->bRequest, + w_value, w_index, w_length); + req->zero = 0; + req->length = value; + value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + if (value < 0) + ERROR(cdev, "audio response on err %d\n", value); + } + + /* device either stalls (value < 0) or reports success */ + return value; +} + +static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt) +{ + struct f_audio *audio = func_to_audio(f); + struct usb_composite_dev *cdev = f->config->cdev; + struct usb_ep *out_ep = audio->out_ep; + struct usb_request *req; + struct f_uac1_opts *opts; + int req_buf_size, req_count, audio_buf_size; + int i = 0, err = 0; + + DBG(cdev, "intf %d, alt %d\n", intf, alt); + + opts = container_of(f->fi, struct f_uac1_opts, func_inst); + req_buf_size = opts->req_buf_size; + req_count = opts->req_count; + audio_buf_size = opts->audio_buf_size; + + if (intf == 1) { + if (alt == 1) { + err = config_ep_by_speed(cdev->gadget, f, out_ep); + if (err) + return err; + + usb_ep_enable(out_ep); + out_ep->driver_data = audio; + audio->copy_buf = f_audio_buffer_alloc(audio_buf_size); + if (IS_ERR(audio->copy_buf)) + return -ENOMEM; + + /* + * allocate a bunch of read buffers + * and queue them all at once. 
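+ * Each request gets a kzalloc'd buffer of req_buf_size bytes and
+ * completes into f_audio_complete(), which copies OUT data via
+ * f_audio_out_ep_complete() and requeues the request.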
+ */ + for (i = 0; i < req_count && err == 0; i++) { + req = usb_ep_alloc_request(out_ep, GFP_ATOMIC); + if (req) { + req->buf = kzalloc(req_buf_size, + GFP_ATOMIC); + if (req->buf) { + req->length = req_buf_size; + req->context = audio; + req->complete = + f_audio_complete; + err = usb_ep_queue(out_ep, + req, GFP_ATOMIC); + if (err) + ERROR(cdev, + "%s queue req: %d\n", + out_ep->name, err); + } else + err = -ENOMEM; + } else + err = -ENOMEM; + } + + } else { + struct f_audio_buf *copy_buf = audio->copy_buf; + if (copy_buf) { + list_add_tail(©_buf->list, + &audio->play_queue); + schedule_work(&audio->playback_work); + } + } + } + + return err; +} + +static void f_audio_disable(struct usb_function *f) +{ + return; +} + +/*-------------------------------------------------------------------------*/ + +static void f_audio_build_desc(struct f_audio *audio) +{ + struct gaudio *card = &audio->card; + u8 *sam_freq; + int rate; + + /* Set channel numbers */ + input_terminal_desc.bNrChannels = u_audio_get_playback_channels(card); + as_type_i_desc.bNrChannels = u_audio_get_playback_channels(card); + + /* Set sample rates */ + rate = u_audio_get_playback_rate(card); + sam_freq = as_type_i_desc.tSamFreq[0]; + memcpy(sam_freq, &rate, 3); + + /* Todo: Set Sample bits and other parameters */ + + return; +} + +/* audio function driver setup/binding */ +static int +f_audio_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct f_audio *audio = func_to_audio(f); + struct usb_string *us; + int status; + struct usb_ep *ep = NULL; + struct f_uac1_opts *audio_opts; + + audio_opts = container_of(f->fi, struct f_uac1_opts, func_inst); + audio->card.gadget = c->cdev->gadget; + /* set up ASLA audio devices */ + if (!audio_opts->bound) { + status = gaudio_setup(&audio->card); + if (status < 0) + return status; + audio_opts->bound = true; + } + us = usb_gstrings_attach(cdev, uac1_strings, ARRAY_SIZE(strings_uac1)); + if (IS_ERR(us)) + return PTR_ERR(us); + ac_interface_desc.iInterface = us[STR_AC_IF].id; + input_terminal_desc.iTerminal = us[STR_INPUT_TERMINAL].id; + input_terminal_desc.iChannelNames = us[STR_INPUT_TERMINAL_CH_NAMES].id; + feature_unit_desc.iFeature = us[STR_FEAT_DESC_0].id; + output_terminal_desc.iTerminal = us[STR_OUTPUT_TERMINAL].id; + as_interface_alt_0_desc.iInterface = us[STR_AS_IF_ALT0].id; + as_interface_alt_1_desc.iInterface = us[STR_AS_IF_ALT1].id; + + + f_audio_build_desc(audio); + + /* allocate instance-specific interface IDs, and patch descriptors */ + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + ac_interface_desc.bInterfaceNumber = status; + + status = usb_interface_id(c, f); + if (status < 0) + goto fail; + as_interface_alt_0_desc.bInterfaceNumber = status; + as_interface_alt_1_desc.bInterfaceNumber = status; + + status = -ENODEV; + + /* allocate instance-specific endpoints */ + ep = usb_ep_autoconfig(cdev->gadget, &as_out_ep_desc); + if (!ep) + goto fail; + audio->out_ep = ep; + audio->out_ep->desc = &as_out_ep_desc; + ep->driver_data = cdev; /* claim */ + + status = -ENOMEM; + + /* copy descriptors, and track endpoint copies */ + status = usb_assign_descriptors(f, f_audio_desc, f_audio_desc, NULL); + if (status) + goto fail; + return 0; + +fail: + gaudio_cleanup(&audio->card); + if (ep) + ep->driver_data = NULL; + return status; +} + +/*-------------------------------------------------------------------------*/ + +static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value) +{ + 
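+ /* The generic handlers just cache the last value the host wrote for
+ * each command code in con->data[] and hand it back on GET requests;
+ * wiring them to real mixer controls is the Todo noted above. */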
con->data[cmd] = value; + + return 0; +} + +static int generic_get_cmd(struct usb_audio_control *con, u8 cmd) +{ + return con->data[cmd]; +} + +/* Todo: add more control selecotor dynamically */ +static int control_selector_init(struct f_audio *audio) +{ + INIT_LIST_HEAD(&audio->cs); + list_add(&feature_unit.list, &audio->cs); + + INIT_LIST_HEAD(&feature_unit.control); + list_add(&mute_control.list, &feature_unit.control); + list_add(&volume_control.list, &feature_unit.control); + + volume_control.data[UAC__CUR] = 0xffc0; + volume_control.data[UAC__MIN] = 0xe3a0; + volume_control.data[UAC__MAX] = 0xfff0; + volume_control.data[UAC__RES] = 0x0030; + + return 0; +} + +static inline struct f_uac1_opts *to_f_uac1_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_uac1_opts, + func_inst.group); +} + +CONFIGFS_ATTR_STRUCT(f_uac1_opts); +CONFIGFS_ATTR_OPS(f_uac1_opts); + +static void f_uac1_attr_release(struct config_item *item) +{ + struct f_uac1_opts *opts = to_f_uac1_opts(item); + + usb_put_function_instance(&opts->func_inst); +} + +static struct configfs_item_operations f_uac1_item_ops = { + .release = f_uac1_attr_release, + .show_attribute = f_uac1_opts_attr_show, + .store_attribute = f_uac1_opts_attr_store, +}; + +#define UAC1_INT_ATTRIBUTE(name) \ +static ssize_t f_uac1_opts_##name##_show(struct f_uac1_opts *opts, \ + char *page) \ +{ \ + int result; \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%u\n", opts->name); \ + mutex_unlock(&opts->lock); \ + \ + return result; \ +} \ + \ +static ssize_t f_uac1_opts_##name##_store(struct f_uac1_opts *opts, \ + const char *page, size_t len) \ +{ \ + int ret; \ + u32 num; \ + \ + mutex_lock(&opts->lock); \ + if (opts->refcnt) { \ + ret = -EBUSY; \ + goto end; \ + } \ + \ + ret = kstrtou32(page, 0, &num); \ + if (ret) \ + goto end; \ + \ + opts->name = num; \ + ret = len; \ + \ +end: \ + mutex_unlock(&opts->lock); \ + return ret; \ +} \ + \ +static struct f_uac1_opts_attribute f_uac1_opts_##name = \ + __CONFIGFS_ATTR(name, S_IRUGO | S_IWUSR, \ + f_uac1_opts_##name##_show, \ + f_uac1_opts_##name##_store) + +UAC1_INT_ATTRIBUTE(req_buf_size); +UAC1_INT_ATTRIBUTE(req_count); +UAC1_INT_ATTRIBUTE(audio_buf_size); + +#define UAC1_STR_ATTRIBUTE(name) \ +static ssize_t f_uac1_opts_##name##_show(struct f_uac1_opts *opts, \ + char *page) \ +{ \ + int result; \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%s\n", opts->name); \ + mutex_unlock(&opts->lock); \ + \ + return result; \ +} \ + \ +static ssize_t f_uac1_opts_##name##_store(struct f_uac1_opts *opts, \ + const char *page, size_t len) \ +{ \ + int ret = -EBUSY; \ + char *tmp; \ + \ + mutex_lock(&opts->lock); \ + if (opts->refcnt) \ + goto end; \ + \ + tmp = kstrndup(page, len, GFP_KERNEL); \ + if (tmp) { \ + ret = -ENOMEM; \ + goto end; \ + } \ + if (opts->name##_alloc) \ + kfree(opts->name); \ + opts->name##_alloc = true; \ + opts->name = tmp; \ + ret = len; \ + \ +end: \ + mutex_unlock(&opts->lock); \ + return ret; \ +} \ + \ +static struct f_uac1_opts_attribute f_uac1_opts_##name = \ + __CONFIGFS_ATTR(name, S_IRUGO | S_IWUSR, \ + f_uac1_opts_##name##_show, \ + f_uac1_opts_##name##_store) + +UAC1_STR_ATTRIBUTE(fn_play); +UAC1_STR_ATTRIBUTE(fn_cap); +UAC1_STR_ATTRIBUTE(fn_cntl); + +static struct configfs_attribute *f_uac1_attrs[] = { + &f_uac1_opts_req_buf_size.attr, + &f_uac1_opts_req_count.attr, + &f_uac1_opts_audio_buf_size.attr, + &f_uac1_opts_fn_play.attr, + &f_uac1_opts_fn_cap.attr, + &f_uac1_opts_fn_cntl.attr, + NULL, +}; + +static 
struct config_item_type f_uac1_func_type = { + .ct_item_ops = &f_uac1_item_ops, + .ct_attrs = f_uac1_attrs, + .ct_owner = THIS_MODULE, +}; + +static void f_audio_free_inst(struct usb_function_instance *f) +{ + struct f_uac1_opts *opts; + + opts = container_of(f, struct f_uac1_opts, func_inst); + if (opts->fn_play_alloc) + kfree(opts->fn_play); + if (opts->fn_cap_alloc) + kfree(opts->fn_cap); + if (opts->fn_cntl_alloc) + kfree(opts->fn_cntl); + kfree(opts); +} + +static struct usb_function_instance *f_audio_alloc_inst(void) +{ + struct f_uac1_opts *opts; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return ERR_PTR(-ENOMEM); + + mutex_init(&opts->lock); + opts->func_inst.free_func_inst = f_audio_free_inst; + + config_group_init_type_name(&opts->func_inst.group, "", + &f_uac1_func_type); + + opts->req_buf_size = UAC1_OUT_EP_MAX_PACKET_SIZE; + opts->req_count = UAC1_REQ_COUNT; + opts->audio_buf_size = UAC1_AUDIO_BUF_SIZE; + opts->fn_play = FILE_PCM_PLAYBACK; + opts->fn_cap = FILE_PCM_CAPTURE; + opts->fn_cntl = FILE_CONTROL; + return &opts->func_inst; +} + +static void f_audio_free(struct usb_function *f) +{ + struct f_audio *audio = func_to_audio(f); + struct f_uac1_opts *opts; + + gaudio_cleanup(&audio->card); + opts = container_of(f->fi, struct f_uac1_opts, func_inst); + kfree(audio); + mutex_lock(&opts->lock); + --opts->refcnt; + mutex_unlock(&opts->lock); +} + +static void f_audio_unbind(struct usb_configuration *c, struct usb_function *f) +{ + usb_free_all_descriptors(f); +} + +static struct usb_function *f_audio_alloc(struct usb_function_instance *fi) +{ + struct f_audio *audio; + struct f_uac1_opts *opts; + + /* allocate and initialize one new instance */ + audio = kzalloc(sizeof(*audio), GFP_KERNEL); + if (!audio) + return ERR_PTR(-ENOMEM); + + audio->card.func.name = "g_audio"; + + opts = container_of(fi, struct f_uac1_opts, func_inst); + mutex_lock(&opts->lock); + ++opts->refcnt; + mutex_unlock(&opts->lock); + INIT_LIST_HEAD(&audio->play_queue); + spin_lock_init(&audio->lock); + + audio->card.func.bind = f_audio_bind; + audio->card.func.unbind = f_audio_unbind; + audio->card.func.set_alt = f_audio_set_alt; + audio->card.func.setup = f_audio_setup; + audio->card.func.disable = f_audio_disable; + audio->card.func.free_func = f_audio_free; + + control_selector_init(audio); + + INIT_WORK(&audio->playback_work, f_audio_playback_work); + + return &audio->card.func; +} + +DECLARE_USB_FUNCTION_INIT(uac1, f_audio_alloc_inst, f_audio_alloc); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Bryan Wu"); diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c new file mode 100644 index 000000000..6d3eb8b00 --- /dev/null +++ b/drivers/usb/gadget/function/f_uac2.c @@ -0,0 +1,1595 @@ +/* + * f_uac2.c -- USB Audio Class 2.0 Function + * + * Copyright (C) 2011 + * Yadwinder Singh (yadi.brar01@gmail.com) + * Jaswinder Singh (jaswinder.singh@linaro.org) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/usb/audio.h> +#include <linux/usb/audio-v2.h> +#include <linux/platform_device.h> +#include <linux/module.h> + +#include <sound/core.h> +#include <sound/pcm.h> +#include <sound/pcm_params.h> + +#include "u_uac2.h" + +/* Keep everyone on toes */ +#define USB_XFERS 2 + +/* + * The driver implements a simple UAC_2 topology. 
+ * USB-OUT -> IT_1 -> OT_3 -> ALSA_Capture + * ALSA_Playback -> IT_2 -> OT_4 -> USB-IN + * Capture and Playback sampling rates are independently + * controlled by two clock sources : + * CLK_5 := c_srate, and CLK_6 := p_srate + */ +#define USB_OUT_IT_ID 1 +#define IO_IN_IT_ID 2 +#define IO_OUT_OT_ID 3 +#define USB_IN_OT_ID 4 +#define USB_OUT_CLK_ID 5 +#define USB_IN_CLK_ID 6 + +#define CONTROL_ABSENT 0 +#define CONTROL_RDONLY 1 +#define CONTROL_RDWR 3 + +#define CLK_FREQ_CTRL 0 +#define CLK_VLD_CTRL 2 + +#define COPY_CTRL 0 +#define CONN_CTRL 2 +#define OVRLD_CTRL 4 +#define CLSTR_CTRL 6 +#define UNFLW_CTRL 8 +#define OVFLW_CTRL 10 + +static const char *uac2_name = "snd_uac2"; + +struct uac2_req { + struct uac2_rtd_params *pp; /* parent param */ + struct usb_request *req; +}; + +struct uac2_rtd_params { + struct snd_uac2_chip *uac2; /* parent chip */ + bool ep_enabled; /* if the ep is enabled */ + /* Size of the ring buffer */ + size_t dma_bytes; + unsigned char *dma_area; + + struct snd_pcm_substream *ss; + + /* Ring buffer */ + ssize_t hw_ptr; + + void *rbuf; + + size_t period_size; + + unsigned max_psize; + struct uac2_req ureq[USB_XFERS]; + + spinlock_t lock; +}; + +struct snd_uac2_chip { + struct platform_device pdev; + struct platform_driver pdrv; + + struct uac2_rtd_params p_prm; + struct uac2_rtd_params c_prm; + + struct snd_card *card; + struct snd_pcm *pcm; + + /* timekeeping for the playback endpoint */ + unsigned int p_interval; + unsigned int p_residue; + + /* pre-calculated values for playback iso completion */ + unsigned int p_pktsize; + unsigned int p_pktsize_residue; + unsigned int p_framesize; +}; + +#define BUFF_SIZE_MAX (PAGE_SIZE * 16) +#define PRD_SIZE_MAX PAGE_SIZE +#define MIN_PERIODS 4 + +static struct snd_pcm_hardware uac2_pcm_hardware = { + .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER + | SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID + | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME, + .rates = SNDRV_PCM_RATE_CONTINUOUS, + .periods_max = BUFF_SIZE_MAX / PRD_SIZE_MAX, + .buffer_bytes_max = BUFF_SIZE_MAX, + .period_bytes_max = PRD_SIZE_MAX, + .periods_min = MIN_PERIODS, +}; + +struct audio_dev { + u8 ac_intf, ac_alt; + u8 as_out_intf, as_out_alt; + u8 as_in_intf, as_in_alt; + + struct usb_ep *in_ep, *out_ep; + struct usb_function func; + + /* The ALSA Sound Card it represents on the USB-Client side */ + struct snd_uac2_chip uac2; +}; + +static inline +struct audio_dev *func_to_agdev(struct usb_function *f) +{ + return container_of(f, struct audio_dev, func); +} + +static inline +struct audio_dev *uac2_to_agdev(struct snd_uac2_chip *u) +{ + return container_of(u, struct audio_dev, uac2); +} + +static inline +struct snd_uac2_chip *pdev_to_uac2(struct platform_device *p) +{ + return container_of(p, struct snd_uac2_chip, pdev); +} + +static inline +struct f_uac2_opts *agdev_to_uac2_opts(struct audio_dev *agdev) +{ + return container_of(agdev->func.fi, struct f_uac2_opts, func_inst); +} + +static inline +uint num_channels(uint chanmask) +{ + uint num = 0; + + while (chanmask) { + num += (chanmask & 1); + chanmask >>= 1; + } + + return num; +} + +static void +agdev_iso_complete(struct usb_ep *ep, struct usb_request *req) +{ + unsigned pending; + unsigned long flags; + unsigned int hw_ptr; + bool update_alsa = false; + int status = req->status; + struct uac2_req *ur = req->context; + struct snd_pcm_substream *substream; + struct uac2_rtd_params *prm = ur->pp; + struct snd_uac2_chip *uac2 = prm->uac2; + + /* i/f shutting down */ + if 
(!prm->ep_enabled || req->status == -ESHUTDOWN) + return; + + /* + * We can't really do much about bad xfers. + * Afterall, the ISOCH xfers could fail legitimately. + */ + if (status) + pr_debug("%s: iso_complete status(%d) %d/%d\n", + __func__, status, req->actual, req->length); + + substream = prm->ss; + + /* Do nothing if ALSA isn't active */ + if (!substream) + goto exit; + + spin_lock_irqsave(&prm->lock, flags); + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + /* + * For each IN packet, take the quotient of the current data + * rate and the endpoint's interval as the base packet size. + * If there is a residue from this division, add it to the + * residue accumulator. + */ + req->length = uac2->p_pktsize; + uac2->p_residue += uac2->p_pktsize_residue; + + /* + * Whenever there are more bytes in the accumulator than we + * need to add one more sample frame, increase this packet's + * size and decrease the accumulator. + */ + if (uac2->p_residue / uac2->p_interval >= uac2->p_framesize) { + req->length += uac2->p_framesize; + uac2->p_residue -= uac2->p_framesize * + uac2->p_interval; + } + + req->actual = req->length; + } + + pending = prm->hw_ptr % prm->period_size; + pending += req->actual; + if (pending >= prm->period_size) + update_alsa = true; + + hw_ptr = prm->hw_ptr; + prm->hw_ptr = (prm->hw_ptr + req->actual) % prm->dma_bytes; + + spin_unlock_irqrestore(&prm->lock, flags); + + /* Pack USB load in ALSA ring buffer */ + pending = prm->dma_bytes - hw_ptr; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + if (unlikely(pending < req->actual)) { + memcpy(req->buf, prm->dma_area + hw_ptr, pending); + memcpy(req->buf + pending, prm->dma_area, + req->actual - pending); + } else { + memcpy(req->buf, prm->dma_area + hw_ptr, req->actual); + } + } else { + if (unlikely(pending < req->actual)) { + memcpy(prm->dma_area + hw_ptr, req->buf, pending); + memcpy(prm->dma_area, req->buf + pending, + req->actual - pending); + } else { + memcpy(prm->dma_area + hw_ptr, req->buf, req->actual); + } + } + +exit: + if (usb_ep_queue(ep, req, GFP_ATOMIC)) + dev_err(&uac2->pdev.dev, "%d Error!\n", __LINE__); + + if (update_alsa) + snd_pcm_period_elapsed(substream); + + return; +} + +static int +uac2_pcm_trigger(struct snd_pcm_substream *substream, int cmd) +{ + struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream); + struct uac2_rtd_params *prm; + unsigned long flags; + int err = 0; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + prm = &uac2->p_prm; + else + prm = &uac2->c_prm; + + spin_lock_irqsave(&prm->lock, flags); + + /* Reset */ + prm->hw_ptr = 0; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + prm->ss = substream; + break; + case SNDRV_PCM_TRIGGER_STOP: + case SNDRV_PCM_TRIGGER_SUSPEND: + prm->ss = NULL; + break; + default: + err = -EINVAL; + } + + spin_unlock_irqrestore(&prm->lock, flags); + + /* Clear buffer after Play stops */ + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && !prm->ss) + memset(prm->rbuf, 0, prm->max_psize * USB_XFERS); + + return err; +} + +static snd_pcm_uframes_t uac2_pcm_pointer(struct snd_pcm_substream *substream) +{ + struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream); + struct uac2_rtd_params *prm; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + prm = &uac2->p_prm; + else + prm = &uac2->c_prm; + + return bytes_to_frames(substream->runtime, prm->hw_ptr); +} + +static int uac2_pcm_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *hw_params) +{ + 
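+ /* snd_pcm_lib_malloc_pages() backs the substream from the buffer
+ * preallocated in snd_uac2_probe(); its address, size and period are
+ * cached in the rtd params so agdev_iso_complete() can copy ISO
+ * payloads straight to/from the ALSA ring buffer. */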
struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream); + struct uac2_rtd_params *prm; + int err; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + prm = &uac2->p_prm; + else + prm = &uac2->c_prm; + + err = snd_pcm_lib_malloc_pages(substream, + params_buffer_bytes(hw_params)); + if (err >= 0) { + prm->dma_bytes = substream->runtime->dma_bytes; + prm->dma_area = substream->runtime->dma_area; + prm->period_size = params_period_bytes(hw_params); + } + + return err; +} + +static int uac2_pcm_hw_free(struct snd_pcm_substream *substream) +{ + struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream); + struct uac2_rtd_params *prm; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) + prm = &uac2->p_prm; + else + prm = &uac2->c_prm; + + prm->dma_area = NULL; + prm->dma_bytes = 0; + prm->period_size = 0; + + return snd_pcm_lib_free_pages(substream); +} + +static int uac2_pcm_open(struct snd_pcm_substream *substream) +{ + struct snd_uac2_chip *uac2 = snd_pcm_substream_chip(substream); + struct snd_pcm_runtime *runtime = substream->runtime; + struct audio_dev *audio_dev; + struct f_uac2_opts *opts; + int p_ssize, c_ssize; + int p_srate, c_srate; + int p_chmask, c_chmask; + + audio_dev = uac2_to_agdev(uac2); + opts = container_of(audio_dev->func.fi, struct f_uac2_opts, func_inst); + p_ssize = opts->p_ssize; + c_ssize = opts->c_ssize; + p_srate = opts->p_srate; + c_srate = opts->c_srate; + p_chmask = opts->p_chmask; + c_chmask = opts->c_chmask; + uac2->p_residue = 0; + + runtime->hw = uac2_pcm_hardware; + + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { + spin_lock_init(&uac2->p_prm.lock); + runtime->hw.rate_min = p_srate; + switch (p_ssize) { + case 3: + runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_3LE; + break; + case 4: + runtime->hw.formats = SNDRV_PCM_FMTBIT_S32_LE; + break; + default: + runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE; + break; + } + runtime->hw.channels_min = num_channels(p_chmask); + runtime->hw.period_bytes_min = 2 * uac2->p_prm.max_psize + / runtime->hw.periods_min; + } else { + spin_lock_init(&uac2->c_prm.lock); + runtime->hw.rate_min = c_srate; + switch (c_ssize) { + case 3: + runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_3LE; + break; + case 4: + runtime->hw.formats = SNDRV_PCM_FMTBIT_S32_LE; + break; + default: + runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE; + break; + } + runtime->hw.channels_min = num_channels(c_chmask); + runtime->hw.period_bytes_min = 2 * uac2->c_prm.max_psize + / runtime->hw.periods_min; + } + + runtime->hw.rate_max = runtime->hw.rate_min; + runtime->hw.channels_max = runtime->hw.channels_min; + + snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); + + return 0; +} + +/* ALSA cries without these function pointers */ +static int uac2_pcm_null(struct snd_pcm_substream *substream) +{ + return 0; +} + +static struct snd_pcm_ops uac2_pcm_ops = { + .open = uac2_pcm_open, + .close = uac2_pcm_null, + .ioctl = snd_pcm_lib_ioctl, + .hw_params = uac2_pcm_hw_params, + .hw_free = uac2_pcm_hw_free, + .trigger = uac2_pcm_trigger, + .pointer = uac2_pcm_pointer, + .prepare = uac2_pcm_null, +}; + +static int snd_uac2_probe(struct platform_device *pdev) +{ + struct snd_uac2_chip *uac2 = pdev_to_uac2(pdev); + struct snd_card *card; + struct snd_pcm *pcm; + struct audio_dev *audio_dev; + struct f_uac2_opts *opts; + int err; + int p_chmask, c_chmask; + + audio_dev = uac2_to_agdev(uac2); + opts = container_of(audio_dev->func.fi, struct f_uac2_opts, func_inst); + p_chmask = opts->p_chmask; + c_chmask = opts->c_chmask; + + /* Choose 
any slot, with no id */ + err = snd_card_new(&pdev->dev, -1, NULL, THIS_MODULE, 0, &card); + if (err < 0) + return err; + + uac2->card = card; + + /* + * Create first PCM device + * Create a substream only for non-zero channel streams + */ + err = snd_pcm_new(uac2->card, "UAC2 PCM", 0, + p_chmask ? 1 : 0, c_chmask ? 1 : 0, &pcm); + if (err < 0) + goto snd_fail; + + strcpy(pcm->name, "UAC2 PCM"); + pcm->private_data = uac2; + + uac2->pcm = pcm; + + snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac2_pcm_ops); + snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac2_pcm_ops); + + strcpy(card->driver, "UAC2_Gadget"); + strcpy(card->shortname, "UAC2_Gadget"); + sprintf(card->longname, "UAC2_Gadget %i", pdev->id); + + snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS, + snd_dma_continuous_data(GFP_KERNEL), 0, BUFF_SIZE_MAX); + + err = snd_card_register(card); + if (!err) { + platform_set_drvdata(pdev, card); + return 0; + } + +snd_fail: + snd_card_free(card); + + uac2->pcm = NULL; + uac2->card = NULL; + + return err; +} + +static int snd_uac2_remove(struct platform_device *pdev) +{ + struct snd_card *card = platform_get_drvdata(pdev); + + if (card) + return snd_card_free(card); + + return 0; +} + +static void snd_uac2_release(struct device *dev) +{ + dev_dbg(dev, "releasing '%s'\n", dev_name(dev)); +} + +static int alsa_uac2_init(struct audio_dev *agdev) +{ + struct snd_uac2_chip *uac2 = &agdev->uac2; + int err; + + uac2->pdrv.probe = snd_uac2_probe; + uac2->pdrv.remove = snd_uac2_remove; + uac2->pdrv.driver.name = uac2_name; + + uac2->pdev.id = 0; + uac2->pdev.name = uac2_name; + uac2->pdev.dev.release = snd_uac2_release; + + /* Register snd_uac2 driver */ + err = platform_driver_register(&uac2->pdrv); + if (err) + return err; + + /* Register snd_uac2 device */ + err = platform_device_register(&uac2->pdev); + if (err) + platform_driver_unregister(&uac2->pdrv); + + return err; +} + +static void alsa_uac2_exit(struct audio_dev *agdev) +{ + struct snd_uac2_chip *uac2 = &agdev->uac2; + + platform_driver_unregister(&uac2->pdrv); + platform_device_unregister(&uac2->pdev); +} + + +/* --------- USB Function Interface ------------- */ + +enum { + STR_ASSOC, + STR_IF_CTRL, + STR_CLKSRC_IN, + STR_CLKSRC_OUT, + STR_USB_IT, + STR_IO_IT, + STR_USB_OT, + STR_IO_OT, + STR_AS_OUT_ALT0, + STR_AS_OUT_ALT1, + STR_AS_IN_ALT0, + STR_AS_IN_ALT1, +}; + +static char clksrc_in[8]; +static char clksrc_out[8]; + +static struct usb_string strings_fn[] = { + [STR_ASSOC].s = "Source/Sink", + [STR_IF_CTRL].s = "Topology Control", + [STR_CLKSRC_IN].s = clksrc_in, + [STR_CLKSRC_OUT].s = clksrc_out, + [STR_USB_IT].s = "USBH Out", + [STR_IO_IT].s = "USBD Out", + [STR_USB_OT].s = "USBH In", + [STR_IO_OT].s = "USBD In", + [STR_AS_OUT_ALT0].s = "Playback Inactive", + [STR_AS_OUT_ALT1].s = "Playback Active", + [STR_AS_IN_ALT0].s = "Capture Inactive", + [STR_AS_IN_ALT1].s = "Capture Active", + { }, +}; + +static struct usb_gadget_strings str_fn = { + .language = 0x0409, /* en-us */ + .strings = strings_fn, +}; + +static struct usb_gadget_strings *fn_strings[] = { + &str_fn, + NULL, +}; + +static struct usb_qualifier_descriptor devqual_desc = { + .bLength = sizeof devqual_desc, + .bDescriptorType = USB_DT_DEVICE_QUALIFIER, + + .bcdUSB = cpu_to_le16(0x200), + .bDeviceClass = USB_CLASS_MISC, + .bDeviceSubClass = 0x02, + .bDeviceProtocol = 0x01, + .bNumConfigurations = 1, + .bRESERVED = 0, +}; + +static struct usb_interface_assoc_descriptor iad_desc = { + .bLength = sizeof iad_desc, + .bDescriptorType = 
USB_DT_INTERFACE_ASSOCIATION, + + .bFirstInterface = 0, + .bInterfaceCount = 3, + .bFunctionClass = USB_CLASS_AUDIO, + .bFunctionSubClass = UAC2_FUNCTION_SUBCLASS_UNDEFINED, + .bFunctionProtocol = UAC_VERSION_2, +}; + +/* Audio Control Interface */ +static struct usb_interface_descriptor std_ac_if_desc = { + .bLength = sizeof std_ac_if_desc, + .bDescriptorType = USB_DT_INTERFACE, + + .bAlternateSetting = 0, + .bNumEndpoints = 0, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, + .bInterfaceProtocol = UAC_VERSION_2, +}; + +/* Clock source for IN traffic */ +static struct uac_clock_source_descriptor in_clk_src_desc = { + .bLength = sizeof in_clk_src_desc, + .bDescriptorType = USB_DT_CS_INTERFACE, + + .bDescriptorSubtype = UAC2_CLOCK_SOURCE, + .bClockID = USB_IN_CLK_ID, + .bmAttributes = UAC_CLOCK_SOURCE_TYPE_INT_FIXED, + .bmControls = (CONTROL_RDONLY << CLK_FREQ_CTRL), + .bAssocTerminal = 0, +}; + +/* Clock source for OUT traffic */ +static struct uac_clock_source_descriptor out_clk_src_desc = { + .bLength = sizeof out_clk_src_desc, + .bDescriptorType = USB_DT_CS_INTERFACE, + + .bDescriptorSubtype = UAC2_CLOCK_SOURCE, + .bClockID = USB_OUT_CLK_ID, + .bmAttributes = UAC_CLOCK_SOURCE_TYPE_INT_FIXED, + .bmControls = (CONTROL_RDONLY << CLK_FREQ_CTRL), + .bAssocTerminal = 0, +}; + +/* Input Terminal for USB_OUT */ +static struct uac2_input_terminal_descriptor usb_out_it_desc = { + .bLength = sizeof usb_out_it_desc, + .bDescriptorType = USB_DT_CS_INTERFACE, + + .bDescriptorSubtype = UAC_INPUT_TERMINAL, + .bTerminalID = USB_OUT_IT_ID, + .wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING), + .bAssocTerminal = 0, + .bCSourceID = USB_OUT_CLK_ID, + .iChannelNames = 0, + .bmControls = (CONTROL_RDWR << COPY_CTRL), +}; + +/* Input Terminal for I/O-In */ +static struct uac2_input_terminal_descriptor io_in_it_desc = { + .bLength = sizeof io_in_it_desc, + .bDescriptorType = USB_DT_CS_INTERFACE, + + .bDescriptorSubtype = UAC_INPUT_TERMINAL, + .bTerminalID = IO_IN_IT_ID, + .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_UNDEFINED), + .bAssocTerminal = 0, + .bCSourceID = USB_IN_CLK_ID, + .iChannelNames = 0, + .bmControls = (CONTROL_RDWR << COPY_CTRL), +}; + +/* Ouput Terminal for USB_IN */ +static struct uac2_output_terminal_descriptor usb_in_ot_desc = { + .bLength = sizeof usb_in_ot_desc, + .bDescriptorType = USB_DT_CS_INTERFACE, + + .bDescriptorSubtype = UAC_OUTPUT_TERMINAL, + .bTerminalID = USB_IN_OT_ID, + .wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING), + .bAssocTerminal = 0, + .bSourceID = IO_IN_IT_ID, + .bCSourceID = USB_IN_CLK_ID, + .bmControls = (CONTROL_RDWR << COPY_CTRL), +}; + +/* Ouput Terminal for I/O-Out */ +static struct uac2_output_terminal_descriptor io_out_ot_desc = { + .bLength = sizeof io_out_ot_desc, + .bDescriptorType = USB_DT_CS_INTERFACE, + + .bDescriptorSubtype = UAC_OUTPUT_TERMINAL, + .bTerminalID = IO_OUT_OT_ID, + .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_UNDEFINED), + .bAssocTerminal = 0, + .bSourceID = USB_OUT_IT_ID, + .bCSourceID = USB_OUT_CLK_ID, + .bmControls = (CONTROL_RDWR << COPY_CTRL), +}; + +static struct uac2_ac_header_descriptor ac_hdr_desc = { + .bLength = sizeof ac_hdr_desc, + .bDescriptorType = USB_DT_CS_INTERFACE, + + .bDescriptorSubtype = UAC_MS_HEADER, + .bcdADC = cpu_to_le16(0x200), + .bCategory = UAC2_FUNCTION_IO_BOX, + .wTotalLength = sizeof in_clk_src_desc + sizeof out_clk_src_desc + + sizeof usb_out_it_desc + sizeof io_in_it_desc + + sizeof usb_in_ot_desc + sizeof io_out_ot_desc, + .bmControls = 0, +}; + +/* 
Audio Streaming OUT Interface - Alt0 */ +static struct usb_interface_descriptor std_as_out_if0_desc = { + .bLength = sizeof std_as_out_if0_desc, + .bDescriptorType = USB_DT_INTERFACE, + + .bAlternateSetting = 0, + .bNumEndpoints = 0, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING, + .bInterfaceProtocol = UAC_VERSION_2, +}; + +/* Audio Streaming OUT Interface - Alt1 */ +static struct usb_interface_descriptor std_as_out_if1_desc = { + .bLength = sizeof std_as_out_if1_desc, + .bDescriptorType = USB_DT_INTERFACE, + + .bAlternateSetting = 1, + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING, + .bInterfaceProtocol = UAC_VERSION_2, +}; + +/* Audio Stream OUT Intface Desc */ +static struct uac2_as_header_descriptor as_out_hdr_desc = { + .bLength = sizeof as_out_hdr_desc, + .bDescriptorType = USB_DT_CS_INTERFACE, + + .bDescriptorSubtype = UAC_AS_GENERAL, + .bTerminalLink = USB_OUT_IT_ID, + .bmControls = 0, + .bFormatType = UAC_FORMAT_TYPE_I, + .bmFormats = cpu_to_le32(UAC_FORMAT_TYPE_I_PCM), + .iChannelNames = 0, +}; + +/* Audio USB_OUT Format */ +static struct uac2_format_type_i_descriptor as_out_fmt1_desc = { + .bLength = sizeof as_out_fmt1_desc, + .bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = UAC_FORMAT_TYPE, + .bFormatType = UAC_FORMAT_TYPE_I, +}; + +/* STD AS ISO OUT Endpoint */ +static struct usb_endpoint_descriptor fs_epout_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, + .wMaxPacketSize = cpu_to_le16(1023), + .bInterval = 1, +}; + +static struct usb_endpoint_descriptor hs_epout_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, + .wMaxPacketSize = cpu_to_le16(1024), + .bInterval = 4, +}; + +/* CS AS ISO OUT Endpoint */ +static struct uac2_iso_endpoint_descriptor as_iso_out_desc = { + .bLength = sizeof as_iso_out_desc, + .bDescriptorType = USB_DT_CS_ENDPOINT, + + .bDescriptorSubtype = UAC_EP_GENERAL, + .bmAttributes = 0, + .bmControls = 0, + .bLockDelayUnits = 0, + .wLockDelay = 0, +}; + +/* Audio Streaming IN Interface - Alt0 */ +static struct usb_interface_descriptor std_as_in_if0_desc = { + .bLength = sizeof std_as_in_if0_desc, + .bDescriptorType = USB_DT_INTERFACE, + + .bAlternateSetting = 0, + .bNumEndpoints = 0, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING, + .bInterfaceProtocol = UAC_VERSION_2, +}; + +/* Audio Streaming IN Interface - Alt1 */ +static struct usb_interface_descriptor std_as_in_if1_desc = { + .bLength = sizeof std_as_in_if1_desc, + .bDescriptorType = USB_DT_INTERFACE, + + .bAlternateSetting = 1, + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIOSTREAMING, + .bInterfaceProtocol = UAC_VERSION_2, +}; + +/* Audio Stream IN Intface Desc */ +static struct uac2_as_header_descriptor as_in_hdr_desc = { + .bLength = sizeof as_in_hdr_desc, + .bDescriptorType = USB_DT_CS_INTERFACE, + + .bDescriptorSubtype = UAC_AS_GENERAL, + .bTerminalLink = USB_IN_OT_ID, + .bmControls = 0, + .bFormatType = UAC_FORMAT_TYPE_I, + .bmFormats = cpu_to_le32(UAC_FORMAT_TYPE_I_PCM), + .iChannelNames = 0, +}; + +/* Audio USB_IN Format */ +static struct uac2_format_type_i_descriptor as_in_fmt1_desc = { + .bLength = sizeof as_in_fmt1_desc, + 
.bDescriptorType = USB_DT_CS_INTERFACE, + .bDescriptorSubtype = UAC_FORMAT_TYPE, + .bFormatType = UAC_FORMAT_TYPE_I, +}; + +/* STD AS ISO IN Endpoint */ +static struct usb_endpoint_descriptor fs_epin_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, + .wMaxPacketSize = cpu_to_le16(1023), + .bInterval = 1, +}; + +static struct usb_endpoint_descriptor hs_epin_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, + .wMaxPacketSize = cpu_to_le16(1024), + .bInterval = 4, +}; + +/* CS AS ISO IN Endpoint */ +static struct uac2_iso_endpoint_descriptor as_iso_in_desc = { + .bLength = sizeof as_iso_in_desc, + .bDescriptorType = USB_DT_CS_ENDPOINT, + + .bDescriptorSubtype = UAC_EP_GENERAL, + .bmAttributes = 0, + .bmControls = 0, + .bLockDelayUnits = 0, + .wLockDelay = 0, +}; + +static struct usb_descriptor_header *fs_audio_desc[] = { + (struct usb_descriptor_header *)&iad_desc, + (struct usb_descriptor_header *)&std_ac_if_desc, + + (struct usb_descriptor_header *)&ac_hdr_desc, + (struct usb_descriptor_header *)&in_clk_src_desc, + (struct usb_descriptor_header *)&out_clk_src_desc, + (struct usb_descriptor_header *)&usb_out_it_desc, + (struct usb_descriptor_header *)&io_in_it_desc, + (struct usb_descriptor_header *)&usb_in_ot_desc, + (struct usb_descriptor_header *)&io_out_ot_desc, + + (struct usb_descriptor_header *)&std_as_out_if0_desc, + (struct usb_descriptor_header *)&std_as_out_if1_desc, + + (struct usb_descriptor_header *)&as_out_hdr_desc, + (struct usb_descriptor_header *)&as_out_fmt1_desc, + (struct usb_descriptor_header *)&fs_epout_desc, + (struct usb_descriptor_header *)&as_iso_out_desc, + + (struct usb_descriptor_header *)&std_as_in_if0_desc, + (struct usb_descriptor_header *)&std_as_in_if1_desc, + + (struct usb_descriptor_header *)&as_in_hdr_desc, + (struct usb_descriptor_header *)&as_in_fmt1_desc, + (struct usb_descriptor_header *)&fs_epin_desc, + (struct usb_descriptor_header *)&as_iso_in_desc, + NULL, +}; + +static struct usb_descriptor_header *hs_audio_desc[] = { + (struct usb_descriptor_header *)&iad_desc, + (struct usb_descriptor_header *)&std_ac_if_desc, + + (struct usb_descriptor_header *)&ac_hdr_desc, + (struct usb_descriptor_header *)&in_clk_src_desc, + (struct usb_descriptor_header *)&out_clk_src_desc, + (struct usb_descriptor_header *)&usb_out_it_desc, + (struct usb_descriptor_header *)&io_in_it_desc, + (struct usb_descriptor_header *)&usb_in_ot_desc, + (struct usb_descriptor_header *)&io_out_ot_desc, + + (struct usb_descriptor_header *)&std_as_out_if0_desc, + (struct usb_descriptor_header *)&std_as_out_if1_desc, + + (struct usb_descriptor_header *)&as_out_hdr_desc, + (struct usb_descriptor_header *)&as_out_fmt1_desc, + (struct usb_descriptor_header *)&hs_epout_desc, + (struct usb_descriptor_header *)&as_iso_out_desc, + + (struct usb_descriptor_header *)&std_as_in_if0_desc, + (struct usb_descriptor_header *)&std_as_in_if1_desc, + + (struct usb_descriptor_header *)&as_in_hdr_desc, + (struct usb_descriptor_header *)&as_in_fmt1_desc, + (struct usb_descriptor_header *)&hs_epin_desc, + (struct usb_descriptor_header *)&as_iso_in_desc, + NULL, +}; + +struct cntrl_cur_lay3 { + __u32 dCUR; +}; + +struct cntrl_range_lay3 { + __u16 wNumSubRanges; + __u32 dMIN; + __u32 dMAX; + __u32 dRES; +} __packed; + +static inline void +free_ep(struct uac2_rtd_params *prm, 
struct usb_ep *ep) +{ + struct snd_uac2_chip *uac2 = prm->uac2; + int i; + + if (!prm->ep_enabled) + return; + + prm->ep_enabled = false; + + for (i = 0; i < USB_XFERS; i++) { + if (prm->ureq[i].req) { + usb_ep_dequeue(ep, prm->ureq[i].req); + usb_ep_free_request(ep, prm->ureq[i].req); + prm->ureq[i].req = NULL; + } + } + + if (usb_ep_disable(ep)) + dev_err(&uac2->pdev.dev, + "%s:%d Error!\n", __func__, __LINE__); +} + +static int +afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) +{ + struct audio_dev *agdev = func_to_agdev(fn); + struct snd_uac2_chip *uac2 = &agdev->uac2; + struct usb_composite_dev *cdev = cfg->cdev; + struct usb_gadget *gadget = cdev->gadget; + struct device *dev = &uac2->pdev.dev; + struct uac2_rtd_params *prm; + struct f_uac2_opts *uac2_opts; + struct usb_string *us; + int ret; + + uac2_opts = container_of(fn->fi, struct f_uac2_opts, func_inst); + + us = usb_gstrings_attach(cdev, fn_strings, ARRAY_SIZE(strings_fn)); + if (IS_ERR(us)) + return PTR_ERR(us); + iad_desc.iFunction = us[STR_ASSOC].id; + std_ac_if_desc.iInterface = us[STR_IF_CTRL].id; + in_clk_src_desc.iClockSource = us[STR_CLKSRC_IN].id; + out_clk_src_desc.iClockSource = us[STR_CLKSRC_OUT].id; + usb_out_it_desc.iTerminal = us[STR_USB_IT].id; + io_in_it_desc.iTerminal = us[STR_IO_IT].id; + usb_in_ot_desc.iTerminal = us[STR_USB_OT].id; + io_out_ot_desc.iTerminal = us[STR_IO_OT].id; + std_as_out_if0_desc.iInterface = us[STR_AS_OUT_ALT0].id; + std_as_out_if1_desc.iInterface = us[STR_AS_OUT_ALT1].id; + std_as_in_if0_desc.iInterface = us[STR_AS_IN_ALT0].id; + std_as_in_if1_desc.iInterface = us[STR_AS_IN_ALT1].id; + + + /* Initialize the configurable parameters */ + usb_out_it_desc.bNrChannels = num_channels(uac2_opts->c_chmask); + usb_out_it_desc.bmChannelConfig = cpu_to_le32(uac2_opts->c_chmask); + io_in_it_desc.bNrChannels = num_channels(uac2_opts->p_chmask); + io_in_it_desc.bmChannelConfig = cpu_to_le32(uac2_opts->p_chmask); + as_out_hdr_desc.bNrChannels = num_channels(uac2_opts->c_chmask); + as_out_hdr_desc.bmChannelConfig = cpu_to_le32(uac2_opts->c_chmask); + as_in_hdr_desc.bNrChannels = num_channels(uac2_opts->p_chmask); + as_in_hdr_desc.bmChannelConfig = cpu_to_le32(uac2_opts->p_chmask); + as_out_fmt1_desc.bSubslotSize = uac2_opts->c_ssize; + as_out_fmt1_desc.bBitResolution = uac2_opts->c_ssize * 8; + as_in_fmt1_desc.bSubslotSize = uac2_opts->p_ssize; + as_in_fmt1_desc.bBitResolution = uac2_opts->p_ssize * 8; + + snprintf(clksrc_in, sizeof(clksrc_in), "%uHz", uac2_opts->p_srate); + snprintf(clksrc_out, sizeof(clksrc_out), "%uHz", uac2_opts->c_srate); + + ret = usb_interface_id(cfg, fn); + if (ret < 0) { + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); + return ret; + } + std_ac_if_desc.bInterfaceNumber = ret; + agdev->ac_intf = ret; + agdev->ac_alt = 0; + + ret = usb_interface_id(cfg, fn); + if (ret < 0) { + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); + return ret; + } + std_as_out_if0_desc.bInterfaceNumber = ret; + std_as_out_if1_desc.bInterfaceNumber = ret; + agdev->as_out_intf = ret; + agdev->as_out_alt = 0; + + ret = usb_interface_id(cfg, fn); + if (ret < 0) { + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); + return ret; + } + std_as_in_if0_desc.bInterfaceNumber = ret; + std_as_in_if1_desc.bInterfaceNumber = ret; + agdev->as_in_intf = ret; + agdev->as_in_alt = 0; + + agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc); + if (!agdev->out_ep) { + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); + goto err; + } + agdev->out_ep->driver_data = agdev; + + 
agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc); + if (!agdev->in_ep) { + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); + goto err; + } + agdev->in_ep->driver_data = agdev; + + uac2->p_prm.uac2 = uac2; + uac2->c_prm.uac2 = uac2; + + hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress; + hs_epout_desc.wMaxPacketSize = fs_epout_desc.wMaxPacketSize; + hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress; + hs_epin_desc.wMaxPacketSize = fs_epin_desc.wMaxPacketSize; + + ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL); + if (ret) + goto err; + + prm = &agdev->uac2.c_prm; + prm->max_psize = hs_epout_desc.wMaxPacketSize; + prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL); + if (!prm->rbuf) { + prm->max_psize = 0; + goto err_free_descs; + } + + prm = &agdev->uac2.p_prm; + prm->max_psize = hs_epin_desc.wMaxPacketSize; + prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL); + if (!prm->rbuf) { + prm->max_psize = 0; + goto err_free_descs; + } + + ret = alsa_uac2_init(agdev); + if (ret) + goto err_free_descs; + return 0; + +err_free_descs: + usb_free_all_descriptors(fn); +err: + kfree(agdev->uac2.p_prm.rbuf); + kfree(agdev->uac2.c_prm.rbuf); + if (agdev->in_ep) + agdev->in_ep->driver_data = NULL; + if (agdev->out_ep) + agdev->out_ep->driver_data = NULL; + return -EINVAL; +} + +static int +afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt) +{ + struct usb_composite_dev *cdev = fn->config->cdev; + struct audio_dev *agdev = func_to_agdev(fn); + struct snd_uac2_chip *uac2 = &agdev->uac2; + struct usb_gadget *gadget = cdev->gadget; + struct device *dev = &uac2->pdev.dev; + struct usb_request *req; + struct usb_ep *ep; + struct uac2_rtd_params *prm; + int req_len, i; + + /* No i/f has more than 2 alt settings */ + if (alt > 1) { + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); + return -EINVAL; + } + + if (intf == agdev->ac_intf) { + /* Control I/f has only 1 AltSetting - 0 */ + if (alt) { + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); + return -EINVAL; + } + return 0; + } + + if (intf == agdev->as_out_intf) { + ep = agdev->out_ep; + prm = &uac2->c_prm; + config_ep_by_speed(gadget, fn, ep); + agdev->as_out_alt = alt; + req_len = prm->max_psize; + } else if (intf == agdev->as_in_intf) { + struct f_uac2_opts *opts = agdev_to_uac2_opts(agdev); + unsigned int factor, rate; + struct usb_endpoint_descriptor *ep_desc; + + ep = agdev->in_ep; + prm = &uac2->p_prm; + config_ep_by_speed(gadget, fn, ep); + agdev->as_in_alt = alt; + + /* pre-calculate the playback endpoint's interval */ + if (gadget->speed == USB_SPEED_FULL) { + ep_desc = &fs_epin_desc; + factor = 1000; + } else { + ep_desc = &hs_epin_desc; + factor = 125; + } + + /* pre-compute some values for iso_complete() */ + uac2->p_framesize = opts->p_ssize * + num_channels(opts->p_chmask); + rate = opts->p_srate * uac2->p_framesize; + uac2->p_interval = (1 << (ep_desc->bInterval - 1)) * factor; + uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval, + prm->max_psize); + + if (uac2->p_pktsize < prm->max_psize) + uac2->p_pktsize_residue = rate % uac2->p_interval; + else + uac2->p_pktsize_residue = 0; + + req_len = uac2->p_pktsize; + uac2->p_residue = 0; + } else { + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); + return -EINVAL; + } + + if (alt == 0) { + free_ep(prm, ep); + return 0; + } + + prm->ep_enabled = true; + usb_ep_enable(ep); + + for (i = 0; i < USB_XFERS; i++) { + if (!prm->ureq[i].req) { + req = usb_ep_alloc_request(ep, GFP_ATOMIC); + if 
(req == NULL) + return -ENOMEM; + + prm->ureq[i].req = req; + prm->ureq[i].pp = prm; + + req->zero = 0; + req->context = &prm->ureq[i]; + req->length = req_len; + req->complete = agdev_iso_complete; + req->buf = prm->rbuf + i * prm->max_psize; + } + + if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC)) + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); + } + + return 0; +} + +static int +afunc_get_alt(struct usb_function *fn, unsigned intf) +{ + struct audio_dev *agdev = func_to_agdev(fn); + struct snd_uac2_chip *uac2 = &agdev->uac2; + + if (intf == agdev->ac_intf) + return agdev->ac_alt; + else if (intf == agdev->as_out_intf) + return agdev->as_out_alt; + else if (intf == agdev->as_in_intf) + return agdev->as_in_alt; + else + dev_err(&uac2->pdev.dev, + "%s:%d Invalid Interface %d!\n", + __func__, __LINE__, intf); + + return -EINVAL; +} + +static void +afunc_disable(struct usb_function *fn) +{ + struct audio_dev *agdev = func_to_agdev(fn); + struct snd_uac2_chip *uac2 = &agdev->uac2; + + free_ep(&uac2->p_prm, agdev->in_ep); + agdev->as_in_alt = 0; + + free_ep(&uac2->c_prm, agdev->out_ep); + agdev->as_out_alt = 0; +} + +static int +in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr) +{ + struct usb_request *req = fn->config->cdev->req; + struct audio_dev *agdev = func_to_agdev(fn); + struct snd_uac2_chip *uac2 = &agdev->uac2; + struct f_uac2_opts *opts; + u16 w_length = le16_to_cpu(cr->wLength); + u16 w_index = le16_to_cpu(cr->wIndex); + u16 w_value = le16_to_cpu(cr->wValue); + u8 entity_id = (w_index >> 8) & 0xff; + u8 control_selector = w_value >> 8; + int value = -EOPNOTSUPP; + int p_srate, c_srate; + + opts = agdev_to_uac2_opts(agdev); + p_srate = opts->p_srate; + c_srate = opts->c_srate; + + if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) { + struct cntrl_cur_lay3 c; + + if (entity_id == USB_IN_CLK_ID) + c.dCUR = p_srate; + else if (entity_id == USB_OUT_CLK_ID) + c.dCUR = c_srate; + + value = min_t(unsigned, w_length, sizeof c); + memcpy(req->buf, &c, value); + } else if (control_selector == UAC2_CS_CONTROL_CLOCK_VALID) { + *(u8 *)req->buf = 1; + value = min_t(unsigned, w_length, 1); + } else { + dev_err(&uac2->pdev.dev, + "%s:%d control_selector=%d TODO!\n", + __func__, __LINE__, control_selector); + } + + return value; +} + +static int +in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr) +{ + struct usb_request *req = fn->config->cdev->req; + struct audio_dev *agdev = func_to_agdev(fn); + struct snd_uac2_chip *uac2 = &agdev->uac2; + struct f_uac2_opts *opts; + u16 w_length = le16_to_cpu(cr->wLength); + u16 w_index = le16_to_cpu(cr->wIndex); + u16 w_value = le16_to_cpu(cr->wValue); + u8 entity_id = (w_index >> 8) & 0xff; + u8 control_selector = w_value >> 8; + struct cntrl_range_lay3 r; + int value = -EOPNOTSUPP; + int p_srate, c_srate; + + opts = agdev_to_uac2_opts(agdev); + p_srate = opts->p_srate; + c_srate = opts->c_srate; + + if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) { + if (entity_id == USB_IN_CLK_ID) + r.dMIN = p_srate; + else if (entity_id == USB_OUT_CLK_ID) + r.dMIN = c_srate; + else + return -EOPNOTSUPP; + + r.dMAX = r.dMIN; + r.dRES = 0; + r.wNumSubRanges = 1; + + value = min_t(unsigned, w_length, sizeof r); + memcpy(req->buf, &r, value); + } else { + dev_err(&uac2->pdev.dev, + "%s:%d control_selector=%d TODO!\n", + __func__, __LINE__, control_selector); + } + + return value; +} + +static int +ac_rq_in(struct usb_function *fn, const struct usb_ctrlrequest *cr) +{ + if (cr->bRequest == UAC2_CS_CUR) + return in_rq_cur(fn, cr); 
+ else if (cr->bRequest == UAC2_CS_RANGE) + return in_rq_range(fn, cr); + else + return -EOPNOTSUPP; +} + +static int +out_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr) +{ + u16 w_length = le16_to_cpu(cr->wLength); + u16 w_value = le16_to_cpu(cr->wValue); + u8 control_selector = w_value >> 8; + + if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) + return w_length; + + return -EOPNOTSUPP; +} + +static int +setup_rq_inf(struct usb_function *fn, const struct usb_ctrlrequest *cr) +{ + struct audio_dev *agdev = func_to_agdev(fn); + struct snd_uac2_chip *uac2 = &agdev->uac2; + u16 w_index = le16_to_cpu(cr->wIndex); + u8 intf = w_index & 0xff; + + if (intf != agdev->ac_intf) { + dev_err(&uac2->pdev.dev, + "%s:%d Error!\n", __func__, __LINE__); + return -EOPNOTSUPP; + } + + if (cr->bRequestType & USB_DIR_IN) + return ac_rq_in(fn, cr); + else if (cr->bRequest == UAC2_CS_CUR) + return out_rq_cur(fn, cr); + + return -EOPNOTSUPP; +} + +static int +afunc_setup(struct usb_function *fn, const struct usb_ctrlrequest *cr) +{ + struct usb_composite_dev *cdev = fn->config->cdev; + struct audio_dev *agdev = func_to_agdev(fn); + struct snd_uac2_chip *uac2 = &agdev->uac2; + struct usb_request *req = cdev->req; + u16 w_length = le16_to_cpu(cr->wLength); + int value = -EOPNOTSUPP; + + /* Only Class specific requests are supposed to reach here */ + if ((cr->bRequestType & USB_TYPE_MASK) != USB_TYPE_CLASS) + return -EOPNOTSUPP; + + if ((cr->bRequestType & USB_RECIP_MASK) == USB_RECIP_INTERFACE) + value = setup_rq_inf(fn, cr); + else + dev_err(&uac2->pdev.dev, "%s:%d Error!\n", __func__, __LINE__); + + if (value >= 0) { + req->length = value; + req->zero = value < w_length; + value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); + if (value < 0) { + dev_err(&uac2->pdev.dev, + "%s:%d Error!\n", __func__, __LINE__); + req->status = 0; + } + } + + return value; +} + +static inline struct f_uac2_opts *to_f_uac2_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_uac2_opts, + func_inst.group); +} + +CONFIGFS_ATTR_STRUCT(f_uac2_opts); +CONFIGFS_ATTR_OPS(f_uac2_opts); + +static void f_uac2_attr_release(struct config_item *item) +{ + struct f_uac2_opts *opts = to_f_uac2_opts(item); + + usb_put_function_instance(&opts->func_inst); +} + +static struct configfs_item_operations f_uac2_item_ops = { + .release = f_uac2_attr_release, + .show_attribute = f_uac2_opts_attr_show, + .store_attribute = f_uac2_opts_attr_store, +}; + +#define UAC2_ATTRIBUTE(name) \ +static ssize_t f_uac2_opts_##name##_show(struct f_uac2_opts *opts, \ + char *page) \ +{ \ + int result; \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%u\n", opts->name); \ + mutex_unlock(&opts->lock); \ + \ + return result; \ +} \ + \ +static ssize_t f_uac2_opts_##name##_store(struct f_uac2_opts *opts, \ + const char *page, size_t len) \ +{ \ + int ret; \ + u32 num; \ + \ + mutex_lock(&opts->lock); \ + if (opts->refcnt) { \ + ret = -EBUSY; \ + goto end; \ + } \ + \ + ret = kstrtou32(page, 0, &num); \ + if (ret) \ + goto end; \ + \ + opts->name = num; \ + ret = len; \ + \ +end: \ + mutex_unlock(&opts->lock); \ + return ret; \ +} \ + \ +static struct f_uac2_opts_attribute f_uac2_opts_##name = \ + __CONFIGFS_ATTR(name, S_IRUGO | S_IWUSR, \ + f_uac2_opts_##name##_show, \ + f_uac2_opts_##name##_store) + +UAC2_ATTRIBUTE(p_chmask); +UAC2_ATTRIBUTE(p_srate); +UAC2_ATTRIBUTE(p_ssize); +UAC2_ATTRIBUTE(c_chmask); +UAC2_ATTRIBUTE(c_srate); +UAC2_ATTRIBUTE(c_ssize); + +static struct configfs_attribute 
*f_uac2_attrs[] = { + &f_uac2_opts_p_chmask.attr, + &f_uac2_opts_p_srate.attr, + &f_uac2_opts_p_ssize.attr, + &f_uac2_opts_c_chmask.attr, + &f_uac2_opts_c_srate.attr, + &f_uac2_opts_c_ssize.attr, + NULL, +}; + +static struct config_item_type f_uac2_func_type = { + .ct_item_ops = &f_uac2_item_ops, + .ct_attrs = f_uac2_attrs, + .ct_owner = THIS_MODULE, +}; + +static void afunc_free_inst(struct usb_function_instance *f) +{ + struct f_uac2_opts *opts; + + opts = container_of(f, struct f_uac2_opts, func_inst); + kfree(opts); +} + +static struct usb_function_instance *afunc_alloc_inst(void) +{ + struct f_uac2_opts *opts; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return ERR_PTR(-ENOMEM); + + mutex_init(&opts->lock); + opts->func_inst.free_func_inst = afunc_free_inst; + + config_group_init_type_name(&opts->func_inst.group, "", + &f_uac2_func_type); + + opts->p_chmask = UAC2_DEF_PCHMASK; + opts->p_srate = UAC2_DEF_PSRATE; + opts->p_ssize = UAC2_DEF_PSSIZE; + opts->c_chmask = UAC2_DEF_CCHMASK; + opts->c_srate = UAC2_DEF_CSRATE; + opts->c_ssize = UAC2_DEF_CSSIZE; + return &opts->func_inst; +} + +static void afunc_free(struct usb_function *f) +{ + struct audio_dev *agdev; + struct f_uac2_opts *opts; + + agdev = func_to_agdev(f); + opts = container_of(f->fi, struct f_uac2_opts, func_inst); + kfree(agdev); + mutex_lock(&opts->lock); + --opts->refcnt; + mutex_unlock(&opts->lock); +} + +static void afunc_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct audio_dev *agdev = func_to_agdev(f); + struct uac2_rtd_params *prm; + + alsa_uac2_exit(agdev); + + prm = &agdev->uac2.p_prm; + kfree(prm->rbuf); + + prm = &agdev->uac2.c_prm; + kfree(prm->rbuf); + usb_free_all_descriptors(f); + + if (agdev->in_ep) + agdev->in_ep->driver_data = NULL; + if (agdev->out_ep) + agdev->out_ep->driver_data = NULL; +} + +static struct usb_function *afunc_alloc(struct usb_function_instance *fi) +{ + struct audio_dev *agdev; + struct f_uac2_opts *opts; + + agdev = kzalloc(sizeof(*agdev), GFP_KERNEL); + if (agdev == NULL) + return ERR_PTR(-ENOMEM); + + opts = container_of(fi, struct f_uac2_opts, func_inst); + mutex_lock(&opts->lock); + ++opts->refcnt; + mutex_unlock(&opts->lock); + + agdev->func.name = "uac2_func"; + agdev->func.bind = afunc_bind; + agdev->func.unbind = afunc_unbind; + agdev->func.set_alt = afunc_set_alt; + agdev->func.get_alt = afunc_get_alt; + agdev->func.disable = afunc_disable; + agdev->func.setup = afunc_setup; + agdev->func.free_func = afunc_free; + + return &agdev->func; +} + +DECLARE_USB_FUNCTION_INIT(uac2, afunc_alloc_inst, afunc_alloc); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Yadwinder Singh"); +MODULE_AUTHOR("Jaswinder Singh"); diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c new file mode 100644 index 000000000..cf0df8fbb --- /dev/null +++ b/drivers/usb/gadget/function/f_uvc.c @@ -0,0 +1,958 @@ +/* + * uvc_gadget.c -- USB Video Class Gadget driver + * + * Copyright (C) 2009-2010 + * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/string.h> +#include <linux/usb/ch9.h> +#include <linux/usb/gadget.h> +#include <linux/usb/video.h> +#include <linux/vmalloc.h> +#include <linux/wait.h> + +#include <media/v4l2-dev.h> +#include <media/v4l2-event.h> + +#include "u_uvc.h" +#include "uvc.h" +#include "uvc_configfs.h" +#include "uvc_v4l2.h" +#include "uvc_video.h" + +unsigned int uvc_gadget_trace_param; + +/* -------------------------------------------------------------------------- + * Function descriptors + */ + +/* string IDs are assigned dynamically */ + +#define UVC_STRING_CONTROL_IDX 0 +#define UVC_STRING_STREAMING_IDX 1 + +static struct usb_string uvc_en_us_strings[] = { + [UVC_STRING_CONTROL_IDX].s = "UVC Camera", + [UVC_STRING_STREAMING_IDX].s = "Video Streaming", + { } +}; + +static struct usb_gadget_strings uvc_stringtab = { + .language = 0x0409, /* en-us */ + .strings = uvc_en_us_strings, +}; + +static struct usb_gadget_strings *uvc_function_strings[] = { + &uvc_stringtab, + NULL, +}; + +#define UVC_INTF_VIDEO_CONTROL 0 +#define UVC_INTF_VIDEO_STREAMING 1 + +#define UVC_STATUS_MAX_PACKET_SIZE 16 /* 16 bytes status */ + +static struct usb_interface_assoc_descriptor uvc_iad = { + .bLength = sizeof(uvc_iad), + .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION, + .bFirstInterface = 0, + .bInterfaceCount = 2, + .bFunctionClass = USB_CLASS_VIDEO, + .bFunctionSubClass = UVC_SC_VIDEO_INTERFACE_COLLECTION, + .bFunctionProtocol = 0x00, + .iFunction = 0, +}; + +static struct usb_interface_descriptor uvc_control_intf = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = UVC_INTF_VIDEO_CONTROL, + .bAlternateSetting = 0, + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = UVC_SC_VIDEOCONTROL, + .bInterfaceProtocol = 0x00, + .iInterface = 0, +}; + +static struct usb_endpoint_descriptor uvc_control_ep = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_INT, + .wMaxPacketSize = cpu_to_le16(UVC_STATUS_MAX_PACKET_SIZE), + .bInterval = 8, +}; + +static struct usb_ss_ep_comp_descriptor uvc_ss_control_comp = { + .bLength = sizeof(uvc_ss_control_comp), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + /* The following 3 values can be tweaked if necessary. 
*/ + .bMaxBurst = 0, + .bmAttributes = 0, + .wBytesPerInterval = cpu_to_le16(UVC_STATUS_MAX_PACKET_SIZE), +}; + +static struct uvc_control_endpoint_descriptor uvc_control_cs_ep = { + .bLength = UVC_DT_CONTROL_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_CS_ENDPOINT, + .bDescriptorSubType = UVC_EP_INTERRUPT, + .wMaxTransferSize = cpu_to_le16(UVC_STATUS_MAX_PACKET_SIZE), +}; + +static struct usb_interface_descriptor uvc_streaming_intf_alt0 = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = UVC_INTF_VIDEO_STREAMING, + .bAlternateSetting = 0, + .bNumEndpoints = 0, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = UVC_SC_VIDEOSTREAMING, + .bInterfaceProtocol = 0x00, + .iInterface = 0, +}; + +static struct usb_interface_descriptor uvc_streaming_intf_alt1 = { + .bLength = USB_DT_INTERFACE_SIZE, + .bDescriptorType = USB_DT_INTERFACE, + .bInterfaceNumber = UVC_INTF_VIDEO_STREAMING, + .bAlternateSetting = 1, + .bNumEndpoints = 1, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = UVC_SC_VIDEOSTREAMING, + .bInterfaceProtocol = 0x00, + .iInterface = 0, +}; + +static struct usb_endpoint_descriptor uvc_fs_streaming_ep = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_SYNC_ASYNC + | USB_ENDPOINT_XFER_ISOC, + /* The wMaxPacketSize and bInterval values will be initialized from + * module parameters. + */ +}; + +static struct usb_endpoint_descriptor uvc_hs_streaming_ep = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_SYNC_ASYNC + | USB_ENDPOINT_XFER_ISOC, + /* The wMaxPacketSize and bInterval values will be initialized from + * module parameters. + */ +}; + +static struct usb_endpoint_descriptor uvc_ss_streaming_ep = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_SYNC_ASYNC + | USB_ENDPOINT_XFER_ISOC, + /* The wMaxPacketSize and bInterval values will be initialized from + * module parameters. + */ +}; + +static struct usb_ss_ep_comp_descriptor uvc_ss_streaming_comp = { + .bLength = sizeof(uvc_ss_streaming_comp), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + /* The bMaxBurst, bmAttributes and wBytesPerInterval values will be + * initialized from module parameters. 
+ */ +}; + +static const struct usb_descriptor_header * const uvc_fs_streaming[] = { + (struct usb_descriptor_header *) &uvc_streaming_intf_alt1, + (struct usb_descriptor_header *) &uvc_fs_streaming_ep, + NULL, +}; + +static const struct usb_descriptor_header * const uvc_hs_streaming[] = { + (struct usb_descriptor_header *) &uvc_streaming_intf_alt1, + (struct usb_descriptor_header *) &uvc_hs_streaming_ep, + NULL, +}; + +static const struct usb_descriptor_header * const uvc_ss_streaming[] = { + (struct usb_descriptor_header *) &uvc_streaming_intf_alt1, + (struct usb_descriptor_header *) &uvc_ss_streaming_ep, + (struct usb_descriptor_header *) &uvc_ss_streaming_comp, + NULL, +}; + +void uvc_set_trace_param(unsigned int trace) +{ + uvc_gadget_trace_param = trace; +} +EXPORT_SYMBOL(uvc_set_trace_param); + +/* -------------------------------------------------------------------------- + * Control requests + */ + +static void +uvc_function_ep0_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct uvc_device *uvc = req->context; + struct v4l2_event v4l2_event; + struct uvc_event *uvc_event = (void *)&v4l2_event.u.data; + + if (uvc->event_setup_out) { + uvc->event_setup_out = 0; + + memset(&v4l2_event, 0, sizeof(v4l2_event)); + v4l2_event.type = UVC_EVENT_DATA; + uvc_event->data.length = req->actual; + memcpy(&uvc_event->data.data, req->buf, req->actual); + v4l2_event_queue(&uvc->vdev, &v4l2_event); + } +} + +static int +uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) +{ + struct uvc_device *uvc = to_uvc(f); + struct v4l2_event v4l2_event; + struct uvc_event *uvc_event = (void *)&v4l2_event.u.data; + + /* printk(KERN_INFO "setup request %02x %02x value %04x index %04x %04x\n", + * ctrl->bRequestType, ctrl->bRequest, le16_to_cpu(ctrl->wValue), + * le16_to_cpu(ctrl->wIndex), le16_to_cpu(ctrl->wLength)); + */ + + if ((ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_CLASS) { + INFO(f->config->cdev, "invalid request type\n"); + return -EINVAL; + } + + /* Stall too big requests. */ + if (le16_to_cpu(ctrl->wLength) > UVC_MAX_REQUEST_SIZE) + return -EINVAL; + + /* Tell the complete callback to generate an event for the next request + * that will be enqueued by UVCIOC_SEND_RESPONSE. + */ + uvc->event_setup_out = !(ctrl->bRequestType & USB_DIR_IN); + uvc->event_length = le16_to_cpu(ctrl->wLength); + + memset(&v4l2_event, 0, sizeof(v4l2_event)); + v4l2_event.type = UVC_EVENT_SETUP; + memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req)); + v4l2_event_queue(&uvc->vdev, &v4l2_event); + + return 0; +} + +void uvc_function_setup_continue(struct uvc_device *uvc) +{ + struct usb_composite_dev *cdev = uvc->func.config->cdev; + + usb_composite_setup_continue(cdev); +} + +static int +uvc_function_get_alt(struct usb_function *f, unsigned interface) +{ + struct uvc_device *uvc = to_uvc(f); + + INFO(f->config->cdev, "uvc_function_get_alt(%u)\n", interface); + + if (interface == uvc->control_intf) + return 0; + else if (interface != uvc->streaming_intf) + return -EINVAL; + else + return uvc->video.ep->driver_data ? 
1 : 0; +} + +static int +uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt) +{ + struct uvc_device *uvc = to_uvc(f); + struct usb_composite_dev *cdev = f->config->cdev; + struct v4l2_event v4l2_event; + struct uvc_event *uvc_event = (void *)&v4l2_event.u.data; + int ret; + + INFO(cdev, "uvc_function_set_alt(%u, %u)\n", interface, alt); + + if (interface == uvc->control_intf) { + if (alt) + return -EINVAL; + + if (uvc->control_ep->driver_data) { + INFO(cdev, "reset UVC Control\n"); + usb_ep_disable(uvc->control_ep); + uvc->control_ep->driver_data = NULL; + } + + if (!uvc->control_ep->desc) + if (config_ep_by_speed(cdev->gadget, f, uvc->control_ep)) + return -EINVAL; + + usb_ep_enable(uvc->control_ep); + uvc->control_ep->driver_data = uvc; + + if (uvc->state == UVC_STATE_DISCONNECTED) { + memset(&v4l2_event, 0, sizeof(v4l2_event)); + v4l2_event.type = UVC_EVENT_CONNECT; + uvc_event->speed = cdev->gadget->speed; + v4l2_event_queue(&uvc->vdev, &v4l2_event); + + uvc->state = UVC_STATE_CONNECTED; + } + + return 0; + } + + if (interface != uvc->streaming_intf) + return -EINVAL; + + /* TODO + if (usb_endpoint_xfer_bulk(&uvc->desc.vs_ep)) + return alt ? -EINVAL : 0; + */ + + switch (alt) { + case 0: + if (uvc->state != UVC_STATE_STREAMING) + return 0; + + if (uvc->video.ep) { + usb_ep_disable(uvc->video.ep); + uvc->video.ep->driver_data = NULL; + } + + memset(&v4l2_event, 0, sizeof(v4l2_event)); + v4l2_event.type = UVC_EVENT_STREAMOFF; + v4l2_event_queue(&uvc->vdev, &v4l2_event); + + uvc->state = UVC_STATE_CONNECTED; + return 0; + + case 1: + if (uvc->state != UVC_STATE_CONNECTED) + return 0; + + if (!uvc->video.ep) + return -EINVAL; + + if (uvc->video.ep->driver_data) { + INFO(cdev, "reset UVC\n"); + usb_ep_disable(uvc->video.ep); + uvc->video.ep->driver_data = NULL; + } + + ret = config_ep_by_speed(f->config->cdev->gadget, + &(uvc->func), uvc->video.ep); + if (ret) + return ret; + usb_ep_enable(uvc->video.ep); + uvc->video.ep->driver_data = uvc; + + memset(&v4l2_event, 0, sizeof(v4l2_event)); + v4l2_event.type = UVC_EVENT_STREAMON; + v4l2_event_queue(&uvc->vdev, &v4l2_event); + return USB_GADGET_DELAYED_STATUS; + + default: + return -EINVAL; + } +} + +static void +uvc_function_disable(struct usb_function *f) +{ + struct uvc_device *uvc = to_uvc(f); + struct v4l2_event v4l2_event; + + INFO(f->config->cdev, "uvc_function_disable\n"); + + memset(&v4l2_event, 0, sizeof(v4l2_event)); + v4l2_event.type = UVC_EVENT_DISCONNECT; + v4l2_event_queue(&uvc->vdev, &v4l2_event); + + uvc->state = UVC_STATE_DISCONNECTED; + + if (uvc->video.ep->driver_data) { + usb_ep_disable(uvc->video.ep); + uvc->video.ep->driver_data = NULL; + } + + if (uvc->control_ep->driver_data) { + usb_ep_disable(uvc->control_ep); + uvc->control_ep->driver_data = NULL; + } +} + +/* -------------------------------------------------------------------------- + * Connection / disconnection + */ + +void +uvc_function_connect(struct uvc_device *uvc) +{ + struct usb_composite_dev *cdev = uvc->func.config->cdev; + int ret; + + if ((ret = usb_function_activate(&uvc->func)) < 0) + INFO(cdev, "UVC connect failed with %d\n", ret); +} + +void +uvc_function_disconnect(struct uvc_device *uvc) +{ + struct usb_composite_dev *cdev = uvc->func.config->cdev; + int ret; + + if ((ret = usb_function_deactivate(&uvc->func)) < 0) + INFO(cdev, "UVC disconnect failed with %d\n", ret); +} + +/* -------------------------------------------------------------------------- + * USB probe and disconnect + */ + +static int 
+uvc_register_video(struct uvc_device *uvc) +{ + struct usb_composite_dev *cdev = uvc->func.config->cdev; + + /* TODO reference counting. */ + uvc->vdev.v4l2_dev = &uvc->v4l2_dev; + uvc->vdev.fops = &uvc_v4l2_fops; + uvc->vdev.ioctl_ops = &uvc_v4l2_ioctl_ops; + uvc->vdev.release = video_device_release_empty; + uvc->vdev.vfl_dir = VFL_DIR_TX; + uvc->vdev.lock = &uvc->video.mutex; + strlcpy(uvc->vdev.name, cdev->gadget->name, sizeof(uvc->vdev.name)); + + video_set_drvdata(&uvc->vdev, uvc); + + return video_register_device(&uvc->vdev, VFL_TYPE_GRABBER, -1); +} + +#define UVC_COPY_DESCRIPTOR(mem, dst, desc) \ + do { \ + memcpy(mem, desc, (desc)->bLength); \ + *(dst)++ = mem; \ + mem += (desc)->bLength; \ + } while (0); + +#define UVC_COPY_DESCRIPTORS(mem, dst, src) \ + do { \ + const struct usb_descriptor_header * const *__src; \ + for (__src = src; *__src; ++__src) { \ + memcpy(mem, *__src, (*__src)->bLength); \ + *dst++ = mem; \ + mem += (*__src)->bLength; \ + } \ + } while (0) + +static struct usb_descriptor_header ** +uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed) +{ + struct uvc_input_header_descriptor *uvc_streaming_header; + struct uvc_header_descriptor *uvc_control_header; + const struct uvc_descriptor_header * const *uvc_control_desc; + const struct uvc_descriptor_header * const *uvc_streaming_cls; + const struct usb_descriptor_header * const *uvc_streaming_std; + const struct usb_descriptor_header * const *src; + struct usb_descriptor_header **dst; + struct usb_descriptor_header **hdr; + unsigned int control_size; + unsigned int streaming_size; + unsigned int n_desc; + unsigned int bytes; + void *mem; + + switch (speed) { + case USB_SPEED_SUPER: + uvc_control_desc = uvc->desc.ss_control; + uvc_streaming_cls = uvc->desc.ss_streaming; + uvc_streaming_std = uvc_ss_streaming; + break; + + case USB_SPEED_HIGH: + uvc_control_desc = uvc->desc.fs_control; + uvc_streaming_cls = uvc->desc.hs_streaming; + uvc_streaming_std = uvc_hs_streaming; + break; + + case USB_SPEED_FULL: + default: + uvc_control_desc = uvc->desc.fs_control; + uvc_streaming_cls = uvc->desc.fs_streaming; + uvc_streaming_std = uvc_fs_streaming; + break; + } + + if (!uvc_control_desc || !uvc_streaming_cls) + return ERR_PTR(-ENODEV); + + /* Descriptors layout + * + * uvc_iad + * uvc_control_intf + * Class-specific UVC control descriptors + * uvc_control_ep + * uvc_control_cs_ep + * uvc_ss_control_comp (for SS only) + * uvc_streaming_intf_alt0 + * Class-specific UVC streaming descriptors + * uvc_{fs|hs}_streaming + */ + + /* Count descriptors and compute their size. */ + control_size = 0; + streaming_size = 0; + bytes = uvc_iad.bLength + uvc_control_intf.bLength + + uvc_control_ep.bLength + uvc_control_cs_ep.bLength + + uvc_streaming_intf_alt0.bLength; + + if (speed == USB_SPEED_SUPER) { + bytes += uvc_ss_control_comp.bLength; + n_desc = 6; + } else { + n_desc = 5; + } + + for (src = (const struct usb_descriptor_header **)uvc_control_desc; + *src; ++src) { + control_size += (*src)->bLength; + bytes += (*src)->bLength; + n_desc++; + } + for (src = (const struct usb_descriptor_header **)uvc_streaming_cls; + *src; ++src) { + streaming_size += (*src)->bLength; + bytes += (*src)->bLength; + n_desc++; + } + for (src = uvc_streaming_std; *src; ++src) { + bytes += (*src)->bLength; + n_desc++; + } + + mem = kmalloc((n_desc + 1) * sizeof(*src) + bytes, GFP_KERNEL); + if (mem == NULL) + return NULL; + + hdr = mem; + dst = mem; + mem += (n_desc + 1) * sizeof(*src); + + /* Copy the descriptors. 
*/ + UVC_COPY_DESCRIPTOR(mem, dst, &uvc_iad); + UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_intf); + + uvc_control_header = mem; + UVC_COPY_DESCRIPTORS(mem, dst, + (const struct usb_descriptor_header **)uvc_control_desc); + uvc_control_header->wTotalLength = cpu_to_le16(control_size); + uvc_control_header->bInCollection = 1; + uvc_control_header->baInterfaceNr[0] = uvc->streaming_intf; + + UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_ep); + if (speed == USB_SPEED_SUPER) + UVC_COPY_DESCRIPTOR(mem, dst, &uvc_ss_control_comp); + + UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_cs_ep); + UVC_COPY_DESCRIPTOR(mem, dst, &uvc_streaming_intf_alt0); + + uvc_streaming_header = mem; + UVC_COPY_DESCRIPTORS(mem, dst, + (const struct usb_descriptor_header**)uvc_streaming_cls); + uvc_streaming_header->wTotalLength = cpu_to_le16(streaming_size); + uvc_streaming_header->bEndpointAddress = uvc->video.ep->address; + + UVC_COPY_DESCRIPTORS(mem, dst, uvc_streaming_std); + + *dst = NULL; + return hdr; +} + +static int +uvc_function_bind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct uvc_device *uvc = to_uvc(f); + struct usb_string *us; + unsigned int max_packet_mult; + unsigned int max_packet_size; + struct usb_ep *ep; + struct f_uvc_opts *opts; + int ret = -EINVAL; + + INFO(cdev, "uvc_function_bind\n"); + + opts = fi_to_f_uvc_opts(f->fi); + /* Sanity check the streaming endpoint module parameters. + */ + opts->streaming_interval = clamp(opts->streaming_interval, 1U, 16U); + opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U); + opts->streaming_maxburst = min(opts->streaming_maxburst, 15U); + + /* Fill in the FS/HS/SS Video Streaming specific descriptors from the + * module parameters. + * + * NOTE: We assume that the user knows what they are doing and won't + * give parameters that their UDC doesn't support. + */ + if (opts->streaming_maxpacket <= 1024) { + max_packet_mult = 1; + max_packet_size = opts->streaming_maxpacket; + } else if (opts->streaming_maxpacket <= 2048) { + max_packet_mult = 2; + max_packet_size = opts->streaming_maxpacket / 2; + } else { + max_packet_mult = 3; + max_packet_size = opts->streaming_maxpacket / 3; + } + + uvc_fs_streaming_ep.wMaxPacketSize = + cpu_to_le16(min(opts->streaming_maxpacket, 1023U)); + uvc_fs_streaming_ep.bInterval = opts->streaming_interval; + + uvc_hs_streaming_ep.wMaxPacketSize = + cpu_to_le16(max_packet_size | ((max_packet_mult - 1) << 11)); + uvc_hs_streaming_ep.bInterval = opts->streaming_interval; + + uvc_ss_streaming_ep.wMaxPacketSize = cpu_to_le16(max_packet_size); + uvc_ss_streaming_ep.bInterval = opts->streaming_interval; + uvc_ss_streaming_comp.bmAttributes = max_packet_mult - 1; + uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst; + uvc_ss_streaming_comp.wBytesPerInterval = + cpu_to_le16(max_packet_size * max_packet_mult * + opts->streaming_maxburst); + + /* Allocate endpoints. 
*/ + ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep); + if (!ep) { + INFO(cdev, "Unable to allocate control EP\n"); + goto error; + } + uvc->control_ep = ep; + ep->driver_data = uvc; + + if (gadget_is_superspeed(c->cdev->gadget)) + ep = usb_ep_autoconfig_ss(cdev->gadget, &uvc_ss_streaming_ep, + &uvc_ss_streaming_comp); + else if (gadget_is_dualspeed(cdev->gadget)) + ep = usb_ep_autoconfig(cdev->gadget, &uvc_hs_streaming_ep); + else + ep = usb_ep_autoconfig(cdev->gadget, &uvc_fs_streaming_ep); + + if (!ep) { + INFO(cdev, "Unable to allocate streaming EP\n"); + goto error; + } + uvc->video.ep = ep; + ep->driver_data = uvc; + + uvc_fs_streaming_ep.bEndpointAddress = uvc->video.ep->address; + uvc_hs_streaming_ep.bEndpointAddress = uvc->video.ep->address; + uvc_ss_streaming_ep.bEndpointAddress = uvc->video.ep->address; + + us = usb_gstrings_attach(cdev, uvc_function_strings, + ARRAY_SIZE(uvc_en_us_strings)); + if (IS_ERR(us)) { + ret = PTR_ERR(us); + goto error; + } + uvc_iad.iFunction = us[UVC_STRING_CONTROL_IDX].id; + uvc_control_intf.iInterface = us[UVC_STRING_CONTROL_IDX].id; + ret = us[UVC_STRING_STREAMING_IDX].id; + uvc_streaming_intf_alt0.iInterface = ret; + uvc_streaming_intf_alt1.iInterface = ret; + + /* Allocate interface IDs. */ + if ((ret = usb_interface_id(c, f)) < 0) + goto error; + uvc_iad.bFirstInterface = ret; + uvc_control_intf.bInterfaceNumber = ret; + uvc->control_intf = ret; + + if ((ret = usb_interface_id(c, f)) < 0) + goto error; + uvc_streaming_intf_alt0.bInterfaceNumber = ret; + uvc_streaming_intf_alt1.bInterfaceNumber = ret; + uvc->streaming_intf = ret; + + /* Copy descriptors */ + f->fs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_FULL); + if (IS_ERR(f->fs_descriptors)) { + ret = PTR_ERR(f->fs_descriptors); + f->fs_descriptors = NULL; + goto error; + } + if (gadget_is_dualspeed(cdev->gadget)) { + f->hs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_HIGH); + if (IS_ERR(f->hs_descriptors)) { + ret = PTR_ERR(f->hs_descriptors); + f->hs_descriptors = NULL; + goto error; + } + } + if (gadget_is_superspeed(c->cdev->gadget)) { + f->ss_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_SUPER); + if (IS_ERR(f->ss_descriptors)) { + ret = PTR_ERR(f->ss_descriptors); + f->ss_descriptors = NULL; + goto error; + } + } + + /* Preallocate control endpoint request. */ + uvc->control_req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL); + uvc->control_buf = kmalloc(UVC_MAX_REQUEST_SIZE, GFP_KERNEL); + if (uvc->control_req == NULL || uvc->control_buf == NULL) { + ret = -ENOMEM; + goto error; + } + + uvc->control_req->buf = uvc->control_buf; + uvc->control_req->complete = uvc_function_ep0_complete; + uvc->control_req->context = uvc; + + /* Avoid letting this gadget enumerate until the userspace server is + * active. + */ + if ((ret = usb_function_deactivate(f)) < 0) + goto error; + + if (v4l2_device_register(&cdev->gadget->dev, &uvc->v4l2_dev)) { + printk(KERN_INFO "v4l2_device_register failed\n"); + goto error; + } + + /* Initialise video. */ + ret = uvcg_video_init(&uvc->video); + if (ret < 0) + goto error; + + /* Register a V4L2 device. 
*/ + ret = uvc_register_video(uvc); + if (ret < 0) { + printk(KERN_INFO "Unable to register video device\n"); + goto error; + } + + return 0; + +error: + v4l2_device_unregister(&uvc->v4l2_dev); + + if (uvc->control_ep) + uvc->control_ep->driver_data = NULL; + if (uvc->video.ep) + uvc->video.ep->driver_data = NULL; + + if (uvc->control_req) + usb_ep_free_request(cdev->gadget->ep0, uvc->control_req); + kfree(uvc->control_buf); + + usb_free_all_descriptors(f); + return ret; +} + +/* -------------------------------------------------------------------------- + * USB gadget function + */ + +static void uvc_free_inst(struct usb_function_instance *f) +{ + struct f_uvc_opts *opts = fi_to_f_uvc_opts(f); + + mutex_destroy(&opts->lock); + kfree(opts); +} + +static struct usb_function_instance *uvc_alloc_inst(void) +{ + struct f_uvc_opts *opts; + struct uvc_camera_terminal_descriptor *cd; + struct uvc_processing_unit_descriptor *pd; + struct uvc_output_terminal_descriptor *od; + struct uvc_color_matching_descriptor *md; + struct uvc_descriptor_header **ctl_cls; + + opts = kzalloc(sizeof(*opts), GFP_KERNEL); + if (!opts) + return ERR_PTR(-ENOMEM); + opts->func_inst.free_func_inst = uvc_free_inst; + mutex_init(&opts->lock); + + cd = &opts->uvc_camera_terminal; + cd->bLength = UVC_DT_CAMERA_TERMINAL_SIZE(3); + cd->bDescriptorType = USB_DT_CS_INTERFACE; + cd->bDescriptorSubType = UVC_VC_INPUT_TERMINAL; + cd->bTerminalID = 1; + cd->wTerminalType = cpu_to_le16(0x0201); + cd->bAssocTerminal = 0; + cd->iTerminal = 0; + cd->wObjectiveFocalLengthMin = cpu_to_le16(0); + cd->wObjectiveFocalLengthMax = cpu_to_le16(0); + cd->wOcularFocalLength = cpu_to_le16(0); + cd->bControlSize = 3; + cd->bmControls[0] = 2; + cd->bmControls[1] = 0; + cd->bmControls[2] = 0; + + pd = &opts->uvc_processing; + pd->bLength = UVC_DT_PROCESSING_UNIT_SIZE(2); + pd->bDescriptorType = USB_DT_CS_INTERFACE; + pd->bDescriptorSubType = UVC_VC_PROCESSING_UNIT; + pd->bUnitID = 2; + pd->bSourceID = 1; + pd->wMaxMultiplier = cpu_to_le16(16*1024); + pd->bControlSize = 2; + pd->bmControls[0] = 1; + pd->bmControls[1] = 0; + pd->iProcessing = 0; + + od = &opts->uvc_output_terminal; + od->bLength = UVC_DT_OUTPUT_TERMINAL_SIZE; + od->bDescriptorType = USB_DT_CS_INTERFACE; + od->bDescriptorSubType = UVC_VC_OUTPUT_TERMINAL; + od->bTerminalID = 3; + od->wTerminalType = cpu_to_le16(0x0101); + od->bAssocTerminal = 0; + od->bSourceID = 2; + od->iTerminal = 0; + + md = &opts->uvc_color_matching; + md->bLength = UVC_DT_COLOR_MATCHING_SIZE; + md->bDescriptorType = USB_DT_CS_INTERFACE; + md->bDescriptorSubType = UVC_VS_COLORFORMAT; + md->bColorPrimaries = 1; + md->bTransferCharacteristics = 1; + md->bMatrixCoefficients = 4; + + /* Prepare fs control class descriptors for configfs-based gadgets */ + ctl_cls = opts->uvc_fs_control_cls; + ctl_cls[0] = NULL; /* assigned elsewhere by configfs */ + ctl_cls[1] = (struct uvc_descriptor_header *)cd; + ctl_cls[2] = (struct uvc_descriptor_header *)pd; + ctl_cls[3] = (struct uvc_descriptor_header *)od; + ctl_cls[4] = NULL; /* NULL-terminate */ + opts->fs_control = + (const struct uvc_descriptor_header * const *)ctl_cls; + + /* Prepare hs control class descriptors for configfs-based gadgets */ + ctl_cls = opts->uvc_ss_control_cls; + ctl_cls[0] = NULL; /* assigned elsewhere by configfs */ + ctl_cls[1] = (struct uvc_descriptor_header *)cd; + ctl_cls[2] = (struct uvc_descriptor_header *)pd; + ctl_cls[3] = (struct uvc_descriptor_header *)od; + ctl_cls[4] = NULL; /* NULL-terminate */ + opts->ss_control = + (const struct 
uvc_descriptor_header * const *)ctl_cls; + + opts->streaming_interval = 1; + opts->streaming_maxpacket = 1024; + + uvcg_attach_configfs(opts); + return &opts->func_inst; +} + +static void uvc_free(struct usb_function *f) +{ + struct uvc_device *uvc = to_uvc(f); + struct f_uvc_opts *opts = container_of(f->fi, struct f_uvc_opts, + func_inst); + --opts->refcnt; + kfree(uvc); +} + +static void uvc_unbind(struct usb_configuration *c, struct usb_function *f) +{ + struct usb_composite_dev *cdev = c->cdev; + struct uvc_device *uvc = to_uvc(f); + + INFO(cdev, "%s\n", __func__); + + video_unregister_device(&uvc->vdev); + v4l2_device_unregister(&uvc->v4l2_dev); + uvc->control_ep->driver_data = NULL; + uvc->video.ep->driver_data = NULL; + + usb_ep_free_request(cdev->gadget->ep0, uvc->control_req); + kfree(uvc->control_buf); + + usb_free_all_descriptors(f); +} + +static struct usb_function *uvc_alloc(struct usb_function_instance *fi) +{ + struct uvc_device *uvc; + struct f_uvc_opts *opts; + struct uvc_descriptor_header **strm_cls; + + uvc = kzalloc(sizeof(*uvc), GFP_KERNEL); + if (uvc == NULL) + return ERR_PTR(-ENOMEM); + + mutex_init(&uvc->video.mutex); + uvc->state = UVC_STATE_DISCONNECTED; + opts = fi_to_f_uvc_opts(fi); + + mutex_lock(&opts->lock); + if (opts->uvc_fs_streaming_cls) { + strm_cls = opts->uvc_fs_streaming_cls; + opts->fs_streaming = + (const struct uvc_descriptor_header * const *)strm_cls; + } + if (opts->uvc_hs_streaming_cls) { + strm_cls = opts->uvc_hs_streaming_cls; + opts->hs_streaming = + (const struct uvc_descriptor_header * const *)strm_cls; + } + if (opts->uvc_ss_streaming_cls) { + strm_cls = opts->uvc_ss_streaming_cls; + opts->ss_streaming = + (const struct uvc_descriptor_header * const *)strm_cls; + } + + uvc->desc.fs_control = opts->fs_control; + uvc->desc.ss_control = opts->ss_control; + uvc->desc.fs_streaming = opts->fs_streaming; + uvc->desc.hs_streaming = opts->hs_streaming; + uvc->desc.ss_streaming = opts->ss_streaming; + ++opts->refcnt; + mutex_unlock(&opts->lock); + + /* Register the function. */ + uvc->func.name = "uvc"; + uvc->func.bind = uvc_function_bind; + uvc->func.unbind = uvc_unbind; + uvc->func.get_alt = uvc_function_get_alt; + uvc->func.set_alt = uvc_function_set_alt; + uvc->func.disable = uvc_function_disable; + uvc->func.setup = uvc_function_setup; + uvc->func.free_func = uvc_free; + + return &uvc->func; +} + +DECLARE_USB_FUNCTION_INIT(uvc, uvc_alloc_inst, uvc_alloc); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Laurent Pinchart"); diff --git a/drivers/usb/gadget/function/f_uvc.h b/drivers/usb/gadget/function/f_uvc.h new file mode 100644 index 000000000..d0a73bdcb --- /dev/null +++ b/drivers/usb/gadget/function/f_uvc.h @@ -0,0 +1,28 @@ +/* + * f_uvc.h -- USB Video Class Gadget driver + * + * Copyright (C) 2009-2010 + * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef _F_UVC_H_ +#define _F_UVC_H_ + +#include <linux/usb/composite.h> +#include <linux/usb/video.h> + +#include "uvc.h" + +void uvc_function_setup_continue(struct uvc_device *uvc); + +void uvc_function_connect(struct uvc_device *uvc); + +void uvc_function_disconnect(struct uvc_device *uvc); + +#endif /* _F_UVC_H_ */ + diff --git a/drivers/usb/gadget/function/g_zero.h b/drivers/usb/gadget/function/g_zero.h new file mode 100644 index 000000000..15f180904 --- /dev/null +++ b/drivers/usb/gadget/function/g_zero.h @@ -0,0 +1,67 @@ +/* + * This header declares the utility functions used by "Gadget Zero", plus + * interfaces to its two single-configuration function drivers. + */ + +#ifndef __G_ZERO_H +#define __G_ZERO_H + +#define GZERO_BULK_BUFLEN 4096 +#define GZERO_QLEN 32 +#define GZERO_ISOC_INTERVAL 4 +#define GZERO_ISOC_MAXPACKET 1024 + +struct usb_zero_options { + unsigned pattern; + unsigned isoc_interval; + unsigned isoc_maxpacket; + unsigned isoc_mult; + unsigned isoc_maxburst; + unsigned bulk_buflen; + unsigned qlen; +}; + +struct f_ss_opts { + struct usb_function_instance func_inst; + unsigned pattern; + unsigned isoc_interval; + unsigned isoc_maxpacket; + unsigned isoc_mult; + unsigned isoc_maxburst; + unsigned bulk_buflen; + + /* + * Read/write access to configfs attributes is handled by configfs. + * + * This is to protect the data from concurrent access by read/write + * and create symlink/remove symlink. + */ + struct mutex lock; + int refcnt; +}; + +struct f_lb_opts { + struct usb_function_instance func_inst; + unsigned bulk_buflen; + unsigned qlen; + + /* + * Read/write access to configfs attributes is handled by configfs. + * + * This is to protect the data from concurrent access by read/write + * and create symlink/remove symlink. + */ + struct mutex lock; + int refcnt; +}; + +void lb_modexit(void); +int lb_modinit(void); + +/* common utilities */ +void free_ep_req(struct usb_ep *ep, struct usb_request *req); +void disable_endpoints(struct usb_composite_dev *cdev, + struct usb_ep *in, struct usb_ep *out, + struct usb_ep *iso_in, struct usb_ep *iso_out); + +#endif /* __G_ZERO_H */ diff --git a/drivers/usb/gadget/function/ndis.h b/drivers/usb/gadget/function/ndis.h new file mode 100644 index 000000000..a19f72dec --- /dev/null +++ b/drivers/usb/gadget/function/ndis.h @@ -0,0 +1,47 @@ +/* + * ndis.h + * + * ntddndis.h modified by Benedikt Spranger <b.spranger@pengutronix.de> + * + * Thanks to the cygwin development team, + * espacially to Casper S. Hornstrup <chorns@users.sourceforge.net> + * + * THIS SOFTWARE IS NOT COPYRIGHTED + * + * This source code is offered for use in the public domain. You may + * use, modify or distribute it freely. 
+ */ + +#ifndef _LINUX_NDIS_H +#define _LINUX_NDIS_H + +enum NDIS_DEVICE_POWER_STATE { + NdisDeviceStateUnspecified = 0, + NdisDeviceStateD0, + NdisDeviceStateD1, + NdisDeviceStateD2, + NdisDeviceStateD3, + NdisDeviceStateMaximum +}; + +struct NDIS_PM_WAKE_UP_CAPABILITIES { + enum NDIS_DEVICE_POWER_STATE MinMagicPacketWakeUp; + enum NDIS_DEVICE_POWER_STATE MinPatternWakeUp; + enum NDIS_DEVICE_POWER_STATE MinLinkChangeWakeUp; +}; + +struct NDIS_PNP_CAPABILITIES { + __le32 Flags; + struct NDIS_PM_WAKE_UP_CAPABILITIES WakeUpCapabilities; +}; + +struct NDIS_PM_PACKET_PATTERN { + __le32 Priority; + __le32 Reserved; + __le32 MaskSize; + __le32 PatternOffset; + __le32 PatternSize; + __le32 PatternFlags; +}; + +#endif /* _LINUX_NDIS_H */ diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c new file mode 100644 index 000000000..95d2324f6 --- /dev/null +++ b/drivers/usb/gadget/function/rndis.c @@ -0,0 +1,1190 @@ +/* + * RNDIS MSG parser + * + * Authors: Benedikt Spranger, Pengutronix + * Robert Schwebel, Pengutronix + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2, as published by the Free Software Foundation. + * + * This software was originally developed in conformance with + * Microsoft's Remote NDIS Specification License Agreement. + * + * 03/12/2004 Kai-Uwe Bloem <linux-development@auerswald.de> + * Fixed message length bug in init_response + * + * 03/25/2004 Kai-Uwe Bloem <linux-development@auerswald.de> + * Fixed rndis_rm_hdr length bug. + * + * Copyright (C) 2004 by David Brownell + * updates to merge with Linux 2.6, better match RNDIS spec + */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/proc_fs.h> +#include <linux/slab.h> +#include <linux/seq_file.h> +#include <linux/netdevice.h> + +#include <asm/io.h> +#include <asm/byteorder.h> +#include <asm/unaligned.h> + +#include "u_rndis.h" + +#undef VERBOSE_DEBUG + +#include "rndis.h" + + +/* The driver for your USB chip needs to support ep0 OUT to work with + * RNDIS, plus all three CDC Ethernet endpoints (interrupt not optional). + * + * Windows hosts need an INF file like Documentation/usb/linux.inf + * and will be happier if you provide the host_addr module parameter. 
+ */ + +#if 0 +static int rndis_debug = 0; +module_param (rndis_debug, int, 0); +MODULE_PARM_DESC (rndis_debug, "enable debugging"); +#else +#define rndis_debug 0 +#endif + +#define RNDIS_MAX_CONFIGS 1 + + +static rndis_params rndis_per_dev_params[RNDIS_MAX_CONFIGS]; + +/* Driver Version */ +static const __le32 rndis_driver_version = cpu_to_le32(1); + +/* Function Prototypes */ +static rndis_resp_t *rndis_add_response(int configNr, u32 length); + + +/* supported OIDs */ +static const u32 oid_supported_list[] = +{ + /* the general stuff */ + RNDIS_OID_GEN_SUPPORTED_LIST, + RNDIS_OID_GEN_HARDWARE_STATUS, + RNDIS_OID_GEN_MEDIA_SUPPORTED, + RNDIS_OID_GEN_MEDIA_IN_USE, + RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE, + RNDIS_OID_GEN_LINK_SPEED, + RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE, + RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE, + RNDIS_OID_GEN_VENDOR_ID, + RNDIS_OID_GEN_VENDOR_DESCRIPTION, + RNDIS_OID_GEN_VENDOR_DRIVER_VERSION, + RNDIS_OID_GEN_CURRENT_PACKET_FILTER, + RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE, + RNDIS_OID_GEN_MEDIA_CONNECT_STATUS, + RNDIS_OID_GEN_PHYSICAL_MEDIUM, + + /* the statistical stuff */ + RNDIS_OID_GEN_XMIT_OK, + RNDIS_OID_GEN_RCV_OK, + RNDIS_OID_GEN_XMIT_ERROR, + RNDIS_OID_GEN_RCV_ERROR, + RNDIS_OID_GEN_RCV_NO_BUFFER, +#ifdef RNDIS_OPTIONAL_STATS + RNDIS_OID_GEN_DIRECTED_BYTES_XMIT, + RNDIS_OID_GEN_DIRECTED_FRAMES_XMIT, + RNDIS_OID_GEN_MULTICAST_BYTES_XMIT, + RNDIS_OID_GEN_MULTICAST_FRAMES_XMIT, + RNDIS_OID_GEN_BROADCAST_BYTES_XMIT, + RNDIS_OID_GEN_BROADCAST_FRAMES_XMIT, + RNDIS_OID_GEN_DIRECTED_BYTES_RCV, + RNDIS_OID_GEN_DIRECTED_FRAMES_RCV, + RNDIS_OID_GEN_MULTICAST_BYTES_RCV, + RNDIS_OID_GEN_MULTICAST_FRAMES_RCV, + RNDIS_OID_GEN_BROADCAST_BYTES_RCV, + RNDIS_OID_GEN_BROADCAST_FRAMES_RCV, + RNDIS_OID_GEN_RCV_CRC_ERROR, + RNDIS_OID_GEN_TRANSMIT_QUEUE_LENGTH, +#endif /* RNDIS_OPTIONAL_STATS */ + + /* mandatory 802.3 */ + /* the general stuff */ + RNDIS_OID_802_3_PERMANENT_ADDRESS, + RNDIS_OID_802_3_CURRENT_ADDRESS, + RNDIS_OID_802_3_MULTICAST_LIST, + RNDIS_OID_802_3_MAC_OPTIONS, + RNDIS_OID_802_3_MAXIMUM_LIST_SIZE, + + /* the statistical stuff */ + RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT, + RNDIS_OID_802_3_XMIT_ONE_COLLISION, + RNDIS_OID_802_3_XMIT_MORE_COLLISIONS, +#ifdef RNDIS_OPTIONAL_STATS + RNDIS_OID_802_3_XMIT_DEFERRED, + RNDIS_OID_802_3_XMIT_MAX_COLLISIONS, + RNDIS_OID_802_3_RCV_OVERRUN, + RNDIS_OID_802_3_XMIT_UNDERRUN, + RNDIS_OID_802_3_XMIT_HEARTBEAT_FAILURE, + RNDIS_OID_802_3_XMIT_TIMES_CRS_LOST, + RNDIS_OID_802_3_XMIT_LATE_COLLISIONS, +#endif /* RNDIS_OPTIONAL_STATS */ + +#ifdef RNDIS_PM + /* PM and wakeup are "mandatory" for USB, but the RNDIS specs + * don't say what they mean ... and the NDIS specs are often + * confusing and/or ambiguous in this context. (That is, more + * so than their specs for the other OIDs.) + * + * FIXME someone who knows what these should do, please + * implement them! 
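+ *
+ * (Nothing in this file defines RNDIS_PM or RNDIS_WAKEUP, so the
+ * power management OIDs below are normally compiled out.)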
+ */ + + /* power management */ + OID_PNP_CAPABILITIES, + OID_PNP_QUERY_POWER, + OID_PNP_SET_POWER, + +#ifdef RNDIS_WAKEUP + /* wake up host */ + OID_PNP_ENABLE_WAKE_UP, + OID_PNP_ADD_WAKE_UP_PATTERN, + OID_PNP_REMOVE_WAKE_UP_PATTERN, +#endif /* RNDIS_WAKEUP */ +#endif /* RNDIS_PM */ +}; + + +/* NDIS Functions */ +static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf, + unsigned buf_len, rndis_resp_t *r) +{ + int retval = -ENOTSUPP; + u32 length = 4; /* usually */ + __le32 *outbuf; + int i, count; + rndis_query_cmplt_type *resp; + struct net_device *net; + struct rtnl_link_stats64 temp; + const struct rtnl_link_stats64 *stats; + + if (!r) return -ENOMEM; + resp = (rndis_query_cmplt_type *)r->buf; + + if (!resp) return -ENOMEM; + + if (buf_len && rndis_debug > 1) { + pr_debug("query OID %08x value, len %d:\n", OID, buf_len); + for (i = 0; i < buf_len; i += 16) { + pr_debug("%03d: %08x %08x %08x %08x\n", i, + get_unaligned_le32(&buf[i]), + get_unaligned_le32(&buf[i + 4]), + get_unaligned_le32(&buf[i + 8]), + get_unaligned_le32(&buf[i + 12])); + } + } + + /* response goes here, right after the header */ + outbuf = (__le32 *)&resp[1]; + resp->InformationBufferOffset = cpu_to_le32(16); + + net = rndis_per_dev_params[configNr].dev; + stats = dev_get_stats(net, &temp); + + switch (OID) { + + /* general oids (table 4-1) */ + + /* mandatory */ + case RNDIS_OID_GEN_SUPPORTED_LIST: + pr_debug("%s: RNDIS_OID_GEN_SUPPORTED_LIST\n", __func__); + length = sizeof(oid_supported_list); + count = length / sizeof(u32); + for (i = 0; i < count; i++) + outbuf[i] = cpu_to_le32(oid_supported_list[i]); + retval = 0; + break; + + /* mandatory */ + case RNDIS_OID_GEN_HARDWARE_STATUS: + pr_debug("%s: RNDIS_OID_GEN_HARDWARE_STATUS\n", __func__); + /* Bogus question! + * Hardware must be ready to receive high level protocols. + * BTW: + * reddite ergo quae sunt Caesaris Caesari + * et quae sunt Dei Deo! + */ + *outbuf = cpu_to_le32(0); + retval = 0; + break; + + /* mandatory */ + case RNDIS_OID_GEN_MEDIA_SUPPORTED: + pr_debug("%s: RNDIS_OID_GEN_MEDIA_SUPPORTED\n", __func__); + *outbuf = cpu_to_le32(rndis_per_dev_params[configNr].medium); + retval = 0; + break; + + /* mandatory */ + case RNDIS_OID_GEN_MEDIA_IN_USE: + pr_debug("%s: RNDIS_OID_GEN_MEDIA_IN_USE\n", __func__); + /* one medium, one transport... 
(maybe you do it better) */ + *outbuf = cpu_to_le32(rndis_per_dev_params[configNr].medium); + retval = 0; + break; + + /* mandatory */ + case RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE: + pr_debug("%s: RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE\n", __func__); + if (rndis_per_dev_params[configNr].dev) { + *outbuf = cpu_to_le32( + rndis_per_dev_params[configNr].dev->mtu); + retval = 0; + } + break; + + /* mandatory */ + case RNDIS_OID_GEN_LINK_SPEED: + if (rndis_debug > 1) + pr_debug("%s: RNDIS_OID_GEN_LINK_SPEED\n", __func__); + if (rndis_per_dev_params[configNr].media_state + == RNDIS_MEDIA_STATE_DISCONNECTED) + *outbuf = cpu_to_le32(0); + else + *outbuf = cpu_to_le32( + rndis_per_dev_params[configNr].speed); + retval = 0; + break; + + /* mandatory */ + case RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE: + pr_debug("%s: RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE\n", __func__); + if (rndis_per_dev_params[configNr].dev) { + *outbuf = cpu_to_le32( + rndis_per_dev_params[configNr].dev->mtu); + retval = 0; + } + break; + + /* mandatory */ + case RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE: + pr_debug("%s: RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE\n", __func__); + if (rndis_per_dev_params[configNr].dev) { + *outbuf = cpu_to_le32( + rndis_per_dev_params[configNr].dev->mtu); + retval = 0; + } + break; + + /* mandatory */ + case RNDIS_OID_GEN_VENDOR_ID: + pr_debug("%s: RNDIS_OID_GEN_VENDOR_ID\n", __func__); + *outbuf = cpu_to_le32( + rndis_per_dev_params[configNr].vendorID); + retval = 0; + break; + + /* mandatory */ + case RNDIS_OID_GEN_VENDOR_DESCRIPTION: + pr_debug("%s: RNDIS_OID_GEN_VENDOR_DESCRIPTION\n", __func__); + if (rndis_per_dev_params[configNr].vendorDescr) { + length = strlen(rndis_per_dev_params[configNr]. + vendorDescr); + memcpy(outbuf, + rndis_per_dev_params[configNr].vendorDescr, + length); + } else { + outbuf[0] = 0; + } + retval = 0; + break; + + case RNDIS_OID_GEN_VENDOR_DRIVER_VERSION: + pr_debug("%s: RNDIS_OID_GEN_VENDOR_DRIVER_VERSION\n", __func__); + /* Created as LE */ + *outbuf = rndis_driver_version; + retval = 0; + break; + + /* mandatory */ + case RNDIS_OID_GEN_CURRENT_PACKET_FILTER: + pr_debug("%s: RNDIS_OID_GEN_CURRENT_PACKET_FILTER\n", __func__); + *outbuf = cpu_to_le32(*rndis_per_dev_params[configNr].filter); + retval = 0; + break; + + /* mandatory */ + case RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE: + pr_debug("%s: RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE\n", __func__); + *outbuf = cpu_to_le32(RNDIS_MAX_TOTAL_SIZE); + retval = 0; + break; + + /* mandatory */ + case RNDIS_OID_GEN_MEDIA_CONNECT_STATUS: + if (rndis_debug > 1) + pr_debug("%s: RNDIS_OID_GEN_MEDIA_CONNECT_STATUS\n", __func__); + *outbuf = cpu_to_le32(rndis_per_dev_params[configNr] + .media_state); + retval = 0; + break; + + case RNDIS_OID_GEN_PHYSICAL_MEDIUM: + pr_debug("%s: RNDIS_OID_GEN_PHYSICAL_MEDIUM\n", __func__); + *outbuf = cpu_to_le32(0); + retval = 0; + break; + + /* The RNDIS specification is incomplete/wrong. Some versions + * of MS-Windows expect OIDs that aren't specified there. Other + * versions emit undefined RNDIS messages. DOCUMENT ALL THESE! 
+ */ + case RNDIS_OID_GEN_MAC_OPTIONS: /* from WinME */ + pr_debug("%s: RNDIS_OID_GEN_MAC_OPTIONS\n", __func__); + *outbuf = cpu_to_le32( + RNDIS_MAC_OPTION_RECEIVE_SERIALIZED + | RNDIS_MAC_OPTION_FULL_DUPLEX); + retval = 0; + break; + + /* statistics OIDs (table 4-2) */ + + /* mandatory */ + case RNDIS_OID_GEN_XMIT_OK: + if (rndis_debug > 1) + pr_debug("%s: RNDIS_OID_GEN_XMIT_OK\n", __func__); + if (stats) { + *outbuf = cpu_to_le32(stats->tx_packets + - stats->tx_errors - stats->tx_dropped); + retval = 0; + } + break; + + /* mandatory */ + case RNDIS_OID_GEN_RCV_OK: + if (rndis_debug > 1) + pr_debug("%s: RNDIS_OID_GEN_RCV_OK\n", __func__); + if (stats) { + *outbuf = cpu_to_le32(stats->rx_packets + - stats->rx_errors - stats->rx_dropped); + retval = 0; + } + break; + + /* mandatory */ + case RNDIS_OID_GEN_XMIT_ERROR: + if (rndis_debug > 1) + pr_debug("%s: RNDIS_OID_GEN_XMIT_ERROR\n", __func__); + if (stats) { + *outbuf = cpu_to_le32(stats->tx_errors); + retval = 0; + } + break; + + /* mandatory */ + case RNDIS_OID_GEN_RCV_ERROR: + if (rndis_debug > 1) + pr_debug("%s: RNDIS_OID_GEN_RCV_ERROR\n", __func__); + if (stats) { + *outbuf = cpu_to_le32(stats->rx_errors); + retval = 0; + } + break; + + /* mandatory */ + case RNDIS_OID_GEN_RCV_NO_BUFFER: + pr_debug("%s: RNDIS_OID_GEN_RCV_NO_BUFFER\n", __func__); + if (stats) { + *outbuf = cpu_to_le32(stats->rx_dropped); + retval = 0; + } + break; + + /* ieee802.3 OIDs (table 4-3) */ + + /* mandatory */ + case RNDIS_OID_802_3_PERMANENT_ADDRESS: + pr_debug("%s: RNDIS_OID_802_3_PERMANENT_ADDRESS\n", __func__); + if (rndis_per_dev_params[configNr].dev) { + length = ETH_ALEN; + memcpy(outbuf, + rndis_per_dev_params[configNr].host_mac, + length); + retval = 0; + } + break; + + /* mandatory */ + case RNDIS_OID_802_3_CURRENT_ADDRESS: + pr_debug("%s: RNDIS_OID_802_3_CURRENT_ADDRESS\n", __func__); + if (rndis_per_dev_params[configNr].dev) { + length = ETH_ALEN; + memcpy(outbuf, + rndis_per_dev_params [configNr].host_mac, + length); + retval = 0; + } + break; + + /* mandatory */ + case RNDIS_OID_802_3_MULTICAST_LIST: + pr_debug("%s: RNDIS_OID_802_3_MULTICAST_LIST\n", __func__); + /* Multicast base address only */ + *outbuf = cpu_to_le32(0xE0000000); + retval = 0; + break; + + /* mandatory */ + case RNDIS_OID_802_3_MAXIMUM_LIST_SIZE: + pr_debug("%s: RNDIS_OID_802_3_MAXIMUM_LIST_SIZE\n", __func__); + /* Multicast base address only */ + *outbuf = cpu_to_le32(1); + retval = 0; + break; + + case RNDIS_OID_802_3_MAC_OPTIONS: + pr_debug("%s: RNDIS_OID_802_3_MAC_OPTIONS\n", __func__); + *outbuf = cpu_to_le32(0); + retval = 0; + break; + + /* ieee802.3 statistics OIDs (table 4-4) */ + + /* mandatory */ + case RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT: + pr_debug("%s: RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT\n", __func__); + if (stats) { + *outbuf = cpu_to_le32(stats->rx_frame_errors); + retval = 0; + } + break; + + /* mandatory */ + case RNDIS_OID_802_3_XMIT_ONE_COLLISION: + pr_debug("%s: RNDIS_OID_802_3_XMIT_ONE_COLLISION\n", __func__); + *outbuf = cpu_to_le32(0); + retval = 0; + break; + + /* mandatory */ + case RNDIS_OID_802_3_XMIT_MORE_COLLISIONS: + pr_debug("%s: RNDIS_OID_802_3_XMIT_MORE_COLLISIONS\n", __func__); + *outbuf = cpu_to_le32(0); + retval = 0; + break; + + default: + pr_warning("%s: query unknown OID 0x%08X\n", + __func__, OID); + } + if (retval < 0) + length = 0; + + resp->InformationBufferLength = cpu_to_le32(length); + r->length = length + sizeof(*resp); + resp->MessageLength = cpu_to_le32(r->length); + return retval; +} + +static int gen_ndis_set_resp(u8 
configNr, u32 OID, u8 *buf, u32 buf_len, + rndis_resp_t *r) +{ + rndis_set_cmplt_type *resp; + int i, retval = -ENOTSUPP; + struct rndis_params *params; + + if (!r) + return -ENOMEM; + resp = (rndis_set_cmplt_type *)r->buf; + if (!resp) + return -ENOMEM; + + if (buf_len && rndis_debug > 1) { + pr_debug("set OID %08x value, len %d:\n", OID, buf_len); + for (i = 0; i < buf_len; i += 16) { + pr_debug("%03d: %08x %08x %08x %08x\n", i, + get_unaligned_le32(&buf[i]), + get_unaligned_le32(&buf[i + 4]), + get_unaligned_le32(&buf[i + 8]), + get_unaligned_le32(&buf[i + 12])); + } + } + + params = &rndis_per_dev_params[configNr]; + switch (OID) { + case RNDIS_OID_GEN_CURRENT_PACKET_FILTER: + + /* these NDIS_PACKET_TYPE_* bitflags are shared with + * cdc_filter; it's not RNDIS-specific + * NDIS_PACKET_TYPE_x == USB_CDC_PACKET_TYPE_x for x in: + * PROMISCUOUS, DIRECTED, + * MULTICAST, ALL_MULTICAST, BROADCAST + */ + *params->filter = (u16)get_unaligned_le32(buf); + pr_debug("%s: RNDIS_OID_GEN_CURRENT_PACKET_FILTER %08x\n", + __func__, *params->filter); + + /* this call has a significant side effect: it's + * what makes the packet flow start and stop, like + * activating the CDC Ethernet altsetting. + */ + retval = 0; + if (*params->filter) { + params->state = RNDIS_DATA_INITIALIZED; + netif_carrier_on(params->dev); + if (netif_running(params->dev)) + netif_wake_queue(params->dev); + } else { + params->state = RNDIS_INITIALIZED; + netif_carrier_off(params->dev); + netif_stop_queue(params->dev); + } + break; + + case RNDIS_OID_802_3_MULTICAST_LIST: + /* I think we can ignore this */ + pr_debug("%s: RNDIS_OID_802_3_MULTICAST_LIST\n", __func__); + retval = 0; + break; + + default: + pr_warning("%s: set unknown OID 0x%08X, size %d\n", + __func__, OID, buf_len); + } + + return retval; +} + +/* + * Response Functions + */ + +static int rndis_init_response(int configNr, rndis_init_msg_type *buf) +{ + rndis_init_cmplt_type *resp; + rndis_resp_t *r; + struct rndis_params *params = rndis_per_dev_params + configNr; + + if (!params->dev) + return -ENOTSUPP; + + r = rndis_add_response(configNr, sizeof(rndis_init_cmplt_type)); + if (!r) + return -ENOMEM; + resp = (rndis_init_cmplt_type *)r->buf; + + resp->MessageType = cpu_to_le32(RNDIS_MSG_INIT_C); + resp->MessageLength = cpu_to_le32(52); + resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ + resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS); + resp->MajorVersion = cpu_to_le32(RNDIS_MAJOR_VERSION); + resp->MinorVersion = cpu_to_le32(RNDIS_MINOR_VERSION); + resp->DeviceFlags = cpu_to_le32(RNDIS_DF_CONNECTIONLESS); + resp->Medium = cpu_to_le32(RNDIS_MEDIUM_802_3); + resp->MaxPacketsPerTransfer = cpu_to_le32(1); + resp->MaxTransferSize = cpu_to_le32( + params->dev->mtu + + sizeof(struct ethhdr) + + sizeof(struct rndis_packet_msg_type) + + 22); + resp->PacketAlignmentFactor = cpu_to_le32(0); + resp->AFListOffset = cpu_to_le32(0); + resp->AFListSize = cpu_to_le32(0); + + params->resp_avail(params->v); + return 0; +} + +static int rndis_query_response(int configNr, rndis_query_msg_type *buf) +{ + rndis_query_cmplt_type *resp; + rndis_resp_t *r; + struct rndis_params *params = rndis_per_dev_params + configNr; + + /* pr_debug("%s: OID = %08X\n", __func__, cpu_to_le32(buf->OID)); */ + if (!params->dev) + return -ENOTSUPP; + + /* + * we need more memory: + * gen_ndis_query_resp expects enough space for + * rndis_query_cmplt_type followed by data. 
+ * oid_supported_list is the largest data reply + */ + r = rndis_add_response(configNr, + sizeof(oid_supported_list) + sizeof(rndis_query_cmplt_type)); + if (!r) + return -ENOMEM; + resp = (rndis_query_cmplt_type *)r->buf; + + resp->MessageType = cpu_to_le32(RNDIS_MSG_QUERY_C); + resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ + + if (gen_ndis_query_resp(configNr, le32_to_cpu(buf->OID), + le32_to_cpu(buf->InformationBufferOffset) + + 8 + (u8 *)buf, + le32_to_cpu(buf->InformationBufferLength), + r)) { + /* OID not supported */ + resp->Status = cpu_to_le32(RNDIS_STATUS_NOT_SUPPORTED); + resp->MessageLength = cpu_to_le32(sizeof *resp); + resp->InformationBufferLength = cpu_to_le32(0); + resp->InformationBufferOffset = cpu_to_le32(0); + } else + resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS); + + params->resp_avail(params->v); + return 0; +} + +static int rndis_set_response(int configNr, rndis_set_msg_type *buf) +{ + u32 BufLength, BufOffset; + rndis_set_cmplt_type *resp; + rndis_resp_t *r; + struct rndis_params *params = rndis_per_dev_params + configNr; + + r = rndis_add_response(configNr, sizeof(rndis_set_cmplt_type)); + if (!r) + return -ENOMEM; + resp = (rndis_set_cmplt_type *)r->buf; + + BufLength = le32_to_cpu(buf->InformationBufferLength); + BufOffset = le32_to_cpu(buf->InformationBufferOffset); + +#ifdef VERBOSE_DEBUG + pr_debug("%s: Length: %d\n", __func__, BufLength); + pr_debug("%s: Offset: %d\n", __func__, BufOffset); + pr_debug("%s: InfoBuffer: ", __func__); + + for (i = 0; i < BufLength; i++) { + pr_debug("%02x ", *(((u8 *) buf) + i + 8 + BufOffset)); + } + + pr_debug("\n"); +#endif + + resp->MessageType = cpu_to_le32(RNDIS_MSG_SET_C); + resp->MessageLength = cpu_to_le32(16); + resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ + if (gen_ndis_set_resp(configNr, le32_to_cpu(buf->OID), + ((u8 *)buf) + 8 + BufOffset, BufLength, r)) + resp->Status = cpu_to_le32(RNDIS_STATUS_NOT_SUPPORTED); + else + resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS); + + params->resp_avail(params->v); + return 0; +} + +static int rndis_reset_response(int configNr, rndis_reset_msg_type *buf) +{ + rndis_reset_cmplt_type *resp; + rndis_resp_t *r; + struct rndis_params *params = rndis_per_dev_params + configNr; + + r = rndis_add_response(configNr, sizeof(rndis_reset_cmplt_type)); + if (!r) + return -ENOMEM; + resp = (rndis_reset_cmplt_type *)r->buf; + + resp->MessageType = cpu_to_le32(RNDIS_MSG_RESET_C); + resp->MessageLength = cpu_to_le32(16); + resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS); + /* resent information */ + resp->AddressingReset = cpu_to_le32(1); + + params->resp_avail(params->v); + return 0; +} + +static int rndis_keepalive_response(int configNr, + rndis_keepalive_msg_type *buf) +{ + rndis_keepalive_cmplt_type *resp; + rndis_resp_t *r; + struct rndis_params *params = rndis_per_dev_params + configNr; + + /* host "should" check only in RNDIS_DATA_INITIALIZED state */ + + r = rndis_add_response(configNr, sizeof(rndis_keepalive_cmplt_type)); + if (!r) + return -ENOMEM; + resp = (rndis_keepalive_cmplt_type *)r->buf; + + resp->MessageType = cpu_to_le32(RNDIS_MSG_KEEPALIVE_C); + resp->MessageLength = cpu_to_le32(16); + resp->RequestID = buf->RequestID; /* Still LE in msg buffer */ + resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS); + + params->resp_avail(params->v); + return 0; +} + + +/* + * Device to Host Comunication + */ +static int rndis_indicate_status_msg(int configNr, u32 status) +{ + rndis_indicate_status_msg_type *resp; + rndis_resp_t *r; + struct 
rndis_params *params = rndis_per_dev_params + configNr; + + if (params->state == RNDIS_UNINITIALIZED) + return -ENOTSUPP; + + r = rndis_add_response(configNr, + sizeof(rndis_indicate_status_msg_type)); + if (!r) + return -ENOMEM; + resp = (rndis_indicate_status_msg_type *)r->buf; + + resp->MessageType = cpu_to_le32(RNDIS_MSG_INDICATE); + resp->MessageLength = cpu_to_le32(20); + resp->Status = cpu_to_le32(status); + resp->StatusBufferLength = cpu_to_le32(0); + resp->StatusBufferOffset = cpu_to_le32(0); + + params->resp_avail(params->v); + return 0; +} + +int rndis_signal_connect(int configNr) +{ + rndis_per_dev_params[configNr].media_state + = RNDIS_MEDIA_STATE_CONNECTED; + return rndis_indicate_status_msg(configNr, + RNDIS_STATUS_MEDIA_CONNECT); +} +EXPORT_SYMBOL_GPL(rndis_signal_connect); + +int rndis_signal_disconnect(int configNr) +{ + rndis_per_dev_params[configNr].media_state + = RNDIS_MEDIA_STATE_DISCONNECTED; + return rndis_indicate_status_msg(configNr, + RNDIS_STATUS_MEDIA_DISCONNECT); +} +EXPORT_SYMBOL_GPL(rndis_signal_disconnect); + +void rndis_uninit(int configNr) +{ + u8 *buf; + u32 length; + + if (configNr >= RNDIS_MAX_CONFIGS) + return; + rndis_per_dev_params[configNr].state = RNDIS_UNINITIALIZED; + + /* drain the response queue */ + while ((buf = rndis_get_next_response(configNr, &length))) + rndis_free_response(configNr, buf); +} +EXPORT_SYMBOL_GPL(rndis_uninit); + +void rndis_set_host_mac(int configNr, const u8 *addr) +{ + rndis_per_dev_params[configNr].host_mac = addr; +} +EXPORT_SYMBOL_GPL(rndis_set_host_mac); + +/* + * Message Parser + */ +int rndis_msg_parser(u8 configNr, u8 *buf) +{ + u32 MsgType, MsgLength; + __le32 *tmp; + struct rndis_params *params; + + if (!buf) + return -ENOMEM; + + tmp = (__le32 *)buf; + MsgType = get_unaligned_le32(tmp++); + MsgLength = get_unaligned_le32(tmp++); + + if (configNr >= RNDIS_MAX_CONFIGS) + return -ENOTSUPP; + params = &rndis_per_dev_params[configNr]; + + /* NOTE: RNDIS is *EXTREMELY* chatty ... Windows constantly polls for + * rx/tx statistics and link status, in addition to KEEPALIVE traffic + * and normal HC level polling to see if there's any IN traffic. + */ + + /* For USB: responses may take up to 10 seconds */ + switch (MsgType) { + case RNDIS_MSG_INIT: + pr_debug("%s: RNDIS_MSG_INIT\n", + __func__); + params->state = RNDIS_INITIALIZED; + return rndis_init_response(configNr, + (rndis_init_msg_type *)buf); + + case RNDIS_MSG_HALT: + pr_debug("%s: RNDIS_MSG_HALT\n", + __func__); + params->state = RNDIS_UNINITIALIZED; + if (params->dev) { + netif_carrier_off(params->dev); + netif_stop_queue(params->dev); + } + return 0; + + case RNDIS_MSG_QUERY: + return rndis_query_response(configNr, + (rndis_query_msg_type *)buf); + + case RNDIS_MSG_SET: + return rndis_set_response(configNr, + (rndis_set_msg_type *)buf); + + case RNDIS_MSG_RESET: + pr_debug("%s: RNDIS_MSG_RESET\n", + __func__); + return rndis_reset_response(configNr, + (rndis_reset_msg_type *)buf); + + case RNDIS_MSG_KEEPALIVE: + /* For USB: host does this every 5 seconds */ + if (rndis_debug > 1) + pr_debug("%s: RNDIS_MSG_KEEPALIVE\n", + __func__); + return rndis_keepalive_response(configNr, + (rndis_keepalive_msg_type *) + buf); + + default: + /* At least Windows XP emits some undefined RNDIS messages. + * In one case those messages seemed to relate to the host + * suspending itself. 
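+ *
+ * Such messages are only logged and hex-dumped below; the parser
+ * then returns -ENOTSUPP.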
+ */ + pr_warning("%s: unknown RNDIS message 0x%08X len %d\n", + __func__, MsgType, MsgLength); + print_hex_dump_bytes(__func__, DUMP_PREFIX_OFFSET, + buf, MsgLength); + break; + } + + return -ENOTSUPP; +} +EXPORT_SYMBOL_GPL(rndis_msg_parser); + +int rndis_register(void (*resp_avail)(void *v), void *v) +{ + u8 i; + + if (!resp_avail) + return -EINVAL; + + for (i = 0; i < RNDIS_MAX_CONFIGS; i++) { + if (!rndis_per_dev_params[i].used) { + rndis_per_dev_params[i].used = 1; + rndis_per_dev_params[i].resp_avail = resp_avail; + rndis_per_dev_params[i].v = v; + pr_debug("%s: configNr = %d\n", __func__, i); + return i; + } + } + pr_debug("failed\n"); + + return -ENODEV; +} +EXPORT_SYMBOL_GPL(rndis_register); + +void rndis_deregister(int configNr) +{ + pr_debug("%s:\n", __func__); + + if (configNr >= RNDIS_MAX_CONFIGS) return; + rndis_per_dev_params[configNr].used = 0; +} +EXPORT_SYMBOL_GPL(rndis_deregister); + +int rndis_set_param_dev(u8 configNr, struct net_device *dev, u16 *cdc_filter) +{ + pr_debug("%s:\n", __func__); + if (!dev) + return -EINVAL; + if (configNr >= RNDIS_MAX_CONFIGS) return -1; + + rndis_per_dev_params[configNr].dev = dev; + rndis_per_dev_params[configNr].filter = cdc_filter; + + return 0; +} +EXPORT_SYMBOL_GPL(rndis_set_param_dev); + +int rndis_set_param_vendor(u8 configNr, u32 vendorID, const char *vendorDescr) +{ + pr_debug("%s:\n", __func__); + if (!vendorDescr) return -1; + if (configNr >= RNDIS_MAX_CONFIGS) return -1; + + rndis_per_dev_params[configNr].vendorID = vendorID; + rndis_per_dev_params[configNr].vendorDescr = vendorDescr; + + return 0; +} +EXPORT_SYMBOL_GPL(rndis_set_param_vendor); + +int rndis_set_param_medium(u8 configNr, u32 medium, u32 speed) +{ + pr_debug("%s: %u %u\n", __func__, medium, speed); + if (configNr >= RNDIS_MAX_CONFIGS) return -1; + + rndis_per_dev_params[configNr].medium = medium; + rndis_per_dev_params[configNr].speed = speed; + + return 0; +} +EXPORT_SYMBOL_GPL(rndis_set_param_medium); + +void rndis_add_hdr(struct sk_buff *skb) +{ + struct rndis_packet_msg_type *header; + + if (!skb) + return; + header = (void *)skb_push(skb, sizeof(*header)); + memset(header, 0, sizeof *header); + header->MessageType = cpu_to_le32(RNDIS_MSG_PACKET); + header->MessageLength = cpu_to_le32(skb->len); + header->DataOffset = cpu_to_le32(36); + header->DataLength = cpu_to_le32(skb->len - sizeof(*header)); +} +EXPORT_SYMBOL_GPL(rndis_add_hdr); + +void rndis_free_response(int configNr, u8 *buf) +{ + rndis_resp_t *r; + struct list_head *act, *tmp; + + list_for_each_safe(act, tmp, + &(rndis_per_dev_params[configNr].resp_queue)) + { + r = list_entry(act, rndis_resp_t, list); + if (r && r->buf == buf) { + list_del(&r->list); + kfree(r); + } + } +} +EXPORT_SYMBOL_GPL(rndis_free_response); + +u8 *rndis_get_next_response(int configNr, u32 *length) +{ + rndis_resp_t *r; + struct list_head *act, *tmp; + + if (!length) return NULL; + + list_for_each_safe(act, tmp, + &(rndis_per_dev_params[configNr].resp_queue)) + { + r = list_entry(act, rndis_resp_t, list); + if (!r->send) { + r->send = 1; + *length = r->length; + return r->buf; + } + } + + return NULL; +} +EXPORT_SYMBOL_GPL(rndis_get_next_response); + +static rndis_resp_t *rndis_add_response(int configNr, u32 length) +{ + rndis_resp_t *r; + + /* NOTE: this gets copied into ether.c USB_BUFSIZ bytes ... 
*/ + r = kmalloc(sizeof(rndis_resp_t) + length, GFP_ATOMIC); + if (!r) return NULL; + + r->buf = (u8 *)(r + 1); + r->length = length; + r->send = 0; + + list_add_tail(&r->list, + &(rndis_per_dev_params[configNr].resp_queue)); + return r; +} + +int rndis_rm_hdr(struct gether *port, + struct sk_buff *skb, + struct sk_buff_head *list) +{ + /* tmp points to a struct rndis_packet_msg_type */ + __le32 *tmp = (void *)skb->data; + + /* MessageType, MessageLength */ + if (cpu_to_le32(RNDIS_MSG_PACKET) + != get_unaligned(tmp++)) { + dev_kfree_skb_any(skb); + return -EINVAL; + } + tmp++; + + /* DataOffset, DataLength */ + if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8)) { + dev_kfree_skb_any(skb); + return -EOVERFLOW; + } + skb_trim(skb, get_unaligned_le32(tmp++)); + + skb_queue_tail(list, skb); + return 0; +} +EXPORT_SYMBOL_GPL(rndis_rm_hdr); + +#ifdef CONFIG_USB_GADGET_DEBUG_FILES + +static int rndis_proc_show(struct seq_file *m, void *v) +{ + rndis_params *param = m->private; + + seq_printf(m, + "Config Nr. %d\n" + "used : %s\n" + "state : %s\n" + "medium : 0x%08X\n" + "speed : %d\n" + "cable : %s\n" + "vendor ID : 0x%08X\n" + "vendor : %s\n", + param->confignr, (param->used) ? "y" : "n", + ({ char *s = "?"; + switch (param->state) { + case RNDIS_UNINITIALIZED: + s = "RNDIS_UNINITIALIZED"; break; + case RNDIS_INITIALIZED: + s = "RNDIS_INITIALIZED"; break; + case RNDIS_DATA_INITIALIZED: + s = "RNDIS_DATA_INITIALIZED"; break; + } s; }), + param->medium, + (param->media_state) ? 0 : param->speed*100, + (param->media_state) ? "disconnected" : "connected", + param->vendorID, param->vendorDescr); + return 0; +} + +static ssize_t rndis_proc_write(struct file *file, const char __user *buffer, + size_t count, loff_t *ppos) +{ + rndis_params *p = PDE_DATA(file_inode(file)); + u32 speed = 0; + int i, fl_speed = 0; + + for (i = 0; i < count; i++) { + char c; + if (get_user(c, buffer)) + return -EFAULT; + switch (c) { + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + fl_speed = 1; + speed = speed * 10 + c - '0'; + break; + case 'C': + case 'c': + rndis_signal_connect(p->confignr); + break; + case 'D': + case 'd': + rndis_signal_disconnect(p->confignr); + break; + default: + if (fl_speed) p->speed = speed; + else pr_debug("%c is not valid\n", c); + break; + } + + buffer++; + } + + return count; +} + +static int rndis_proc_open(struct inode *inode, struct file *file) +{ + return single_open(file, rndis_proc_show, PDE_DATA(inode)); +} + +static const struct file_operations rndis_proc_fops = { + .owner = THIS_MODULE, + .open = rndis_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = rndis_proc_write, +}; + +#define NAME_TEMPLATE "driver/rndis-%03d" + +static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS]; + +#endif /* CONFIG_USB_GADGET_DEBUG_FILES */ + + +int rndis_init(void) +{ + u8 i; + + for (i = 0; i < RNDIS_MAX_CONFIGS; i++) { +#ifdef CONFIG_USB_GADGET_DEBUG_FILES + char name [20]; + + sprintf(name, NAME_TEMPLATE, i); + rndis_connect_state[i] = proc_create_data(name, 0660, NULL, + &rndis_proc_fops, + (void *)(rndis_per_dev_params + i)); + if (!rndis_connect_state[i]) { + pr_debug("%s: remove entries", __func__); + while (i) { + sprintf(name, NAME_TEMPLATE, --i); + remove_proc_entry(name, NULL); + } + pr_debug("\n"); + return -EIO; + } +#endif + rndis_per_dev_params[i].confignr = i; + rndis_per_dev_params[i].used = 0; + rndis_per_dev_params[i].state = RNDIS_UNINITIALIZED; + 
rndis_per_dev_params[i].media_state + = RNDIS_MEDIA_STATE_DISCONNECTED; + INIT_LIST_HEAD(&(rndis_per_dev_params[i].resp_queue)); + } + + return 0; +} + +void rndis_exit(void) +{ +#ifdef CONFIG_USB_GADGET_DEBUG_FILES + u8 i; + char name[20]; + + for (i = 0; i < RNDIS_MAX_CONFIGS; i++) { + sprintf(name, NAME_TEMPLATE, i); + remove_proc_entry(name, NULL); + } +#endif +} + diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h new file mode 100644 index 000000000..0f4abb4c3 --- /dev/null +++ b/drivers/usb/gadget/function/rndis.h @@ -0,0 +1,220 @@ +/* + * RNDIS Definitions for Remote NDIS + * + * Authors: Benedikt Spranger, Pengutronix + * Robert Schwebel, Pengutronix + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2, as published by the Free Software Foundation. + * + * This software was originally developed in conformance with + * Microsoft's Remote NDIS Specification License Agreement. + */ + +#ifndef _LINUX_RNDIS_H +#define _LINUX_RNDIS_H + +#include <linux/rndis.h> +#include "u_ether.h" +#include "ndis.h" + +#define RNDIS_MAXIMUM_FRAME_SIZE 1518 +#define RNDIS_MAX_TOTAL_SIZE 1558 + +typedef struct rndis_init_msg_type +{ + __le32 MessageType; + __le32 MessageLength; + __le32 RequestID; + __le32 MajorVersion; + __le32 MinorVersion; + __le32 MaxTransferSize; +} rndis_init_msg_type; + +typedef struct rndis_init_cmplt_type +{ + __le32 MessageType; + __le32 MessageLength; + __le32 RequestID; + __le32 Status; + __le32 MajorVersion; + __le32 MinorVersion; + __le32 DeviceFlags; + __le32 Medium; + __le32 MaxPacketsPerTransfer; + __le32 MaxTransferSize; + __le32 PacketAlignmentFactor; + __le32 AFListOffset; + __le32 AFListSize; +} rndis_init_cmplt_type; + +typedef struct rndis_halt_msg_type +{ + __le32 MessageType; + __le32 MessageLength; + __le32 RequestID; +} rndis_halt_msg_type; + +typedef struct rndis_query_msg_type +{ + __le32 MessageType; + __le32 MessageLength; + __le32 RequestID; + __le32 OID; + __le32 InformationBufferLength; + __le32 InformationBufferOffset; + __le32 DeviceVcHandle; +} rndis_query_msg_type; + +typedef struct rndis_query_cmplt_type +{ + __le32 MessageType; + __le32 MessageLength; + __le32 RequestID; + __le32 Status; + __le32 InformationBufferLength; + __le32 InformationBufferOffset; +} rndis_query_cmplt_type; + +typedef struct rndis_set_msg_type +{ + __le32 MessageType; + __le32 MessageLength; + __le32 RequestID; + __le32 OID; + __le32 InformationBufferLength; + __le32 InformationBufferOffset; + __le32 DeviceVcHandle; +} rndis_set_msg_type; + +typedef struct rndis_set_cmplt_type +{ + __le32 MessageType; + __le32 MessageLength; + __le32 RequestID; + __le32 Status; +} rndis_set_cmplt_type; + +typedef struct rndis_reset_msg_type +{ + __le32 MessageType; + __le32 MessageLength; + __le32 Reserved; +} rndis_reset_msg_type; + +typedef struct rndis_reset_cmplt_type +{ + __le32 MessageType; + __le32 MessageLength; + __le32 Status; + __le32 AddressingReset; +} rndis_reset_cmplt_type; + +typedef struct rndis_indicate_status_msg_type +{ + __le32 MessageType; + __le32 MessageLength; + __le32 Status; + __le32 StatusBufferLength; + __le32 StatusBufferOffset; +} rndis_indicate_status_msg_type; + +typedef struct rndis_keepalive_msg_type +{ + __le32 MessageType; + __le32 MessageLength; + __le32 RequestID; +} rndis_keepalive_msg_type; + +typedef struct rndis_keepalive_cmplt_type +{ + __le32 MessageType; + __le32 MessageLength; + __le32 RequestID; + __le32 Status; +} 
rndis_keepalive_cmplt_type; + +struct rndis_packet_msg_type +{ + __le32 MessageType; + __le32 MessageLength; + __le32 DataOffset; + __le32 DataLength; + __le32 OOBDataOffset; + __le32 OOBDataLength; + __le32 NumOOBDataElements; + __le32 PerPacketInfoOffset; + __le32 PerPacketInfoLength; + __le32 VcHandle; + __le32 Reserved; +} __attribute__ ((packed)); + +struct rndis_config_parameter +{ + __le32 ParameterNameOffset; + __le32 ParameterNameLength; + __le32 ParameterType; + __le32 ParameterValueOffset; + __le32 ParameterValueLength; +}; + +/* implementation specific */ +enum rndis_state +{ + RNDIS_UNINITIALIZED, + RNDIS_INITIALIZED, + RNDIS_DATA_INITIALIZED, +}; + +typedef struct rndis_resp_t +{ + struct list_head list; + u8 *buf; + u32 length; + int send; +} rndis_resp_t; + +typedef struct rndis_params +{ + u8 confignr; + u8 used; + u16 saved_filter; + enum rndis_state state; + u32 medium; + u32 speed; + u32 media_state; + + const u8 *host_mac; + u16 *filter; + struct net_device *dev; + + u32 vendorID; + const char *vendorDescr; + void (*resp_avail)(void *v); + void *v; + struct list_head resp_queue; +} rndis_params; + +/* RNDIS Message parser and other useless functions */ +int rndis_msg_parser (u8 configNr, u8 *buf); +int rndis_register(void (*resp_avail)(void *v), void *v); +void rndis_deregister (int configNr); +int rndis_set_param_dev (u8 configNr, struct net_device *dev, + u16 *cdc_filter); +int rndis_set_param_vendor (u8 configNr, u32 vendorID, + const char *vendorDescr); +int rndis_set_param_medium (u8 configNr, u32 medium, u32 speed); +void rndis_add_hdr (struct sk_buff *skb); +int rndis_rm_hdr(struct gether *port, struct sk_buff *skb, + struct sk_buff_head *list); +u8 *rndis_get_next_response (int configNr, u32 *length); +void rndis_free_response (int configNr, u8 *buf); + +void rndis_uninit (int configNr); +int rndis_signal_connect (int configNr); +int rndis_signal_disconnect (int configNr); +int rndis_state (int configNr); +extern void rndis_set_host_mac (int configNr, const u8 *addr); + +#endif /* _LINUX_RNDIS_H */ diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c new file mode 100644 index 000000000..648f9e489 --- /dev/null +++ b/drivers/usb/gadget/function/storage_common.c @@ -0,0 +1,504 @@ +/* + * storage_common.c -- Common definitions for mass storage functionality + * + * Copyright (C) 2003-2008 Alan Stern + * Copyeight (C) 2009 Samsung Electronics + * Author: Michal Nazarewicz (mina86@mina86.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +/* + * This file requires the following identifiers used in USB strings to + * be defined (each of type pointer to char): + * - fsg_string_interface -- name of the interface + */ + +/* + * When USB_GADGET_DEBUG_FILES is defined the module param num_buffers + * sets the number of pipeline buffers (length of the fsg_buffhd array). + * The valid range of num_buffers is: num >= 2 && num <= 4. + */ + +#include <linux/module.h> +#include <linux/blkdev.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/usb/composite.h> + +#include "storage_common.h" + +/* There is only one interface. 
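+ *
+ * With the bulk-only transport it needs just one bulk-in and one
+ * bulk-out endpoint; the per-speed descriptor arrays below are what
+ * the mass storage function hands to the composite layer.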
*/ + +struct usb_interface_descriptor fsg_intf_desc = { + .bLength = sizeof fsg_intf_desc, + .bDescriptorType = USB_DT_INTERFACE, + + .bNumEndpoints = 2, /* Adjusted during fsg_bind() */ + .bInterfaceClass = USB_CLASS_MASS_STORAGE, + .bInterfaceSubClass = USB_SC_SCSI, /* Adjusted during fsg_bind() */ + .bInterfaceProtocol = USB_PR_BULK, /* Adjusted during fsg_bind() */ + .iInterface = FSG_STRING_INTERFACE, +}; +EXPORT_SYMBOL_GPL(fsg_intf_desc); + +/* + * Three full-speed endpoint descriptors: bulk-in, bulk-out, and + * interrupt-in. + */ + +struct usb_endpoint_descriptor fsg_fs_bulk_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_IN, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + /* wMaxPacketSize set by autoconfiguration */ +}; +EXPORT_SYMBOL_GPL(fsg_fs_bulk_in_desc); + +struct usb_endpoint_descriptor fsg_fs_bulk_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + .bEndpointAddress = USB_DIR_OUT, + .bmAttributes = USB_ENDPOINT_XFER_BULK, + /* wMaxPacketSize set by autoconfiguration */ +}; +EXPORT_SYMBOL_GPL(fsg_fs_bulk_out_desc); + +struct usb_descriptor_header *fsg_fs_function[] = { + (struct usb_descriptor_header *) &fsg_intf_desc, + (struct usb_descriptor_header *) &fsg_fs_bulk_in_desc, + (struct usb_descriptor_header *) &fsg_fs_bulk_out_desc, + NULL, +}; +EXPORT_SYMBOL_GPL(fsg_fs_function); + + +/* + * USB 2.0 devices need to expose both high speed and full speed + * descriptors, unless they only run at full speed. + * + * That means alternate endpoint descriptors (bigger packets) + * and a "device qualifier" ... plus more construction options + * for the configuration descriptor. + */ +struct usb_endpoint_descriptor fsg_hs_bulk_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + /* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */ + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), +}; +EXPORT_SYMBOL_GPL(fsg_hs_bulk_in_desc); + +struct usb_endpoint_descriptor fsg_hs_bulk_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + /* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */ + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(512), + .bInterval = 1, /* NAK every 1 uframe */ +}; +EXPORT_SYMBOL_GPL(fsg_hs_bulk_out_desc); + + +struct usb_descriptor_header *fsg_hs_function[] = { + (struct usb_descriptor_header *) &fsg_intf_desc, + (struct usb_descriptor_header *) &fsg_hs_bulk_in_desc, + (struct usb_descriptor_header *) &fsg_hs_bulk_out_desc, + NULL, +}; +EXPORT_SYMBOL_GPL(fsg_hs_function); + +struct usb_endpoint_descriptor fsg_ss_bulk_in_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + /* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */ + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(1024), +}; +EXPORT_SYMBOL_GPL(fsg_ss_bulk_in_desc); + +struct usb_ss_ep_comp_descriptor fsg_ss_bulk_in_comp_desc = { + .bLength = sizeof(fsg_ss_bulk_in_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + + /*.bMaxBurst = DYNAMIC, */ +}; +EXPORT_SYMBOL_GPL(fsg_ss_bulk_in_comp_desc); + +struct usb_endpoint_descriptor fsg_ss_bulk_out_desc = { + .bLength = USB_DT_ENDPOINT_SIZE, + .bDescriptorType = USB_DT_ENDPOINT, + + /* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */ + .bmAttributes = USB_ENDPOINT_XFER_BULK, + .wMaxPacketSize = cpu_to_le16(1024), 
+}; +EXPORT_SYMBOL_GPL(fsg_ss_bulk_out_desc); + +struct usb_ss_ep_comp_descriptor fsg_ss_bulk_out_comp_desc = { + .bLength = sizeof(fsg_ss_bulk_in_comp_desc), + .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, + + /*.bMaxBurst = DYNAMIC, */ +}; +EXPORT_SYMBOL_GPL(fsg_ss_bulk_out_comp_desc); + +struct usb_descriptor_header *fsg_ss_function[] = { + (struct usb_descriptor_header *) &fsg_intf_desc, + (struct usb_descriptor_header *) &fsg_ss_bulk_in_desc, + (struct usb_descriptor_header *) &fsg_ss_bulk_in_comp_desc, + (struct usb_descriptor_header *) &fsg_ss_bulk_out_desc, + (struct usb_descriptor_header *) &fsg_ss_bulk_out_comp_desc, + NULL, +}; +EXPORT_SYMBOL_GPL(fsg_ss_function); + + + /*-------------------------------------------------------------------------*/ + +/* + * If the next two routines are called while the gadget is registered, + * the caller must own fsg->filesem for writing. + */ + +void fsg_lun_close(struct fsg_lun *curlun) +{ + if (curlun->filp) { + LDBG(curlun, "close backing file\n"); + fput(curlun->filp); + curlun->filp = NULL; + } +} +EXPORT_SYMBOL_GPL(fsg_lun_close); + +int fsg_lun_open(struct fsg_lun *curlun, const char *filename) +{ + int ro; + struct file *filp = NULL; + int rc = -EINVAL; + struct inode *inode = NULL; + loff_t size; + loff_t num_sectors; + loff_t min_sectors; + unsigned int blkbits; + unsigned int blksize; + + /* R/W if we can, R/O if we must */ + ro = curlun->initially_ro; + if (!ro) { + filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0); + if (PTR_ERR(filp) == -EROFS || PTR_ERR(filp) == -EACCES) + ro = 1; + } + if (ro) + filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0); + if (IS_ERR(filp)) { + LINFO(curlun, "unable to open backing file: %s\n", filename); + return PTR_ERR(filp); + } + + if (!(filp->f_mode & FMODE_WRITE)) + ro = 1; + + inode = file_inode(filp); + if ((!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) { + LINFO(curlun, "invalid file type: %s\n", filename); + goto out; + } + + /* + * If we can't read the file, it's no good. + * If we can't write the file, use it read-only. 
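+ *
+ * (FMODE_CAN_READ/FMODE_CAN_WRITE describe what the opened file
+ * itself supports; together with the O_RDWR -> O_RDONLY fallback
+ * above they decide whether the LUN ends up read-only.)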
+ */ + if (!(filp->f_mode & FMODE_CAN_READ)) { + LINFO(curlun, "file not readable: %s\n", filename); + goto out; + } + if (!(filp->f_mode & FMODE_CAN_WRITE)) + ro = 1; + + size = i_size_read(inode->i_mapping->host); + if (size < 0) { + LINFO(curlun, "unable to find file size: %s\n", filename); + rc = (int) size; + goto out; + } + + if (curlun->cdrom) { + blksize = 2048; + blkbits = 11; + } else if (inode->i_bdev) { + blksize = bdev_logical_block_size(inode->i_bdev); + blkbits = blksize_bits(blksize); + } else { + blksize = 512; + blkbits = 9; + } + + num_sectors = size >> blkbits; /* File size in logic-block-size blocks */ + min_sectors = 1; + if (curlun->cdrom) { + min_sectors = 300; /* Smallest track is 300 frames */ + if (num_sectors >= 256*60*75) { + num_sectors = 256*60*75 - 1; + LINFO(curlun, "file too big: %s\n", filename); + LINFO(curlun, "using only first %d blocks\n", + (int) num_sectors); + } + } + if (num_sectors < min_sectors) { + LINFO(curlun, "file too small: %s\n", filename); + rc = -ETOOSMALL; + goto out; + } + + if (fsg_lun_is_open(curlun)) + fsg_lun_close(curlun); + + curlun->blksize = blksize; + curlun->blkbits = blkbits; + curlun->ro = ro; + curlun->filp = filp; + curlun->file_length = size; + curlun->num_sectors = num_sectors; + LDBG(curlun, "open backing file: %s\n", filename); + return 0; + +out: + fput(filp); + return rc; +} +EXPORT_SYMBOL_GPL(fsg_lun_open); + + +/*-------------------------------------------------------------------------*/ + +/* + * Sync the file data, don't bother with the metadata. + * This code was copied from fs/buffer.c:sys_fdatasync(). + */ +int fsg_lun_fsync_sub(struct fsg_lun *curlun) +{ + struct file *filp = curlun->filp; + + if (curlun->ro || !filp) + return 0; + return vfs_fsync(filp, 1); +} +EXPORT_SYMBOL_GPL(fsg_lun_fsync_sub); + +void store_cdrom_address(u8 *dest, int msf, u32 addr) +{ + if (msf) { + /* Convert to Minutes-Seconds-Frames */ + addr >>= 2; /* Convert to 2048-byte frames */ + addr += 2*75; /* Lead-in occupies 2 seconds */ + dest[3] = addr % 75; /* Frames */ + addr /= 75; + dest[2] = addr % 60; /* Seconds */ + addr /= 60; + dest[1] = addr; /* Minutes */ + dest[0] = 0; /* Reserved */ + } else { + /* Absolute sector */ + put_unaligned_be32(addr, dest); + } +} +EXPORT_SYMBOL_GPL(store_cdrom_address); + +/*-------------------------------------------------------------------------*/ + + +ssize_t fsg_show_ro(struct fsg_lun *curlun, char *buf) +{ + return sprintf(buf, "%d\n", fsg_lun_is_open(curlun) + ? 
curlun->ro + : curlun->initially_ro); +} +EXPORT_SYMBOL_GPL(fsg_show_ro); + +ssize_t fsg_show_nofua(struct fsg_lun *curlun, char *buf) +{ + return sprintf(buf, "%u\n", curlun->nofua); +} +EXPORT_SYMBOL_GPL(fsg_show_nofua); + +ssize_t fsg_show_file(struct fsg_lun *curlun, struct rw_semaphore *filesem, + char *buf) +{ + char *p; + ssize_t rc; + + down_read(filesem); + if (fsg_lun_is_open(curlun)) { /* Get the complete pathname */ + p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1); + if (IS_ERR(p)) + rc = PTR_ERR(p); + else { + rc = strlen(p); + memmove(buf, p, rc); + buf[rc] = '\n'; /* Add a newline */ + buf[++rc] = 0; + } + } else { /* No file, return 0 bytes */ + *buf = 0; + rc = 0; + } + up_read(filesem); + return rc; +} +EXPORT_SYMBOL_GPL(fsg_show_file); + +ssize_t fsg_show_cdrom(struct fsg_lun *curlun, char *buf) +{ + return sprintf(buf, "%u\n", curlun->cdrom); +} +EXPORT_SYMBOL_GPL(fsg_show_cdrom); + +ssize_t fsg_show_removable(struct fsg_lun *curlun, char *buf) +{ + return sprintf(buf, "%u\n", curlun->removable); +} +EXPORT_SYMBOL_GPL(fsg_show_removable); + +/* + * The caller must hold fsg->filesem for reading when calling this function. + */ +static ssize_t _fsg_store_ro(struct fsg_lun *curlun, bool ro) +{ + if (fsg_lun_is_open(curlun)) { + LDBG(curlun, "read-only status change prevented\n"); + return -EBUSY; + } + + curlun->ro = ro; + curlun->initially_ro = ro; + LDBG(curlun, "read-only status set to %d\n", curlun->ro); + + return 0; +} + +ssize_t fsg_store_ro(struct fsg_lun *curlun, struct rw_semaphore *filesem, + const char *buf, size_t count) +{ + ssize_t rc; + bool ro; + + rc = strtobool(buf, &ro); + if (rc) + return rc; + + /* + * Allow the write-enable status to change only while the + * backing file is closed. + */ + down_read(filesem); + rc = _fsg_store_ro(curlun, ro); + if (!rc) + rc = count; + up_read(filesem); + + return rc; +} +EXPORT_SYMBOL_GPL(fsg_store_ro); + +ssize_t fsg_store_nofua(struct fsg_lun *curlun, const char *buf, size_t count) +{ + bool nofua; + int ret; + + ret = strtobool(buf, &nofua); + if (ret) + return ret; + + /* Sync data when switching from async mode to sync */ + if (!nofua && curlun->nofua) + fsg_lun_fsync_sub(curlun); + + curlun->nofua = nofua; + + return count; +} +EXPORT_SYMBOL_GPL(fsg_store_nofua); + +ssize_t fsg_store_file(struct fsg_lun *curlun, struct rw_semaphore *filesem, + const char *buf, size_t count) +{ + int rc = 0; + + if (curlun->prevent_medium_removal && fsg_lun_is_open(curlun)) { + LDBG(curlun, "eject attempt prevented\n"); + return -EBUSY; /* "Door is locked" */ + } + + /* Remove a trailing newline */ + if (count > 0 && buf[count-1] == '\n') + ((char *) buf)[count-1] = 0; /* Ugh! */ + + /* Load new medium */ + down_write(filesem); + if (count > 0 && buf[0]) { + /* fsg_lun_open() will close existing file if any. */ + rc = fsg_lun_open(curlun, buf); + if (rc == 0) + curlun->unit_attention_data = + SS_NOT_READY_TO_READY_TRANSITION; + } else if (fsg_lun_is_open(curlun)) { + fsg_lun_close(curlun); + curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT; + } + up_write(filesem); + return (rc < 0 ? rc : count); +} +EXPORT_SYMBOL_GPL(fsg_store_file); + +ssize_t fsg_store_cdrom(struct fsg_lun *curlun, struct rw_semaphore *filesem, + const char *buf, size_t count) +{ + bool cdrom; + int ret; + + ret = strtobool(buf, &cdrom); + if (ret) + return ret; + + down_read(filesem); + ret = cdrom ? 
_fsg_store_ro(curlun, true) : 0; + + if (!ret) { + curlun->cdrom = cdrom; + ret = count; + } + up_read(filesem); + + return ret; +} +EXPORT_SYMBOL_GPL(fsg_store_cdrom); + +ssize_t fsg_store_removable(struct fsg_lun *curlun, const char *buf, + size_t count) +{ + bool removable; + int ret; + + ret = strtobool(buf, &removable); + if (ret) + return ret; + + curlun->removable = removable; + + return count; +} +EXPORT_SYMBOL_GPL(fsg_store_removable); + +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/function/storage_common.h b/drivers/usb/gadget/function/storage_common.h new file mode 100644 index 000000000..70c891469 --- /dev/null +++ b/drivers/usb/gadget/function/storage_common.h @@ -0,0 +1,225 @@ +#ifndef USB_STORAGE_COMMON_H +#define USB_STORAGE_COMMON_H + +#include <linux/device.h> +#include <linux/usb/storage.h> +#include <scsi/scsi.h> +#include <asm/unaligned.h> + +#ifndef DEBUG +#undef VERBOSE_DEBUG +#undef DUMP_MSGS +#endif /* !DEBUG */ + +#ifdef VERBOSE_DEBUG +#define VLDBG LDBG +#else +#define VLDBG(lun, fmt, args...) do { } while (0) +#endif /* VERBOSE_DEBUG */ + +#define _LMSG(func, lun, fmt, args...) \ + do { \ + if ((lun)->name_pfx && *(lun)->name_pfx) \ + func("%s/%s: " fmt, *(lun)->name_pfx, \ + (lun)->name, ## args); \ + else \ + func("%s: " fmt, (lun)->name, ## args); \ + } while (0) + +#define LDBG(lun, fmt, args...) _LMSG(pr_debug, lun, fmt, ## args) +#define LERROR(lun, fmt, args...) _LMSG(pr_err, lun, fmt, ## args) +#define LWARN(lun, fmt, args...) _LMSG(pr_warn, lun, fmt, ## args) +#define LINFO(lun, fmt, args...) _LMSG(pr_info, lun, fmt, ## args) + + +#ifdef DUMP_MSGS + +# define dump_msg(fsg, /* const char * */ label, \ + /* const u8 * */ buf, /* unsigned */ length) \ +do { \ + if (length < 512) { \ + DBG(fsg, "%s, length %u:\n", label, length); \ + print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, \ + 16, 1, buf, length, 0); \ + } \ +} while (0) + +# define dump_cdb(fsg) do { } while (0) + +#else + +# define dump_msg(fsg, /* const char * */ label, \ + /* const u8 * */ buf, /* unsigned */ length) do { } while (0) + +# ifdef VERBOSE_DEBUG + +# define dump_cdb(fsg) \ + print_hex_dump(KERN_DEBUG, "SCSI CDB: ", DUMP_PREFIX_NONE, \ + 16, 1, (fsg)->cmnd, (fsg)->cmnd_size, 0) \ + +# else + +# define dump_cdb(fsg) do { } while (0) + +# endif /* VERBOSE_DEBUG */ + +#endif /* DUMP_MSGS */ + +/* Length of a SCSI Command Data Block */ +#define MAX_COMMAND_SIZE 16 + +/* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */ +#define SS_NO_SENSE 0 +#define SS_COMMUNICATION_FAILURE 0x040800 +#define SS_INVALID_COMMAND 0x052000 +#define SS_INVALID_FIELD_IN_CDB 0x052400 +#define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE 0x052100 +#define SS_LOGICAL_UNIT_NOT_SUPPORTED 0x052500 +#define SS_MEDIUM_NOT_PRESENT 0x023a00 +#define SS_MEDIUM_REMOVAL_PREVENTED 0x055302 +#define SS_NOT_READY_TO_READY_TRANSITION 0x062800 +#define SS_RESET_OCCURRED 0x062900 +#define SS_SAVING_PARAMETERS_NOT_SUPPORTED 0x053900 +#define SS_UNRECOVERED_READ_ERROR 0x031100 +#define SS_WRITE_ERROR 0x030c02 +#define SS_WRITE_PROTECTED 0x072700 + +#define SK(x) ((u8) ((x) >> 16)) /* Sense Key byte, etc. 
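+ * For example, SS_MEDIUM_NOT_PRESENT (0x023a00) above decodes to
+ * sense key 0x02, ASC 0x3a and ASCQ 0x00 with these three macros.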
*/ +#define ASC(x) ((u8) ((x) >> 8)) +#define ASCQ(x) ((u8) (x)) + +struct fsg_lun { + struct file *filp; + loff_t file_length; + loff_t num_sectors; + + unsigned int initially_ro:1; + unsigned int ro:1; + unsigned int removable:1; + unsigned int cdrom:1; + unsigned int prevent_medium_removal:1; + unsigned int registered:1; + unsigned int info_valid:1; + unsigned int nofua:1; + + u32 sense_data; + u32 sense_data_info; + u32 unit_attention_data; + + unsigned int blkbits; /* Bits of logical block size + of bound block device */ + unsigned int blksize; /* logical block size of bound block device */ + struct device dev; + const char *name; /* "lun.name" */ + const char **name_pfx; /* "function.name" */ +}; + +static inline bool fsg_lun_is_open(struct fsg_lun *curlun) +{ + return curlun->filp != NULL; +} + +/* Default size of buffer length. */ +#define FSG_BUFLEN ((u32)16384) + +/* Maximal number of LUNs supported in mass storage function */ +#define FSG_MAX_LUNS 8 + +enum fsg_buffer_state { + BUF_STATE_EMPTY = 0, + BUF_STATE_FULL, + BUF_STATE_BUSY +}; + +struct fsg_buffhd { + void *buf; + enum fsg_buffer_state state; + struct fsg_buffhd *next; + + /* + * The NetChip 2280 is faster, and handles some protocol faults + * better, if we don't submit any short bulk-out read requests. + * So we will record the intended request length here. + */ + unsigned int bulk_out_intended_length; + + struct usb_request *inreq; + int inreq_busy; + struct usb_request *outreq; + int outreq_busy; +}; + +enum fsg_state { + /* This one isn't used anywhere */ + FSG_STATE_COMMAND_PHASE = -10, + FSG_STATE_DATA_PHASE, + FSG_STATE_STATUS_PHASE, + + FSG_STATE_IDLE = 0, + FSG_STATE_ABORT_BULK_OUT, + FSG_STATE_RESET, + FSG_STATE_INTERFACE_CHANGE, + FSG_STATE_CONFIG_CHANGE, + FSG_STATE_DISCONNECT, + FSG_STATE_EXIT, + FSG_STATE_TERMINATED +}; + +enum data_direction { + DATA_DIR_UNKNOWN = 0, + DATA_DIR_FROM_HOST, + DATA_DIR_TO_HOST, + DATA_DIR_NONE +}; + +static inline u32 get_unaligned_be24(u8 *buf) +{ + return 0xffffff & (u32) get_unaligned_be32(buf - 1); +} + +static inline struct fsg_lun *fsg_lun_from_dev(struct device *dev) +{ + return container_of(dev, struct fsg_lun, dev); +} + +enum { + FSG_STRING_INTERFACE +}; + +extern struct usb_interface_descriptor fsg_intf_desc; + +extern struct usb_endpoint_descriptor fsg_fs_bulk_in_desc; +extern struct usb_endpoint_descriptor fsg_fs_bulk_out_desc; +extern struct usb_descriptor_header *fsg_fs_function[]; + +extern struct usb_endpoint_descriptor fsg_hs_bulk_in_desc; +extern struct usb_endpoint_descriptor fsg_hs_bulk_out_desc; +extern struct usb_descriptor_header *fsg_hs_function[]; + +extern struct usb_endpoint_descriptor fsg_ss_bulk_in_desc; +extern struct usb_ss_ep_comp_descriptor fsg_ss_bulk_in_comp_desc; +extern struct usb_endpoint_descriptor fsg_ss_bulk_out_desc; +extern struct usb_ss_ep_comp_descriptor fsg_ss_bulk_out_comp_desc; +extern struct usb_descriptor_header *fsg_ss_function[]; + +void fsg_lun_close(struct fsg_lun *curlun); +int fsg_lun_open(struct fsg_lun *curlun, const char *filename); +int fsg_lun_fsync_sub(struct fsg_lun *curlun); +void store_cdrom_address(u8 *dest, int msf, u32 addr); +ssize_t fsg_show_ro(struct fsg_lun *curlun, char *buf); +ssize_t fsg_show_nofua(struct fsg_lun *curlun, char *buf); +ssize_t fsg_show_file(struct fsg_lun *curlun, struct rw_semaphore *filesem, + char *buf); +ssize_t fsg_show_cdrom(struct fsg_lun *curlun, char *buf); +ssize_t fsg_show_removable(struct fsg_lun *curlun, char *buf); +ssize_t fsg_store_ro(struct fsg_lun *curlun, struct 
rw_semaphore *filesem, + const char *buf, size_t count); +ssize_t fsg_store_nofua(struct fsg_lun *curlun, const char *buf, size_t count); +ssize_t fsg_store_file(struct fsg_lun *curlun, struct rw_semaphore *filesem, + const char *buf, size_t count); +ssize_t fsg_store_cdrom(struct fsg_lun *curlun, struct rw_semaphore *filesem, + const char *buf, size_t count); +ssize_t fsg_store_removable(struct fsg_lun *curlun, const char *buf, + size_t count); + +#endif /* USB_STORAGE_COMMON_H */ diff --git a/drivers/usb/gadget/function/u_ecm.h b/drivers/usb/gadget/function/u_ecm.h new file mode 100644 index 000000000..262cc03cc --- /dev/null +++ b/drivers/usb/gadget/function/u_ecm.h @@ -0,0 +1,36 @@ +/* + * u_ecm.h + * + * Utility definitions for the ecm function + * + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef U_ECM_H +#define U_ECM_H + +#include <linux/usb/composite.h> + +struct f_ecm_opts { + struct usb_function_instance func_inst; + struct net_device *net; + bool bound; + + /* + * Read/write access to configfs attributes is handled by configfs. + * + * This is to protect the data from concurrent access by read/write + * and create symlink/remove symlink. + */ + struct mutex lock; + int refcnt; +}; + +#endif /* U_ECM_H */ diff --git a/drivers/usb/gadget/function/u_eem.h b/drivers/usb/gadget/function/u_eem.h new file mode 100644 index 000000000..e3ae97874 --- /dev/null +++ b/drivers/usb/gadget/function/u_eem.h @@ -0,0 +1,36 @@ +/* + * u_eem.h + * + * Utility definitions for the eem function + * + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef U_EEM_H +#define U_EEM_H + +#include <linux/usb/composite.h> + +struct f_eem_opts { + struct usb_function_instance func_inst; + struct net_device *net; + bool bound; + + /* + * Read/write access to configfs attributes is handled by configfs. + * + * This is to protect the data from concurrent access by read/write + * and create symlink/remove symlink. + */ + struct mutex lock; + int refcnt; +}; + +#endif /* U_EEM_H */ diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c new file mode 100644 index 000000000..f1fd777ef --- /dev/null +++ b/drivers/usb/gadget/function/u_ether.c @@ -0,0 +1,1177 @@ +/* + * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack + * + * Copyright (C) 2003-2005,2008 David Brownell + * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger + * Copyright (C) 2008 Nokia Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +/* #define VERBOSE_DEBUG */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/gfp.h> +#include <linux/device.h> +#include <linux/ctype.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> + +#include "u_ether.h" + + +/* + * This component encapsulates the Ethernet link glue needed to provide + * one (!) network link through the USB gadget stack, normally "usb0". + * + * The control and data models are handled by the function driver which + * connects to this code; such as CDC Ethernet (ECM or EEM), + * "CDC Subset", or RNDIS. That includes all descriptor and endpoint + * management. + * + * Link level addressing is handled by this component using module + * parameters; if no such parameters are provided, random link level + * addresses are used. Each end of the link uses one address. The + * host end address is exported in various ways, and is often recorded + * in configuration databases. + * + * The driver which assembles each configuration using such a link is + * responsible for ensuring that each configuration includes at most one + * instance of is network link. (The network layer provides ways for + * this single "physical" link to be used by multiple virtual links.) + */ + +#define UETH__VERSION "29-May-2008" + +struct eth_dev { + /* lock is held while accessing port_usb + */ + spinlock_t lock; + struct gether *port_usb; + + struct net_device *net; + struct usb_gadget *gadget; + + spinlock_t req_lock; /* guard {rx,tx}_reqs */ + struct list_head tx_reqs, rx_reqs; + atomic_t tx_qlen; + + struct sk_buff_head rx_frames; + + unsigned qmult; + + unsigned header_len; + struct sk_buff *(*wrap)(struct gether *, struct sk_buff *skb); + int (*unwrap)(struct gether *, + struct sk_buff *skb, + struct sk_buff_head *list); + + struct work_struct work; + + unsigned long todo; +#define WORK_RX_MEMORY 0 + + bool zlp; + u8 host_mac[ETH_ALEN]; + u8 dev_mac[ETH_ALEN]; +}; + +/*-------------------------------------------------------------------------*/ + +#define RX_EXTRA 20 /* bytes guarding against rx overflows */ + +#define DEFAULT_QLEN 2 /* double buffering by default */ + +/* for dual-speed hardware, use deeper queues at high/super speed */ +static inline int qlen(struct usb_gadget *gadget, unsigned qmult) +{ + if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH || + gadget->speed == USB_SPEED_SUPER)) + return qmult * DEFAULT_QLEN; + else + return DEFAULT_QLEN; +} + +/*-------------------------------------------------------------------------*/ + +/* REVISIT there must be a better way than having two sets + * of debug calls ... + */ + +#undef DBG +#undef VDBG +#undef ERROR +#undef INFO + +#define xprintk(d, level, fmt, args...) \ + printk(level "%s: " fmt , (d)->net->name , ## args) + +#ifdef DEBUG +#undef DEBUG +#define DBG(dev, fmt, args...) \ + xprintk(dev , KERN_DEBUG , fmt , ## args) +#else +#define DBG(dev, fmt, args...) \ + do { } while (0) +#endif /* DEBUG */ + +#ifdef VERBOSE_DEBUG +#define VDBG DBG +#else +#define VDBG(dev, fmt, args...) \ + do { } while (0) +#endif /* DEBUG */ + +#define ERROR(dev, fmt, args...) \ + xprintk(dev , KERN_ERR , fmt , ## args) +#define INFO(dev, fmt, args...) 
\ + xprintk(dev , KERN_INFO , fmt , ## args) + +/*-------------------------------------------------------------------------*/ + +/* NETWORK DRIVER HOOKUP (to the layer above this driver) */ + +static int ueth_change_mtu(struct net_device *net, int new_mtu) +{ + struct eth_dev *dev = netdev_priv(net); + unsigned long flags; + int status = 0; + + /* don't change MTU on "live" link (peer won't know) */ + spin_lock_irqsave(&dev->lock, flags); + if (dev->port_usb) + status = -EBUSY; + else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN) + status = -ERANGE; + else + net->mtu = new_mtu; + spin_unlock_irqrestore(&dev->lock, flags); + + return status; +} + +static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p) +{ + struct eth_dev *dev = netdev_priv(net); + + strlcpy(p->driver, "g_ether", sizeof(p->driver)); + strlcpy(p->version, UETH__VERSION, sizeof(p->version)); + strlcpy(p->fw_version, dev->gadget->name, sizeof(p->fw_version)); + strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof(p->bus_info)); +} + +/* REVISIT can also support: + * - WOL (by tracking suspends and issuing remote wakeup) + * - msglevel (implies updated messaging) + * - ... probably more ethtool ops + */ + +static const struct ethtool_ops ops = { + .get_drvinfo = eth_get_drvinfo, + .get_link = ethtool_op_get_link, +}; + +static void defer_kevent(struct eth_dev *dev, int flag) +{ + if (test_and_set_bit(flag, &dev->todo)) + return; + if (!schedule_work(&dev->work)) + ERROR(dev, "kevent %d may have been dropped\n", flag); + else + DBG(dev, "kevent %d scheduled\n", flag); +} + +static void rx_complete(struct usb_ep *ep, struct usb_request *req); + +static int +rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags) +{ + struct sk_buff *skb; + int retval = -ENOMEM; + size_t size = 0; + struct usb_ep *out; + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + if (dev->port_usb) + out = dev->port_usb->out_ep; + else + out = NULL; + spin_unlock_irqrestore(&dev->lock, flags); + + if (!out) + return -ENOTCONN; + + + /* Padding up to RX_EXTRA handles minor disagreements with host. + * Normally we use the USB "terminate on short read" convention; + * so allow up to (N*maxpacket), since that memory is normally + * already allocated. Some hardware doesn't deal well with short + * reads (e.g. DMA must be N*maxpacket), so for now don't trim a + * byte off the end (to force hardware errors on overflow). + * + * RNDIS uses internal framing, and explicitly allows senders to + * pad to end-of-packet. That's potentially nice for speed, but + * means receivers can't recover lost synch on their own (because + * new packets don't only start after a short RX). + */ + size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA; + size += dev->port_usb->header_len; + size += out->maxpacket - 1; + size -= size % out->maxpacket; + + if (dev->port_usb->is_fixed) + size = max_t(size_t, size, dev->port_usb->fixed_out_len); + + skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags); + if (skb == NULL) { + DBG(dev, "no rx skb\n"); + goto enomem; + } + + /* Some platforms perform better when IP packets are aligned, + * but on at least one, checksumming fails otherwise. Note: + * RNDIS headers involve variable numbers of LE32 values. 
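+ *
+ * Worked example for the buffer sizing above (editorial illustration,
+ * not part of this patch): with an MTU of 1500, a 14-byte Ethernet
+ * header, RX_EXTRA of 20 and no extra framing header, size starts at
+ * 1534, and the round-up to a multiple of a 512-byte high-speed
+ * maxpacket yields 1536, i.e. three full bulk packets.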
+ */ + skb_reserve(skb, NET_IP_ALIGN); + + req->buf = skb->data; + req->length = size; + req->complete = rx_complete; + req->context = skb; + + retval = usb_ep_queue(out, req, gfp_flags); + if (retval == -ENOMEM) +enomem: + defer_kevent(dev, WORK_RX_MEMORY); + if (retval) { + DBG(dev, "rx submit --> %d\n", retval); + if (skb) + dev_kfree_skb_any(skb); + spin_lock_irqsave(&dev->req_lock, flags); + list_add(&req->list, &dev->rx_reqs); + spin_unlock_irqrestore(&dev->req_lock, flags); + } + return retval; +} + +static void rx_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct sk_buff *skb = req->context, *skb2; + struct eth_dev *dev = ep->driver_data; + int status = req->status; + + switch (status) { + + /* normal completion */ + case 0: + skb_put(skb, req->actual); + + if (dev->unwrap) { + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + if (dev->port_usb) { + status = dev->unwrap(dev->port_usb, + skb, + &dev->rx_frames); + } else { + dev_kfree_skb_any(skb); + status = -ENOTCONN; + } + spin_unlock_irqrestore(&dev->lock, flags); + } else { + skb_queue_tail(&dev->rx_frames, skb); + } + skb = NULL; + + skb2 = skb_dequeue(&dev->rx_frames); + while (skb2) { + if (status < 0 + || ETH_HLEN > skb2->len + || skb2->len > VLAN_ETH_FRAME_LEN) { + dev->net->stats.rx_errors++; + dev->net->stats.rx_length_errors++; + DBG(dev, "rx length %d\n", skb2->len); + dev_kfree_skb_any(skb2); + goto next_frame; + } + skb2->protocol = eth_type_trans(skb2, dev->net); + dev->net->stats.rx_packets++; + dev->net->stats.rx_bytes += skb2->len; + + /* no buffer copies needed, unless hardware can't + * use skb buffers. + */ + status = netif_rx(skb2); +next_frame: + skb2 = skb_dequeue(&dev->rx_frames); + } + break; + + /* software-driven interface shutdown */ + case -ECONNRESET: /* unlink */ + case -ESHUTDOWN: /* disconnect etc */ + VDBG(dev, "rx shutdown, code %d\n", status); + goto quiesce; + + /* for hardware automagic (such as pxa) */ + case -ECONNABORTED: /* endpoint reset */ + DBG(dev, "rx %s reset\n", ep->name); + defer_kevent(dev, WORK_RX_MEMORY); +quiesce: + dev_kfree_skb_any(skb); + goto clean; + + /* data overrun */ + case -EOVERFLOW: + dev->net->stats.rx_over_errors++; + /* FALLTHROUGH */ + + default: + dev->net->stats.rx_errors++; + DBG(dev, "rx status %d\n", status); + break; + } + + if (skb) + dev_kfree_skb_any(skb); + if (!netif_running(dev->net)) { +clean: + spin_lock(&dev->req_lock); + list_add(&req->list, &dev->rx_reqs); + spin_unlock(&dev->req_lock); + req = NULL; + } + if (req) + rx_submit(dev, req, GFP_ATOMIC); +} + +static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n) +{ + unsigned i; + struct usb_request *req; + + if (!n) + return -ENOMEM; + + /* queue/recycle up to N requests */ + i = n; + list_for_each_entry(req, list, list) { + if (i-- == 0) + goto extra; + } + while (i--) { + req = usb_ep_alloc_request(ep, GFP_ATOMIC); + if (!req) + return list_empty(list) ? 
-ENOMEM : 0; + list_add(&req->list, list); + } + return 0; + +extra: + /* free extras */ + for (;;) { + struct list_head *next; + + next = req->list.next; + list_del(&req->list); + usb_ep_free_request(ep, req); + + if (next == list) + break; + + req = container_of(next, struct usb_request, list); + } + return 0; +} + +static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n) +{ + int status; + + spin_lock(&dev->req_lock); + status = prealloc(&dev->tx_reqs, link->in_ep, n); + if (status < 0) + goto fail; + status = prealloc(&dev->rx_reqs, link->out_ep, n); + if (status < 0) + goto fail; + goto done; +fail: + DBG(dev, "can't alloc requests\n"); +done: + spin_unlock(&dev->req_lock); + return status; +} + +static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags) +{ + struct usb_request *req; + unsigned long flags; + + /* fill unused rxq slots with some skb */ + spin_lock_irqsave(&dev->req_lock, flags); + while (!list_empty(&dev->rx_reqs)) { + req = container_of(dev->rx_reqs.next, + struct usb_request, list); + list_del_init(&req->list); + spin_unlock_irqrestore(&dev->req_lock, flags); + + if (rx_submit(dev, req, gfp_flags) < 0) { + defer_kevent(dev, WORK_RX_MEMORY); + return; + } + + spin_lock_irqsave(&dev->req_lock, flags); + } + spin_unlock_irqrestore(&dev->req_lock, flags); +} + +static void eth_work(struct work_struct *work) +{ + struct eth_dev *dev = container_of(work, struct eth_dev, work); + + if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) { + if (netif_running(dev->net)) + rx_fill(dev, GFP_KERNEL); + } + + if (dev->todo) + DBG(dev, "work done, flags = 0x%lx\n", dev->todo); +} + +static void tx_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct sk_buff *skb = req->context; + struct eth_dev *dev = ep->driver_data; + + switch (req->status) { + default: + dev->net->stats.tx_errors++; + VDBG(dev, "tx err %d\n", req->status); + /* FALLTHROUGH */ + case -ECONNRESET: /* unlink */ + case -ESHUTDOWN: /* disconnect etc */ + break; + case 0: + dev->net->stats.tx_bytes += skb->len; + } + dev->net->stats.tx_packets++; + + spin_lock(&dev->req_lock); + list_add(&req->list, &dev->tx_reqs); + spin_unlock(&dev->req_lock); + dev_kfree_skb_any(skb); + + atomic_dec(&dev->tx_qlen); + if (netif_carrier_ok(dev->net)) + netif_wake_queue(dev->net); +} + +static inline int is_promisc(u16 cdc_filter) +{ + return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS; +} + +static netdev_tx_t eth_start_xmit(struct sk_buff *skb, + struct net_device *net) +{ + struct eth_dev *dev = netdev_priv(net); + int length = 0; + int retval; + struct usb_request *req = NULL; + unsigned long flags; + struct usb_ep *in; + u16 cdc_filter; + + spin_lock_irqsave(&dev->lock, flags); + if (dev->port_usb) { + in = dev->port_usb->in_ep; + cdc_filter = dev->port_usb->cdc_filter; + } else { + in = NULL; + cdc_filter = 0; + } + spin_unlock_irqrestore(&dev->lock, flags); + + if (skb && !in) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* apply outgoing CDC or RNDIS filters */ + if (skb && !is_promisc(cdc_filter)) { + u8 *dest = skb->data; + + if (is_multicast_ether_addr(dest)) { + u16 type; + + /* ignores USB_CDC_PACKET_TYPE_MULTICAST and host + * SET_ETHERNET_MULTICAST_FILTERS requests + */ + if (is_broadcast_ether_addr(dest)) + type = USB_CDC_PACKET_TYPE_BROADCAST; + else + type = USB_CDC_PACKET_TYPE_ALL_MULTICAST; + if (!(cdc_filter & type)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + } + /* ignores USB_CDC_PACKET_TYPE_DIRECTED */ + } + + spin_lock_irqsave(&dev->req_lock, flags); 
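+	/*
+	 * Editorial note (not part of this patch): the requests on this
+	 * freelist were preallocated by alloc_requests() when
+	 * gether_connect() brought the link up, qlen(gadget, qmult) of
+	 * them for each direction.
+	 */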
+ /* + * this freelist can be empty if an interrupt triggered disconnect() + * and reconfigured the gadget (shutting down this queue) after the + * network stack decided to xmit but before we got the spinlock. + */ + if (list_empty(&dev->tx_reqs)) { + spin_unlock_irqrestore(&dev->req_lock, flags); + return NETDEV_TX_BUSY; + } + + req = container_of(dev->tx_reqs.next, struct usb_request, list); + list_del(&req->list); + + /* temporarily stop TX queue when the freelist empties */ + if (list_empty(&dev->tx_reqs)) + netif_stop_queue(net); + spin_unlock_irqrestore(&dev->req_lock, flags); + + /* no buffer copies needed, unless the network stack did it + * or the hardware can't use skb buffers. + * or there's not enough space for extra headers we need + */ + if (dev->wrap) { + unsigned long flags; + + spin_lock_irqsave(&dev->lock, flags); + if (dev->port_usb) + skb = dev->wrap(dev->port_usb, skb); + spin_unlock_irqrestore(&dev->lock, flags); + if (!skb) { + /* Multi frame CDC protocols may store the frame for + * later which is not a dropped frame. + */ + if (dev->port_usb->supports_multi_frame) + goto multiframe; + goto drop; + } + } + + length = skb->len; + req->buf = skb->data; + req->context = skb; + req->complete = tx_complete; + + /* NCM requires no zlp if transfer is dwNtbInMaxSize */ + if (dev->port_usb->is_fixed && + length == dev->port_usb->fixed_in_len && + (length % in->maxpacket) == 0) + req->zero = 0; + else + req->zero = 1; + + /* use zlp framing on tx for strict CDC-Ether conformance, + * though any robust network rx path ignores extra padding. + * and some hardware doesn't like to write zlps. + */ + if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) + length++; + + req->length = length; + + /* throttle high/super speed IRQ rate back slightly */ + if (gadget_is_dualspeed(dev->gadget)) + req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH || + dev->gadget->speed == USB_SPEED_SUPER) + ? 
((atomic_read(&dev->tx_qlen) % dev->qmult) != 0) + : 0; + + retval = usb_ep_queue(in, req, GFP_ATOMIC); + switch (retval) { + default: + DBG(dev, "tx queue err %d\n", retval); + break; + case 0: + net->trans_start = jiffies; + atomic_inc(&dev->tx_qlen); + } + + if (retval) { + dev_kfree_skb_any(skb); +drop: + dev->net->stats.tx_dropped++; +multiframe: + spin_lock_irqsave(&dev->req_lock, flags); + if (list_empty(&dev->tx_reqs)) + netif_start_queue(net); + list_add(&req->list, &dev->tx_reqs); + spin_unlock_irqrestore(&dev->req_lock, flags); + } + return NETDEV_TX_OK; +} + +/*-------------------------------------------------------------------------*/ + +static void eth_start(struct eth_dev *dev, gfp_t gfp_flags) +{ + DBG(dev, "%s\n", __func__); + + /* fill the rx queue */ + rx_fill(dev, gfp_flags); + + /* and open the tx floodgates */ + atomic_set(&dev->tx_qlen, 0); + netif_wake_queue(dev->net); +} + +static int eth_open(struct net_device *net) +{ + struct eth_dev *dev = netdev_priv(net); + struct gether *link; + + DBG(dev, "%s\n", __func__); + if (netif_carrier_ok(dev->net)) + eth_start(dev, GFP_KERNEL); + + spin_lock_irq(&dev->lock); + link = dev->port_usb; + if (link && link->open) + link->open(link); + spin_unlock_irq(&dev->lock); + + return 0; +} + +static int eth_stop(struct net_device *net) +{ + struct eth_dev *dev = netdev_priv(net); + unsigned long flags; + + VDBG(dev, "%s\n", __func__); + netif_stop_queue(net); + + DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n", + dev->net->stats.rx_packets, dev->net->stats.tx_packets, + dev->net->stats.rx_errors, dev->net->stats.tx_errors + ); + + /* ensure there are no more active requests */ + spin_lock_irqsave(&dev->lock, flags); + if (dev->port_usb) { + struct gether *link = dev->port_usb; + const struct usb_endpoint_descriptor *in; + const struct usb_endpoint_descriptor *out; + + if (link->close) + link->close(link); + + /* NOTE: we have no abort-queue primitive we could use + * to cancel all pending I/O. Instead, we disable then + * reenable the endpoints ... this idiom may leave toggle + * wrong, but that's a self-correcting error. + * + * REVISIT: we *COULD* just let the transfers complete at + * their own pace; the network stack can handle old packets. + * For the moment we leave this here, since it works. 
+ */ + in = link->in_ep->desc; + out = link->out_ep->desc; + usb_ep_disable(link->in_ep); + usb_ep_disable(link->out_ep); + if (netif_carrier_ok(net)) { + DBG(dev, "host still using in/out endpoints\n"); + link->in_ep->desc = in; + link->out_ep->desc = out; + usb_ep_enable(link->in_ep); + usb_ep_enable(link->out_ep); + } + } + spin_unlock_irqrestore(&dev->lock, flags); + + return 0; +} + +/*-------------------------------------------------------------------------*/ + +static int get_ether_addr(const char *str, u8 *dev_addr) +{ + if (str) { + unsigned i; + + for (i = 0; i < 6; i++) { + unsigned char num; + + if ((*str == '.') || (*str == ':')) + str++; + num = hex_to_bin(*str++) << 4; + num |= hex_to_bin(*str++); + dev_addr [i] = num; + } + if (is_valid_ether_addr(dev_addr)) + return 0; + } + eth_random_addr(dev_addr); + return 1; +} + +static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len) +{ + if (len < 18) + return -EINVAL; + + snprintf(str, len, "%pM", dev_addr); + return 18; +} + +static const struct net_device_ops eth_netdev_ops = { + .ndo_open = eth_open, + .ndo_stop = eth_stop, + .ndo_start_xmit = eth_start_xmit, + .ndo_change_mtu = ueth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static struct device_type gadget_type = { + .name = "gadget", +}; + +/** + * gether_setup_name - initialize one ethernet-over-usb link + * @g: gadget to associated with these links + * @ethaddr: NULL, or a buffer in which the ethernet address of the + * host side of the link is recorded + * @netname: name for network device (for example, "usb") + * Context: may sleep + * + * This sets up the single network link that may be exported by a + * gadget driver using this framework. The link layer addresses are + * set up using module parameters. + * + * Returns an eth_dev pointer on success, or an ERR_PTR on failure. 
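+ *
+ * Minimal usage sketch (editorial illustration, not part of this
+ * patch), assuming the dev_addr, host_addr and qmult module parameters
+ * declared by USB_ETHERNET_MODULE_PARAMETERS(), a composite device
+ * cdev, and a caller-owned u8 host_mac[ETH_ALEN] buffer:
+ *
+ *	dev = gether_setup_name(cdev->gadget, dev_addr, host_addr,
+ *				host_mac, qmult, "usb");
+ *	if (IS_ERR(dev))
+ *		return PTR_ERR(dev);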
+ */ +struct eth_dev *gether_setup_name(struct usb_gadget *g, + const char *dev_addr, const char *host_addr, + u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname) +{ + struct eth_dev *dev; + struct net_device *net; + int status; + + net = alloc_etherdev(sizeof *dev); + if (!net) + return ERR_PTR(-ENOMEM); + + dev = netdev_priv(net); + spin_lock_init(&dev->lock); + spin_lock_init(&dev->req_lock); + INIT_WORK(&dev->work, eth_work); + INIT_LIST_HEAD(&dev->tx_reqs); + INIT_LIST_HEAD(&dev->rx_reqs); + + skb_queue_head_init(&dev->rx_frames); + + /* network device setup */ + dev->net = net; + dev->qmult = qmult; + snprintf(net->name, sizeof(net->name), "%s%%d", netname); + + if (get_ether_addr(dev_addr, net->dev_addr)) + dev_warn(&g->dev, + "using random %s ethernet address\n", "self"); + if (get_ether_addr(host_addr, dev->host_mac)) + dev_warn(&g->dev, + "using random %s ethernet address\n", "host"); + + if (ethaddr) + memcpy(ethaddr, dev->host_mac, ETH_ALEN); + + net->netdev_ops = ð_netdev_ops; + + net->ethtool_ops = &ops; + + dev->gadget = g; + SET_NETDEV_DEV(net, &g->dev); + SET_NETDEV_DEVTYPE(net, &gadget_type); + + status = register_netdev(net); + if (status < 0) { + dev_dbg(&g->dev, "register_netdev failed, %d\n", status); + free_netdev(net); + dev = ERR_PTR(status); + } else { + INFO(dev, "MAC %pM\n", net->dev_addr); + INFO(dev, "HOST MAC %pM\n", dev->host_mac); + + /* + * two kinds of host-initiated state changes: + * - iff DATA transfer is active, carrier is "on" + * - tx queueing enabled if open *and* carrier is "on" + */ + netif_carrier_off(net); + } + + return dev; +} +EXPORT_SYMBOL_GPL(gether_setup_name); + +struct net_device *gether_setup_name_default(const char *netname) +{ + struct net_device *net; + struct eth_dev *dev; + + net = alloc_etherdev(sizeof(*dev)); + if (!net) + return ERR_PTR(-ENOMEM); + + dev = netdev_priv(net); + spin_lock_init(&dev->lock); + spin_lock_init(&dev->req_lock); + INIT_WORK(&dev->work, eth_work); + INIT_LIST_HEAD(&dev->tx_reqs); + INIT_LIST_HEAD(&dev->rx_reqs); + + skb_queue_head_init(&dev->rx_frames); + + /* network device setup */ + dev->net = net; + dev->qmult = QMULT_DEFAULT; + snprintf(net->name, sizeof(net->name), "%s%%d", netname); + + eth_random_addr(dev->dev_mac); + pr_warn("using random %s ethernet address\n", "self"); + eth_random_addr(dev->host_mac); + pr_warn("using random %s ethernet address\n", "host"); + + net->netdev_ops = ð_netdev_ops; + + net->ethtool_ops = &ops; + SET_NETDEV_DEVTYPE(net, &gadget_type); + + return net; +} +EXPORT_SYMBOL_GPL(gether_setup_name_default); + +int gether_register_netdev(struct net_device *net) +{ + struct eth_dev *dev; + struct usb_gadget *g; + struct sockaddr sa; + int status; + + if (!net->dev.parent) + return -EINVAL; + dev = netdev_priv(net); + g = dev->gadget; + status = register_netdev(net); + if (status < 0) { + dev_dbg(&g->dev, "register_netdev failed, %d\n", status); + return status; + } else { + INFO(dev, "HOST MAC %pM\n", dev->host_mac); + + /* two kinds of host-initiated state changes: + * - iff DATA transfer is active, carrier is "on" + * - tx queueing enabled if open *and* carrier is "on" + */ + netif_carrier_off(net); + } + sa.sa_family = net->type; + memcpy(sa.sa_data, dev->dev_mac, ETH_ALEN); + rtnl_lock(); + status = dev_set_mac_address(net, &sa); + rtnl_unlock(); + if (status) + pr_warn("cannot set self ethernet address: %d\n", status); + else + INFO(dev, "MAC %pM\n", dev->dev_mac); + + return status; +} +EXPORT_SYMBOL_GPL(gether_register_netdev); + +void gether_set_gadget(struct 
net_device *net, struct usb_gadget *g) +{ + struct eth_dev *dev; + + dev = netdev_priv(net); + dev->gadget = g; + SET_NETDEV_DEV(net, &g->dev); +} +EXPORT_SYMBOL_GPL(gether_set_gadget); + +int gether_set_dev_addr(struct net_device *net, const char *dev_addr) +{ + struct eth_dev *dev; + u8 new_addr[ETH_ALEN]; + + dev = netdev_priv(net); + if (get_ether_addr(dev_addr, new_addr)) + return -EINVAL; + memcpy(dev->dev_mac, new_addr, ETH_ALEN); + return 0; +} +EXPORT_SYMBOL_GPL(gether_set_dev_addr); + +int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len) +{ + struct eth_dev *dev; + + dev = netdev_priv(net); + return get_ether_addr_str(dev->dev_mac, dev_addr, len); +} +EXPORT_SYMBOL_GPL(gether_get_dev_addr); + +int gether_set_host_addr(struct net_device *net, const char *host_addr) +{ + struct eth_dev *dev; + u8 new_addr[ETH_ALEN]; + + dev = netdev_priv(net); + if (get_ether_addr(host_addr, new_addr)) + return -EINVAL; + memcpy(dev->host_mac, new_addr, ETH_ALEN); + return 0; +} +EXPORT_SYMBOL_GPL(gether_set_host_addr); + +int gether_get_host_addr(struct net_device *net, char *host_addr, int len) +{ + struct eth_dev *dev; + + dev = netdev_priv(net); + return get_ether_addr_str(dev->host_mac, host_addr, len); +} +EXPORT_SYMBOL_GPL(gether_get_host_addr); + +int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len) +{ + struct eth_dev *dev; + + if (len < 13) + return -EINVAL; + + dev = netdev_priv(net); + snprintf(host_addr, len, "%pm", dev->host_mac); + + return strlen(host_addr); +} +EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc); + +void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN]) +{ + struct eth_dev *dev; + + dev = netdev_priv(net); + memcpy(host_mac, dev->host_mac, ETH_ALEN); +} +EXPORT_SYMBOL_GPL(gether_get_host_addr_u8); + +void gether_set_qmult(struct net_device *net, unsigned qmult) +{ + struct eth_dev *dev; + + dev = netdev_priv(net); + dev->qmult = qmult; +} +EXPORT_SYMBOL_GPL(gether_set_qmult); + +unsigned gether_get_qmult(struct net_device *net) +{ + struct eth_dev *dev; + + dev = netdev_priv(net); + return dev->qmult; +} +EXPORT_SYMBOL_GPL(gether_get_qmult); + +int gether_get_ifname(struct net_device *net, char *name, int len) +{ + rtnl_lock(); + strlcpy(name, netdev_name(net), len); + rtnl_unlock(); + return strlen(name); +} +EXPORT_SYMBOL_GPL(gether_get_ifname); + +/** + * gether_cleanup - remove Ethernet-over-USB device + * Context: may sleep + * + * This is called to free all resources allocated by @gether_setup(). + */ +void gether_cleanup(struct eth_dev *dev) +{ + if (!dev) + return; + + unregister_netdev(dev->net); + flush_work(&dev->work); + free_netdev(dev->net); +} +EXPORT_SYMBOL_GPL(gether_cleanup); + +/** + * gether_connect - notify network layer that USB link is active + * @link: the USB link, set up with endpoints, descriptors matching + * current device speed, and any framing wrapper(s) set up. + * Context: irqs blocked + * + * This is called to activate endpoints and let the network layer know + * the connection is active ("carrier detect"). It may cause the I/O + * queues to open and start letting network packets flow, but will in + * any case activate the endpoints so that they respond properly to the + * USB host. + * + * Verify net_device pointer returned using IS_ERR(). If it doesn't + * indicate some error code (negative errno), ep->driver_data values + * have been overwritten. 
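+ *
+ * Illustrative sketch (editorial, not part of this patch): a function
+ * driver's set_alt() handler, given a hypothetical struct gether
+ * member "port", would typically do
+ *
+ *	net = gether_connect(&func->port);
+ *	if (IS_ERR(net))
+ *		return PTR_ERR(net);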
+ */ +struct net_device *gether_connect(struct gether *link) +{ + struct eth_dev *dev = link->ioport; + int result = 0; + + if (!dev) + return ERR_PTR(-EINVAL); + + link->in_ep->driver_data = dev; + result = usb_ep_enable(link->in_ep); + if (result != 0) { + DBG(dev, "enable %s --> %d\n", + link->in_ep->name, result); + goto fail0; + } + + link->out_ep->driver_data = dev; + result = usb_ep_enable(link->out_ep); + if (result != 0) { + DBG(dev, "enable %s --> %d\n", + link->out_ep->name, result); + goto fail1; + } + + if (result == 0) + result = alloc_requests(dev, link, qlen(dev->gadget, + dev->qmult)); + + if (result == 0) { + dev->zlp = link->is_zlp_ok; + DBG(dev, "qlen %d\n", qlen(dev->gadget, dev->qmult)); + + dev->header_len = link->header_len; + dev->unwrap = link->unwrap; + dev->wrap = link->wrap; + + spin_lock(&dev->lock); + dev->port_usb = link; + if (netif_running(dev->net)) { + if (link->open) + link->open(link); + } else { + if (link->close) + link->close(link); + } + spin_unlock(&dev->lock); + + netif_carrier_on(dev->net); + if (netif_running(dev->net)) + eth_start(dev, GFP_ATOMIC); + + /* on error, disable any endpoints */ + } else { + (void) usb_ep_disable(link->out_ep); +fail1: + (void) usb_ep_disable(link->in_ep); + } +fail0: + /* caller is responsible for cleanup on error */ + if (result < 0) + return ERR_PTR(result); + return dev->net; +} +EXPORT_SYMBOL_GPL(gether_connect); + +/** + * gether_disconnect - notify network layer that USB link is inactive + * @link: the USB link, on which gether_connect() was called + * Context: irqs blocked + * + * This is called to deactivate endpoints and let the network layer know + * the connection went inactive ("no carrier"). + * + * On return, the state is as if gether_connect() had never been called. + * The endpoints are inactive, and accordingly without active USB I/O. + * Pointers to endpoint descriptors and endpoint private data are nulled. + */ +void gether_disconnect(struct gether *link) +{ + struct eth_dev *dev = link->ioport; + struct usb_request *req; + + WARN_ON(!dev); + if (!dev) + return; + + DBG(dev, "%s\n", __func__); + + netif_stop_queue(dev->net); + netif_carrier_off(dev->net); + + /* disable endpoints, forcing (synchronous) completion + * of all pending i/o. then free the request objects + * and forget about the endpoints. 
+ */ + usb_ep_disable(link->in_ep); + spin_lock(&dev->req_lock); + while (!list_empty(&dev->tx_reqs)) { + req = container_of(dev->tx_reqs.next, + struct usb_request, list); + list_del(&req->list); + + spin_unlock(&dev->req_lock); + usb_ep_free_request(link->in_ep, req); + spin_lock(&dev->req_lock); + } + spin_unlock(&dev->req_lock); + link->in_ep->driver_data = NULL; + link->in_ep->desc = NULL; + + usb_ep_disable(link->out_ep); + spin_lock(&dev->req_lock); + while (!list_empty(&dev->rx_reqs)) { + req = container_of(dev->rx_reqs.next, + struct usb_request, list); + list_del(&req->list); + + spin_unlock(&dev->req_lock); + usb_ep_free_request(link->out_ep, req); + spin_lock(&dev->req_lock); + } + spin_unlock(&dev->req_lock); + link->out_ep->driver_data = NULL; + link->out_ep->desc = NULL; + + /* finish forgetting about this USB link episode */ + dev->header_len = 0; + dev->unwrap = NULL; + dev->wrap = NULL; + + spin_lock(&dev->lock); + dev->port_usb = NULL; + spin_unlock(&dev->lock); +} +EXPORT_SYMBOL_GPL(gether_disconnect); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("David Brownell"); diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h new file mode 100644 index 000000000..334b38947 --- /dev/null +++ b/drivers/usb/gadget/function/u_ether.h @@ -0,0 +1,272 @@ +/* + * u_ether.h -- interface to USB gadget "ethernet link" utilities + * + * Copyright (C) 2003-2005,2008 David Brownell + * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger + * Copyright (C) 2008 Nokia Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __U_ETHER_H +#define __U_ETHER_H + +#include <linux/err.h> +#include <linux/if_ether.h> +#include <linux/usb/composite.h> +#include <linux/usb/cdc.h> +#include <linux/netdevice.h> + +#include "gadget_chips.h" + +#define QMULT_DEFAULT 5 + +/* + * dev_addr: initial value + * changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" + * host_addr: this address is invisible to ifconfig + */ +#define USB_ETHERNET_MODULE_PARAMETERS() \ + static unsigned qmult = QMULT_DEFAULT; \ + module_param(qmult, uint, S_IRUGO|S_IWUSR); \ + MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");\ + \ + static char *dev_addr; \ + module_param(dev_addr, charp, S_IRUGO); \ + MODULE_PARM_DESC(dev_addr, "Device Ethernet Address"); \ + \ + static char *host_addr; \ + module_param(host_addr, charp, S_IRUGO); \ + MODULE_PARM_DESC(host_addr, "Host Ethernet Address") + +struct eth_dev; + +/* + * This represents the USB side of an "ethernet" link, managed by a USB + * function which provides control and (maybe) framing. Two functions + * in different configurations could share the same ethernet link/netdev, + * using different host interaction models. + * + * There is a current limitation that only one instance of this link may + * be present in any given configuration. When that's a problem, network + * layer facilities can be used to package multiple logical links on this + * single "physical" one. + */ +struct gether { + struct usb_function func; + + /* updated by gether_{connect,disconnect} */ + struct eth_dev *ioport; + + /* endpoints handle full and/or high speeds */ + struct usb_ep *in_ep; + struct usb_ep *out_ep; + + bool is_zlp_ok; + + u16 cdc_filter; + + /* hooks for added framing, as needed for RNDIS and EEM. 
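+ *
+ * Editorial note (not part of this patch): wrap() prepends the
+ * protocol's per-packet framing on the TX path, while unwrap() strips
+ * it on RX and may queue more than one frame onto the supplied list;
+ * header_len is the extra room reserved for that framing in each RX
+ * buffer.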
*/ + u32 header_len; + /* NCM requires fixed size bundles */ + bool is_fixed; + u32 fixed_out_len; + u32 fixed_in_len; + bool supports_multi_frame; + struct sk_buff *(*wrap)(struct gether *port, + struct sk_buff *skb); + int (*unwrap)(struct gether *port, + struct sk_buff *skb, + struct sk_buff_head *list); + + /* called on network open/close */ + void (*open)(struct gether *); + void (*close)(struct gether *); +}; + +#define DEFAULT_FILTER (USB_CDC_PACKET_TYPE_BROADCAST \ + |USB_CDC_PACKET_TYPE_ALL_MULTICAST \ + |USB_CDC_PACKET_TYPE_PROMISCUOUS \ + |USB_CDC_PACKET_TYPE_DIRECTED) + +/* variant of gether_setup that allows customizing network device name */ +struct eth_dev *gether_setup_name(struct usb_gadget *g, + const char *dev_addr, const char *host_addr, + u8 ethaddr[ETH_ALEN], unsigned qmult, const char *netname); + +/* netdev setup/teardown as directed by the gadget driver */ +/* gether_setup - initialize one ethernet-over-usb link + * @g: gadget to associated with these links + * @ethaddr: NULL, or a buffer in which the ethernet address of the + * host side of the link is recorded + * Context: may sleep + * + * This sets up the single network link that may be exported by a + * gadget driver using this framework. The link layer addresses are + * set up using module parameters. + * + * Returns a eth_dev pointer on success, or an ERR_PTR on failure + */ +static inline struct eth_dev *gether_setup(struct usb_gadget *g, + const char *dev_addr, const char *host_addr, + u8 ethaddr[ETH_ALEN], unsigned qmult) +{ + return gether_setup_name(g, dev_addr, host_addr, ethaddr, qmult, "usb"); +} + +/* + * variant of gether_setup_default that allows customizing + * network device name + */ +struct net_device *gether_setup_name_default(const char *netname); + +/* + * gether_register_netdev - register the net device + * @net: net device to register + * + * Registers the net device associated with this ethernet-over-usb link + * + */ +int gether_register_netdev(struct net_device *net); + +/* gether_setup_default - initialize one ethernet-over-usb link + * Context: may sleep + * + * This sets up the single network link that may be exported by a + * gadget driver using this framework. The link layer addresses + * are set to random values. + * + * Returns negative errno, or zero on success + */ +static inline struct net_device *gether_setup_default(void) +{ + return gether_setup_name_default("usb"); +} + +/** + * gether_set_gadget - initialize one ethernet-over-usb link with a gadget + * @net: device representing this link + * @g: the gadget to initialize with + * + * This associates one ethernet-over-usb link with a gadget. + */ +void gether_set_gadget(struct net_device *net, struct usb_gadget *g); + +/** + * gether_set_dev_addr - initialize an ethernet-over-usb link with eth address + * @net: device representing this link + * @dev_addr: eth address of this device + * + * This sets the device-side Ethernet address of this ethernet-over-usb link + * if dev_addr is correct. + * Returns negative errno if the new address is incorrect. + */ +int gether_set_dev_addr(struct net_device *net, const char *dev_addr); + +/** + * gether_get_dev_addr - get an ethernet-over-usb link eth address + * @net: device representing this link + * @dev_addr: place to store device's eth address + * @len: length of the @dev_addr buffer + * + * This gets the device-side Ethernet address of this ethernet-over-usb link. + * Returns zero on success, else negative errno. 
+ */ +int gether_get_dev_addr(struct net_device *net, char *dev_addr, int len); + +/** + * gether_set_host_addr - initialize an ethernet-over-usb link with host address + * @net: device representing this link + * @host_addr: eth address of the host + * + * This sets the host-side Ethernet address of this ethernet-over-usb link + * if host_addr is correct. + * Returns negative errno if the new address is incorrect. + */ +int gether_set_host_addr(struct net_device *net, const char *host_addr); + +/** + * gether_get_host_addr - get an ethernet-over-usb link host address + * @net: device representing this link + * @host_addr: place to store eth address of the host + * @len: length of the @host_addr buffer + * + * This gets the host-side Ethernet address of this ethernet-over-usb link. + * Returns zero on success, else negative errno. + */ +int gether_get_host_addr(struct net_device *net, char *host_addr, int len); + +/** + * gether_get_host_addr_cdc - get an ethernet-over-usb link host address + * @net: device representing this link + * @host_addr: place to store eth address of the host + * @len: length of the @host_addr buffer + * + * This gets the CDC formatted host-side Ethernet address of this + * ethernet-over-usb link. + * Returns zero on success, else negative errno. + */ +int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len); + +/** + * gether_get_host_addr_u8 - get an ethernet-over-usb link host address + * @net: device representing this link + * @host_mac: place to store the eth address of the host + * + * This gets the binary formatted host-side Ethernet address of this + * ethernet-over-usb link. + */ +void gether_get_host_addr_u8(struct net_device *net, u8 host_mac[ETH_ALEN]); + +/** + * gether_set_qmult - initialize an ethernet-over-usb link with a multiplier + * @net: device representing this link + * @qmult: queue multiplier + * + * This sets the queue length multiplier of this ethernet-over-usb link. + * For higher speeds use longer queues. + */ +void gether_set_qmult(struct net_device *net, unsigned qmult); + +/** + * gether_get_qmult - get an ethernet-over-usb link multiplier + * @net: device representing this link + * + * This gets the queue length multiplier of this ethernet-over-usb link. + */ +unsigned gether_get_qmult(struct net_device *net); + +/** + * gether_get_ifname - get an ethernet-over-usb link interface name + * @net: device representing this link + * @name: place to store the interface name + * @len: length of the @name buffer + * + * This gets the interface name of this ethernet-over-usb link. + * Returns zero on success, else negative errno. + */ +int gether_get_ifname(struct net_device *net, char *name, int len); + +void gether_cleanup(struct eth_dev *dev); + +/* connect/disconnect is handled by individual functions */ +struct net_device *gether_connect(struct gether *); +void gether_disconnect(struct gether *); + +/* Some controllers can't support CDC Ethernet (ECM) ... */ +static inline bool can_support_ecm(struct usb_gadget *gadget) +{ + if (!gadget_supports_altsettings(gadget)) + return false; + + /* Everything else is *presumably* fine ... but this is a bit + * chancy, so be **CERTAIN** there are no hardware issues with + * your controller. Add it above if it can't handle CDC. 
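+ *
+ * Illustrative sketch (editorial, not part of this patch; the two bind
+ * helpers are hypothetical): a gadget driver populating configuration
+ * c might branch on this check:
+ *
+ *	if (can_support_ecm(cdev->gadget))
+ *		status = bind_ecm_function(c);
+ *	else
+ *		status = bind_subset_function(c);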
+ */ + return true; +} + +#endif /* __U_ETHER_H */ diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h new file mode 100644 index 000000000..bcbd30146 --- /dev/null +++ b/drivers/usb/gadget/function/u_ether_configfs.h @@ -0,0 +1,164 @@ +/* + * u_ether_configfs.h + * + * Utility definitions for configfs support in USB Ethernet functions + * + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __U_ETHER_CONFIGFS_H +#define __U_ETHER_CONFIGFS_H + +#define USB_ETHERNET_CONFIGFS_ITEM(_f_) \ + CONFIGFS_ATTR_STRUCT(f_##_f_##_opts); \ + CONFIGFS_ATTR_OPS(f_##_f_##_opts); \ + \ + static void _f_##_attr_release(struct config_item *item) \ + { \ + struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \ + \ + usb_put_function_instance(&opts->func_inst); \ + } \ + \ + static struct configfs_item_operations _f_##_item_ops = { \ + .release = _f_##_attr_release, \ + .show_attribute = f_##_f_##_opts_attr_show, \ + .store_attribute = f_##_f_##_opts_attr_store, \ + } + +#define USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(_f_) \ + static ssize_t _f_##_opts_dev_addr_show(struct f_##_f_##_opts *opts, \ + char *page) \ + { \ + int result; \ + \ + mutex_lock(&opts->lock); \ + result = gether_get_dev_addr(opts->net, page, PAGE_SIZE); \ + mutex_unlock(&opts->lock); \ + \ + return result; \ + } \ + \ + static ssize_t _f_##_opts_dev_addr_store(struct f_##_f_##_opts *opts, \ + const char *page, size_t len)\ + { \ + int ret; \ + \ + mutex_lock(&opts->lock); \ + if (opts->refcnt) { \ + mutex_unlock(&opts->lock); \ + return -EBUSY; \ + } \ + \ + ret = gether_set_dev_addr(opts->net, page); \ + mutex_unlock(&opts->lock); \ + if (!ret) \ + ret = len; \ + return ret; \ + } \ + \ + static struct f_##_f_##_opts_attribute f_##_f_##_opts_dev_addr = \ + __CONFIGFS_ATTR(dev_addr, S_IRUGO | S_IWUSR, \ + _f_##_opts_dev_addr_show, \ + _f_##_opts_dev_addr_store) + +#define USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(_f_) \ + static ssize_t _f_##_opts_host_addr_show(struct f_##_f_##_opts *opts, \ + char *page) \ + { \ + int result; \ + \ + mutex_lock(&opts->lock); \ + result = gether_get_host_addr(opts->net, page, PAGE_SIZE); \ + mutex_unlock(&opts->lock); \ + \ + return result; \ + } \ + \ + static ssize_t _f_##_opts_host_addr_store(struct f_##_f_##_opts *opts, \ + const char *page, size_t len)\ + { \ + int ret; \ + \ + mutex_lock(&opts->lock); \ + if (opts->refcnt) { \ + mutex_unlock(&opts->lock); \ + return -EBUSY; \ + } \ + \ + ret = gether_set_host_addr(opts->net, page); \ + mutex_unlock(&opts->lock); \ + if (!ret) \ + ret = len; \ + return ret; \ + } \ + \ + static struct f_##_f_##_opts_attribute f_##_f_##_opts_host_addr = \ + __CONFIGFS_ATTR(host_addr, S_IRUGO | S_IWUSR, \ + _f_##_opts_host_addr_show, \ + _f_##_opts_host_addr_store) + +#define USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(_f_) \ + static ssize_t _f_##_opts_qmult_show(struct f_##_f_##_opts *opts, \ + char *page) \ + { \ + unsigned qmult; \ + \ + mutex_lock(&opts->lock); \ + qmult = gether_get_qmult(opts->net); \ + mutex_unlock(&opts->lock); \ + return sprintf(page, "%d", qmult); \ + } \ + \ + static ssize_t _f_##_opts_qmult_store(struct f_##_f_##_opts *opts, \ + const char *page, size_t len)\ + { \ + u8 val; \ + int ret; \ + 
\ + mutex_lock(&opts->lock); \ + if (opts->refcnt) { \ + ret = -EBUSY; \ + goto out; \ + } \ + \ + ret = kstrtou8(page, 0, &val); \ + if (ret) \ + goto out; \ + \ + gether_set_qmult(opts->net, val); \ + ret = len; \ +out: \ + mutex_unlock(&opts->lock); \ + return ret; \ + } \ + \ + static struct f_##_f_##_opts_attribute f_##_f_##_opts_qmult = \ + __CONFIGFS_ATTR(qmult, S_IRUGO | S_IWUSR, \ + _f_##_opts_qmult_show, \ + _f_##_opts_qmult_store) + +#define USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(_f_) \ + static ssize_t _f_##_opts_ifname_show(struct f_##_f_##_opts *opts, \ + char *page) \ + { \ + int ret; \ + \ + mutex_lock(&opts->lock); \ + ret = gether_get_ifname(opts->net, page, PAGE_SIZE); \ + mutex_unlock(&opts->lock); \ + \ + return ret; \ + } \ + \ + static struct f_##_f_##_opts_attribute f_##_f_##_opts_ifname = \ + __CONFIGFS_ATTR_RO(ifname, _f_##_opts_ifname_show) + +#endif /* __U_ETHER_CONFIGFS_H */ diff --git a/drivers/usb/gadget/function/u_fs.h b/drivers/usb/gadget/function/u_fs.h new file mode 100644 index 000000000..60139854e --- /dev/null +++ b/drivers/usb/gadget/function/u_fs.h @@ -0,0 +1,299 @@ +/* + * u_fs.h + * + * Utility definitions for the FunctionFS + * + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef U_FFS_H +#define U_FFS_H + +#include <linux/usb/composite.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/workqueue.h> + +#ifdef VERBOSE_DEBUG +#ifndef pr_vdebug +# define pr_vdebug pr_debug +#endif /* pr_vdebug */ +# define ffs_dump_mem(prefix, ptr, len) \ + print_hex_dump_bytes(pr_fmt(prefix ": "), DUMP_PREFIX_NONE, ptr, len) +#else +#ifndef pr_vdebug +# define pr_vdebug(...) do { } while (0) +#endif /* pr_vdebug */ +# define ffs_dump_mem(prefix, ptr, len) do { } while (0) +#endif /* VERBOSE_DEBUG */ + +#define ENTER() pr_vdebug("%s()\n", __func__) + +struct f_fs_opts; + +struct ffs_dev { + const char *name; + bool name_allocated; + bool mounted; + bool desc_ready; + bool single; + struct ffs_data *ffs_data; + struct f_fs_opts *opts; + struct list_head entry; + + int (*ffs_ready_callback)(struct ffs_data *ffs); + void (*ffs_closed_callback)(struct ffs_data *ffs); + void *(*ffs_acquire_dev_callback)(struct ffs_dev *dev); + void (*ffs_release_dev_callback)(struct ffs_dev *dev); +}; + +extern struct mutex ffs_lock; + +static inline void ffs_dev_lock(void) +{ + mutex_lock(&ffs_lock); +} + +static inline void ffs_dev_unlock(void) +{ + mutex_unlock(&ffs_lock); +} + +int ffs_name_dev(struct ffs_dev *dev, const char *name); +int ffs_single_dev(struct ffs_dev *dev); + +struct ffs_epfile; +struct ffs_function; + +enum ffs_state { + /* + * Waiting for descriptors and strings. + * + * In this state no open(2), read(2) or write(2) on epfiles + * may succeed (which should not be the problem as there + * should be no such files opened in the first place). + */ + FFS_READ_DESCRIPTORS, + FFS_READ_STRINGS, + + /* + * We've got descriptors and strings. We are or have called + * functionfs_ready_callback(). functionfs_bind() may have + * been called but we don't know. + * + * This is the only state in which operations on epfiles may + * succeed. + */ + FFS_ACTIVE, + + /* + * Function is visible to host, but it's not functional. 
All + * setup requests are stalled and transfers on another endpoints + * are refused. All epfiles, except ep0, are deleted so there + * is no way to perform any operations on them. + * + * This state is set after closing all functionfs files, when + * mount parameter "no_disconnect=1" has been set. Function will + * remain in deactivated state until filesystem is umounted or + * ep0 is opened again. In the second case functionfs state will + * be reset, and it will be ready for descriptors and strings + * writing. + * + * This is useful only when functionfs is composed to gadget + * with another function which can perform some critical + * operations, and it's strongly desired to have this operations + * completed, even after functionfs files closure. + */ + FFS_DEACTIVATED, + + /* + * All endpoints have been closed. This state is also set if + * we encounter an unrecoverable error. The only + * unrecoverable error is situation when after reading strings + * from user space we fail to initialise epfiles or + * functionfs_ready_callback() returns with error (<0). + * + * In this state no open(2), read(2) or write(2) (both on ep0 + * as well as epfile) may succeed (at this point epfiles are + * unlinked and all closed so this is not a problem; ep0 is + * also closed but ep0 file exists and so open(2) on ep0 must + * fail). + */ + FFS_CLOSING +}; + +enum ffs_setup_state { + /* There is no setup request pending. */ + FFS_NO_SETUP, + /* + * User has read events and there was a setup request event + * there. The next read/write on ep0 will handle the + * request. + */ + FFS_SETUP_PENDING, + /* + * There was event pending but before user space handled it + * some other event was introduced which canceled existing + * setup. If this state is set read/write on ep0 return + * -EIDRM. This state is only set when adding event. + */ + FFS_SETUP_CANCELLED +}; + +struct ffs_data { + struct usb_gadget *gadget; + + /* + * Protect access read/write operations, only one read/write + * at a time. As a consequence protects ep0req and company. + * While setup request is being processed (queued) this is + * held. + */ + struct mutex mutex; + + /* + * Protect access to endpoint related structures (basically + * usb_ep_queue(), usb_ep_dequeue(), etc. calls) except for + * endpoint zero. + */ + spinlock_t eps_lock; + + /* + * XXX REVISIT do we need our own request? Since we are not + * handling setup requests immediately user space may be so + * slow that another setup will be sent to the gadget but this + * time not to us but another function and then there could be + * a race. Is that the case? Or maybe we can use cdev->req + * after all, maybe we just need some spinlock for that? + */ + struct usb_request *ep0req; /* P: mutex */ + struct completion ep0req_completion; /* P: mutex */ + + /* reference counter */ + atomic_t ref; + /* how many files are opened (EP0 and others) */ + atomic_t opened; + + /* EP0 state */ + enum ffs_state state; + + /* + * Possible transitions: + * + FFS_NO_SETUP -> FFS_SETUP_PENDING -- P: ev.waitq.lock + * happens only in ep0 read which is P: mutex + * + FFS_SETUP_PENDING -> FFS_NO_SETUP -- P: ev.waitq.lock + * happens only in ep0 i/o which is P: mutex + * + FFS_SETUP_PENDING -> FFS_SETUP_CANCELLED -- P: ev.waitq.lock + * + FFS_SETUP_CANCELLED -> FFS_NO_SETUP -- cmpxchg + * + * This field should never be accessed directly and instead + * ffs_setup_state_clear_cancelled function should be used. + */ + enum ffs_setup_state setup_state; + + /* Events & such. 
*/ + struct { + u8 types[4]; + unsigned short count; + /* XXX REVISIT need to update it in some places, or do we? */ + unsigned short can_stall; + struct usb_ctrlrequest setup; + + wait_queue_head_t waitq; + } ev; /* the whole structure, P: ev.waitq.lock */ + + /* Flags */ + unsigned long flags; +#define FFS_FL_CALL_CLOSED_CALLBACK 0 +#define FFS_FL_BOUND 1 + + /* Active function */ + struct ffs_function *func; + + /* + * Device name, write once when file system is mounted. + * Intended for user to read if she wants. + */ + const char *dev_name; + /* Private data for our user (ie. gadget). Managed by user. */ + void *private_data; + + /* filled by __ffs_data_got_descs() */ + /* + * raw_descs is what you kfree, real_descs points inside of raw_descs, + * where full speed, high speed and super speed descriptors start. + * real_descs_length is the length of all those descriptors. + */ + const void *raw_descs_data; + const void *raw_descs; + unsigned raw_descs_length; + unsigned fs_descs_count; + unsigned hs_descs_count; + unsigned ss_descs_count; + unsigned ms_os_descs_count; + unsigned ms_os_descs_ext_prop_count; + unsigned ms_os_descs_ext_prop_name_len; + unsigned ms_os_descs_ext_prop_data_len; + void *ms_os_descs_ext_prop_avail; + void *ms_os_descs_ext_prop_name_avail; + void *ms_os_descs_ext_prop_data_avail; + + unsigned user_flags; + + u8 eps_addrmap[15]; + + unsigned short strings_count; + unsigned short interfaces_count; + unsigned short eps_count; + unsigned short _pad1; + + /* filled by __ffs_data_got_strings() */ + /* ids in stringtabs are set in functionfs_bind() */ + const void *raw_strings; + struct usb_gadget_strings **stringtabs; + + /* + * File system's super block, write once when file system is + * mounted. + */ + struct super_block *sb; + + /* File permissions, written once when fs is mounted */ + struct ffs_file_perms { + umode_t mode; + kuid_t uid; + kgid_t gid; + } file_perms; + + struct eventfd_ctx *ffs_eventfd; + bool no_disconnect; + struct work_struct reset_work; + + /* + * The endpoint files, filled by ffs_epfiles_create(), + * destroyed by ffs_epfiles_destroy(). + */ + struct ffs_epfile *epfiles; +}; + + +struct f_fs_opts { + struct usb_function_instance func_inst; + struct ffs_dev *dev; + unsigned refcnt; + bool no_configfs; +}; + +static inline struct f_fs_opts *to_f_fs_opts(struct usb_function_instance *fi) +{ + return container_of(fi, struct f_fs_opts, func_inst); +} + +#endif /* U_FFS_H */ diff --git a/drivers/usb/gadget/function/u_gether.h b/drivers/usb/gadget/function/u_gether.h new file mode 100644 index 000000000..d4078426b --- /dev/null +++ b/drivers/usb/gadget/function/u_gether.h @@ -0,0 +1,36 @@ +/* + * u_gether.h + * + * Utility definitions for the subset function + * + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef U_GETHER_H +#define U_GETHER_H + +#include <linux/usb/composite.h> + +struct f_gether_opts { + struct usb_function_instance func_inst; + struct net_device *net; + bool bound; + + /* + * Read/write access to configfs attributes is handled by configfs. + * + * This is to protect the data from concurrent access by read/write + * and create symlink/remove symlink. 
+ */ + struct mutex lock; + int refcnt; +}; + +#endif /* U_GETHER_H */ diff --git a/drivers/usb/gadget/function/u_hid.h b/drivers/usb/gadget/function/u_hid.h new file mode 100644 index 000000000..aaa0e368a --- /dev/null +++ b/drivers/usb/gadget/function/u_hid.h @@ -0,0 +1,42 @@ +/* + * u_hid.h + * + * Utility definitions for the hid function + * + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef U_HID_H +#define U_HID_H + +#include <linux/usb/composite.h> + +struct f_hid_opts { + struct usb_function_instance func_inst; + int minor; + unsigned char subclass; + unsigned char protocol; + unsigned short report_length; + unsigned short report_desc_length; + unsigned char *report_desc; + bool report_desc_alloc; + + /* + * Protect the data form concurrent access by read/write + * and create symlink/remove symlink. + */ + struct mutex lock; + int refcnt; +}; + +int ghid_setup(struct usb_gadget *g, int count); +void ghid_cleanup(void); + +#endif /* U_HID_H */ diff --git a/drivers/usb/gadget/function/u_midi.h b/drivers/usb/gadget/function/u_midi.h new file mode 100644 index 000000000..225101897 --- /dev/null +++ b/drivers/usb/gadget/function/u_midi.h @@ -0,0 +1,40 @@ +/* + * u_midi.h + * + * Utility definitions for the midi function + * + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef U_MIDI_H +#define U_MIDI_H + +#include <linux/usb/composite.h> + +struct f_midi_opts { + struct usb_function_instance func_inst; + int index; + char *id; + bool id_allocated; + unsigned int in_ports; + unsigned int out_ports; + unsigned int buflen; + unsigned int qlen; + + /* + * Protect the data form concurrent access by read/write + * and create symlink/remove symlink. + */ + struct mutex lock; + int refcnt; +}; + +#endif /* U_MIDI_H */ + diff --git a/drivers/usb/gadget/function/u_ncm.h b/drivers/usb/gadget/function/u_ncm.h new file mode 100644 index 000000000..ce0f3a78c --- /dev/null +++ b/drivers/usb/gadget/function/u_ncm.h @@ -0,0 +1,36 @@ +/* + * u_ncm.h + * + * Utility definitions for the ncm function + * + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef U_NCM_H +#define U_NCM_H + +#include <linux/usb/composite.h> + +struct f_ncm_opts { + struct usb_function_instance func_inst; + struct net_device *net; + bool bound; + + /* + * Read/write access to configfs attributes is handled by configfs. + * + * This is to protect the data from concurrent access by read/write + * and create symlink/remove symlink. 
+ */ + struct mutex lock; + int refcnt; +}; + +#endif /* U_NCM_H */ diff --git a/drivers/usb/gadget/function/u_phonet.h b/drivers/usb/gadget/function/u_phonet.h new file mode 100644 index 000000000..98ced1877 --- /dev/null +++ b/drivers/usb/gadget/function/u_phonet.h @@ -0,0 +1,29 @@ +/* + * u_phonet.h - interface to Phonet + * + * Copyright (C) 2007-2008 by Nokia Corporation + * + * This software is distributed under the terms of the GNU General + * Public License ("GPL") as published by the Free Software Foundation, + * either version 2 of that License or (at your option) any later version. + */ + +#ifndef __U_PHONET_H +#define __U_PHONET_H + +#include <linux/usb/composite.h> +#include <linux/usb/cdc.h> + +struct f_phonet_opts { + struct usb_function_instance func_inst; + bool bound; + struct net_device *net; +}; + +struct net_device *gphonet_setup_default(void); +void gphonet_set_gadget(struct net_device *net, struct usb_gadget *g); +int gphonet_register_netdev(struct net_device *net); +int phonet_bind_config(struct usb_configuration *c, struct net_device *dev); +void gphonet_cleanup(struct net_device *dev); + +#endif /* __U_PHONET_H */ diff --git a/drivers/usb/gadget/function/u_printer.h b/drivers/usb/gadget/function/u_printer.h new file mode 100644 index 000000000..0e2c49d42 --- /dev/null +++ b/drivers/usb/gadget/function/u_printer.h @@ -0,0 +1,37 @@ +/* + * u_printer.h + * + * Utility definitions for the printer function + * + * Copyright (c) 2015 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef U_PRINTER_H +#define U_PRINTER_H + +#include <linux/usb/composite.h> + +#define PNP_STRING_LEN 1024 + +struct f_printer_opts { + struct usb_function_instance func_inst; + int minor; + char pnp_string[PNP_STRING_LEN]; + unsigned q_len; + + /* + * Protect the data from concurrent access by read/write + * and create symlink/remove symlink + */ + struct mutex lock; + int refcnt; +}; + +#endif /* U_PRINTER_H */ diff --git a/drivers/usb/gadget/function/u_rndis.h b/drivers/usb/gadget/function/u_rndis.h new file mode 100644 index 000000000..e902aa42a --- /dev/null +++ b/drivers/usb/gadget/function/u_rndis.h @@ -0,0 +1,46 @@ +/* + * u_rndis.h + * + * Utility definitions for the subset function + * + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef U_RNDIS_H +#define U_RNDIS_H + +#include <linux/usb/composite.h> + +struct f_rndis_opts { + struct usb_function_instance func_inst; + u32 vendor_id; + const char *manufacturer; + struct net_device *net; + bool bound; + bool borrowed_net; + + struct usb_os_desc rndis_os_desc; + char rndis_ext_compat_id[16]; + + /* + * Read/write access to configfs attributes is handled by configfs. + * + * This is to protect the data from concurrent access by read/write + * and create symlink/remove symlink. 
+ */ + struct mutex lock; + int refcnt; +}; + +int rndis_init(void); +void rndis_exit(void); +void rndis_borrow_net(struct usb_function_instance *f, struct net_device *net); + +#endif /* U_RNDIS_H */ diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c new file mode 100644 index 000000000..7ee057930 --- /dev/null +++ b/drivers/usb/gadget/function/u_serial.c @@ -0,0 +1,1346 @@ +/* + * u_serial.c - utilities for USB gadget "serial port"/TTY support + * + * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com) + * Copyright (C) 2008 David Brownell + * Copyright (C) 2008 by Nokia Corporation + * + * This code also borrows from usbserial.c, which is + * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com) + * Copyright (C) 2000 Peter Berger (pberger@brimson.com) + * Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com) + * + * This software is distributed under the terms of the GNU General + * Public License ("GPL") as published by the Free Software Foundation, + * either version 2 of that License or (at your option) any later version. + */ + +/* #define VERBOSE_DEBUG */ + +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> +#include <linux/slab.h> +#include <linux/export.h> +#include <linux/module.h> + +#include "u_serial.h" + + +/* + * This component encapsulates the TTY layer glue needed to provide basic + * "serial port" functionality through the USB gadget stack. Each such + * port is exposed through a /dev/ttyGS* node. + * + * After this module has been loaded, the individual TTY port can be requested + * (gserial_alloc_line()) and it will stay available until they are removed + * (gserial_free_line()). Each one may be connected to a USB function + * (gserial_connect), or disconnected (with gserial_disconnect) when the USB + * host issues a config change event. Data can only flow when the port is + * connected to the host. + * + * A given TTY port can be made available in multiple configurations. + * For example, each one might expose a ttyGS0 node which provides a + * login application. In one case that might use CDC ACM interface 0, + * while another configuration might use interface 3 for that. The + * work to handle that (including descriptor management) is not part + * of this component. + * + * Configurations may expose more than one TTY port. For example, if + * ttyGS0 provides login service, then ttyGS1 might provide dialer access + * for a telephone or fax link. And ttyGS2 might be something that just + * needs a simple byte stream interface for some messaging protocol that + * is managed in userspace ... OBEX, PTP, and MTP have been mentioned. + * + * + * gserial is the lifecycle interface, used by USB functions + * gs_port is the I/O nexus, used by the tty driver + * tty_struct links to the tty/filesystem framework + * + * gserial <---> gs_port ... links will be null when the USB link is + * inactive; managed by gserial_{connect,disconnect}(). each gserial + * instance can wrap its own USB control protocol. + * gserial->ioport == usb_ep->driver_data ... gs_port + * gs_port->port_usb ... gserial + * + * gs_port <---> tty_struct ... links will be null when the TTY file + * isn't opened; managed by gs_open()/gs_close() + * gserial->port_tty ... tty_struct + * tty_struct->driver_data ... 
gserial + */ + +/* RX and TX queues can buffer QUEUE_SIZE packets before they hit the + * next layer of buffering. For TX that's a circular buffer; for RX + * consider it a NOP. A third layer is provided by the TTY code. + */ +#define QUEUE_SIZE 16 +#define WRITE_BUF_SIZE 8192 /* TX only */ + +/* circular buffer */ +struct gs_buf { + unsigned buf_size; + char *buf_buf; + char *buf_get; + char *buf_put; +}; + +/* + * The port structure holds info for each port, one for each minor number + * (and thus for each /dev/ node). + */ +struct gs_port { + struct tty_port port; + spinlock_t port_lock; /* guard port_* access */ + + struct gserial *port_usb; + + bool openclose; /* open/close in progress */ + u8 port_num; + + struct list_head read_pool; + int read_started; + int read_allocated; + struct list_head read_queue; + unsigned n_read; + struct tasklet_struct push; + + struct list_head write_pool; + int write_started; + int write_allocated; + struct gs_buf port_write_buf; + wait_queue_head_t drain_wait; /* wait while writes drain */ + bool write_busy; + + /* REVISIT this state ... */ + struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */ +}; + +static struct portmaster { + struct mutex lock; /* protect open/close */ + struct gs_port *port; +} ports[MAX_U_SERIAL_PORTS]; + +#define GS_CLOSE_TIMEOUT 15 /* seconds */ + + + +#ifdef VERBOSE_DEBUG +#ifndef pr_vdebug +#define pr_vdebug(fmt, arg...) \ + pr_debug(fmt, ##arg) +#endif /* pr_vdebug */ +#else +#ifndef pr_vdebug +#define pr_vdebug(fmt, arg...) \ + ({ if (0) pr_debug(fmt, ##arg); }) +#endif /* pr_vdebug */ +#endif + +/*-------------------------------------------------------------------------*/ + +/* Circular Buffer */ + +/* + * gs_buf_alloc + * + * Allocate a circular buffer and all associated memory. + */ +static int gs_buf_alloc(struct gs_buf *gb, unsigned size) +{ + gb->buf_buf = kmalloc(size, GFP_KERNEL); + if (gb->buf_buf == NULL) + return -ENOMEM; + + gb->buf_size = size; + gb->buf_put = gb->buf_buf; + gb->buf_get = gb->buf_buf; + + return 0; +} + +/* + * gs_buf_free + * + * Free the buffer and all associated memory. + */ +static void gs_buf_free(struct gs_buf *gb) +{ + kfree(gb->buf_buf); + gb->buf_buf = NULL; +} + +/* + * gs_buf_clear + * + * Clear out all data in the circular buffer. + */ +static void gs_buf_clear(struct gs_buf *gb) +{ + gb->buf_get = gb->buf_put; + /* equivalent to a get of all data available */ +} + +/* + * gs_buf_data_avail + * + * Return the number of bytes of data written into the circular + * buffer. + */ +static unsigned gs_buf_data_avail(struct gs_buf *gb) +{ + return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size; +} + +/* + * gs_buf_space_avail + * + * Return the number of bytes of space available in the circular + * buffer. + */ +static unsigned gs_buf_space_avail(struct gs_buf *gb) +{ + return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size; +} + +/* + * gs_buf_put + * + * Copy data data from a user buffer and put it into the circular buffer. + * Restrict to the amount of space available. + * + * Return the number of bytes copied. 
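A quick way to convince yourself of the gs_buf arithmetic above: the put and get indices chase each other modulo buf_size, and one slot is always left empty, so a buffer of size N stores at most N - 1 bytes. The following standalone user-space sketch (not driver code) restates the two *_avail formulas and checks them for a toy 8-byte ring.

/* Standalone illustration of the gs_buf index arithmetic. */
#include <assert.h>
#include <stdio.h>

#define N 8	/* pretend buf_size == 8 */

static unsigned data_avail(unsigned put, unsigned get)
{
	return (N + put - get) % N;
}

static unsigned space_avail(unsigned put, unsigned get)
{
	return (N + get - put - 1) % N;
}

int main(void)
{
	unsigned put = 0, get = 0;

	assert(data_avail(put, get) == 0);
	assert(space_avail(put, get) == N - 1);	/* 7, never 8 */

	put = 5;			/* wrote 5 bytes */
	assert(data_avail(put, get) == 5);
	assert(space_avail(put, get) == 2);

	get = 3;			/* consumed 3 bytes */
	put = 1;			/* wrote 4 more, wrapping past N */
	assert(data_avail(put, get) == 6);
	assert(space_avail(put, get) == 1);

	printf("ring-buffer invariants hold\n");
	return 0;
}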
+ */ +static unsigned +gs_buf_put(struct gs_buf *gb, const char *buf, unsigned count) +{ + unsigned len; + + len = gs_buf_space_avail(gb); + if (count > len) + count = len; + + if (count == 0) + return 0; + + len = gb->buf_buf + gb->buf_size - gb->buf_put; + if (count > len) { + memcpy(gb->buf_put, buf, len); + memcpy(gb->buf_buf, buf+len, count - len); + gb->buf_put = gb->buf_buf + count - len; + } else { + memcpy(gb->buf_put, buf, count); + if (count < len) + gb->buf_put += count; + else /* count == len */ + gb->buf_put = gb->buf_buf; + } + + return count; +} + +/* + * gs_buf_get + * + * Get data from the circular buffer and copy to the given buffer. + * Restrict to the amount of data available. + * + * Return the number of bytes copied. + */ +static unsigned +gs_buf_get(struct gs_buf *gb, char *buf, unsigned count) +{ + unsigned len; + + len = gs_buf_data_avail(gb); + if (count > len) + count = len; + + if (count == 0) + return 0; + + len = gb->buf_buf + gb->buf_size - gb->buf_get; + if (count > len) { + memcpy(buf, gb->buf_get, len); + memcpy(buf+len, gb->buf_buf, count - len); + gb->buf_get = gb->buf_buf + count - len; + } else { + memcpy(buf, gb->buf_get, count); + if (count < len) + gb->buf_get += count; + else /* count == len */ + gb->buf_get = gb->buf_buf; + } + + return count; +} + +/*-------------------------------------------------------------------------*/ + +/* I/O glue between TTY (upper) and USB function (lower) driver layers */ + +/* + * gs_alloc_req + * + * Allocate a usb_request and its buffer. Returns a pointer to the + * usb_request or NULL if there is an error. + */ +struct usb_request * +gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags) +{ + struct usb_request *req; + + req = usb_ep_alloc_request(ep, kmalloc_flags); + + if (req != NULL) { + req->length = len; + req->buf = kmalloc(len, kmalloc_flags); + if (req->buf == NULL) { + usb_ep_free_request(ep, req); + return NULL; + } + } + + return req; +} +EXPORT_SYMBOL_GPL(gs_alloc_req); + +/* + * gs_free_req + * + * Free a usb_request and its buffer. + */ +void gs_free_req(struct usb_ep *ep, struct usb_request *req) +{ + kfree(req->buf); + usb_ep_free_request(ep, req); +} +EXPORT_SYMBOL_GPL(gs_free_req); + +/* + * gs_send_packet + * + * If there is data to send, a packet is built in the given + * buffer and the size is returned. If there is no data to + * send, 0 is returned. + * + * Called with port_lock held. + */ +static unsigned +gs_send_packet(struct gs_port *port, char *packet, unsigned size) +{ + unsigned len; + + len = gs_buf_data_avail(&port->port_write_buf); + if (len < size) + size = len; + if (size != 0) + size = gs_buf_get(&port->port_write_buf, packet, size); + return size; +} + +/* + * gs_start_tx + * + * This function finds available write requests, calls + * gs_send_packet to fill these packets with data, and + * continues until either there are no more write requests + * available or no more data to send. This function is + * run whenever data arrives or write requests are available. + * + * Context: caller owns port_lock; port_usb is non-null. 
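gs_alloc_req()/gs_free_req() above are exported for the function drivers built on this component (ACM, for example, uses them for its notification endpoint). A hedged caller sketch follows; my_prep_notify/my_drop_notify and the maxpacket-sized buffer are illustrative choices, not code taken from f_acm.c.

/* Hypothetical caller: one request per endpoint, allocated at bind time
 * and released on unbind or on allocation failure.
 */
static struct usb_request *my_prep_notify(struct usb_ep *notify_ep)
{
	struct usb_request *req;

	/* buffer sized to the endpoint's maxpacket; may sleep here */
	req = gs_alloc_req(notify_ep, notify_ep->maxpacket, GFP_KERNEL);
	if (!req)
		return NULL;

	/* the caller fills req->buf / req->length, sets req->complete,
	 * then hands it over with usb_ep_queue(notify_ep, req, GFP_ATOMIC)
	 */
	return req;
}

static void my_drop_notify(struct usb_ep *notify_ep, struct usb_request *req)
{
	gs_free_req(notify_ep, req);	/* frees req->buf, then the request */
}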
+ */ +static int gs_start_tx(struct gs_port *port) +/* +__releases(&port->port_lock) +__acquires(&port->port_lock) +*/ +{ + struct list_head *pool = &port->write_pool; + struct usb_ep *in = port->port_usb->in; + int status = 0; + bool do_tty_wake = false; + + while (!port->write_busy && !list_empty(pool)) { + struct usb_request *req; + int len; + + if (port->write_started >= QUEUE_SIZE) + break; + + req = list_entry(pool->next, struct usb_request, list); + len = gs_send_packet(port, req->buf, in->maxpacket); + if (len == 0) { + wake_up_interruptible(&port->drain_wait); + break; + } + do_tty_wake = true; + + req->length = len; + list_del(&req->list); + req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0); + + pr_vdebug("ttyGS%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n", + port->port_num, len, *((u8 *)req->buf), + *((u8 *)req->buf+1), *((u8 *)req->buf+2)); + + /* Drop lock while we call out of driver; completions + * could be issued while we do so. Disconnection may + * happen too; maybe immediately before we queue this! + * + * NOTE that we may keep sending data for a while after + * the TTY closed (dev->ioport->port_tty is NULL). + */ + port->write_busy = true; + spin_unlock(&port->port_lock); + status = usb_ep_queue(in, req, GFP_ATOMIC); + spin_lock(&port->port_lock); + port->write_busy = false; + + if (status) { + pr_debug("%s: %s %s err %d\n", + __func__, "queue", in->name, status); + list_add(&req->list, pool); + break; + } + + port->write_started++; + + /* abort immediately after disconnect */ + if (!port->port_usb) + break; + } + + if (do_tty_wake && port->port.tty) + tty_wakeup(port->port.tty); + return status; +} + +/* + * Context: caller owns port_lock, and port_usb is set + */ +static unsigned gs_start_rx(struct gs_port *port) +/* +__releases(&port->port_lock) +__acquires(&port->port_lock) +*/ +{ + struct list_head *pool = &port->read_pool; + struct usb_ep *out = port->port_usb->out; + + while (!list_empty(pool)) { + struct usb_request *req; + int status; + struct tty_struct *tty; + + /* no more rx if closed */ + tty = port->port.tty; + if (!tty) + break; + + if (port->read_started >= QUEUE_SIZE) + break; + + req = list_entry(pool->next, struct usb_request, list); + list_del(&req->list); + req->length = out->maxpacket; + + /* drop lock while we call out; the controller driver + * may need to call us back (e.g. for disconnect) + */ + spin_unlock(&port->port_lock); + status = usb_ep_queue(out, req, GFP_ATOMIC); + spin_lock(&port->port_lock); + + if (status) { + pr_debug("%s: %s %s err %d\n", + __func__, "queue", out->name, status); + list_add(&req->list, pool); + break; + } + port->read_started++; + + /* abort immediately after disconnect */ + if (!port->port_usb) + break; + } + return port->read_started; +} + +/* + * RX tasklet takes data out of the RX queue and hands it up to the TTY + * layer until it refuses to take any more data (or is throttled back). + * Then it issues reads for any further data. + * + * If the RX queue becomes full enough that no usb_request is queued, + * the OUT endpoint may begin NAKing as soon as its FIFO fills up. + * So QUEUE_SIZE packets plus however many the FIFO holds (usually two) + * can be buffered before the TTY layer's buffers (currently 64 KB). 
+ */ +static void gs_rx_push(unsigned long _port) +{ + struct gs_port *port = (void *)_port; + struct tty_struct *tty; + struct list_head *queue = &port->read_queue; + bool disconnect = false; + bool do_push = false; + + /* hand any queued data to the tty */ + spin_lock_irq(&port->port_lock); + tty = port->port.tty; + while (!list_empty(queue)) { + struct usb_request *req; + + req = list_first_entry(queue, struct usb_request, list); + + /* leave data queued if tty was rx throttled */ + if (tty && test_bit(TTY_THROTTLED, &tty->flags)) + break; + + switch (req->status) { + case -ESHUTDOWN: + disconnect = true; + pr_vdebug("ttyGS%d: shutdown\n", port->port_num); + break; + + default: + /* presumably a transient fault */ + pr_warn("ttyGS%d: unexpected RX status %d\n", + port->port_num, req->status); + /* FALLTHROUGH */ + case 0: + /* normal completion */ + break; + } + + /* push data to (open) tty */ + if (req->actual) { + char *packet = req->buf; + unsigned size = req->actual; + unsigned n; + int count; + + /* we may have pushed part of this packet already... */ + n = port->n_read; + if (n) { + packet += n; + size -= n; + } + + count = tty_insert_flip_string(&port->port, packet, + size); + if (count) + do_push = true; + if (count != size) { + /* stop pushing; TTY layer can't handle more */ + port->n_read += count; + pr_vdebug("ttyGS%d: rx block %d/%d\n", + port->port_num, count, req->actual); + break; + } + port->n_read = 0; + } + + list_move(&req->list, &port->read_pool); + port->read_started--; + } + + /* Push from tty to ldisc; this is handled by a workqueue, + * so we won't get callbacks and can hold port_lock + */ + if (do_push) + tty_flip_buffer_push(&port->port); + + + /* We want our data queue to become empty ASAP, keeping data + * in the tty and ldisc (not here). If we couldn't push any + * this time around, there may be trouble unless there's an + * implicit tty_unthrottle() call on its way... + * + * REVISIT we should probably add a timer to keep the tasklet + * from starving ... but it's not clear that case ever happens. + */ + if (!list_empty(queue) && tty) { + if (!test_bit(TTY_THROTTLED, &tty->flags)) { + if (do_push) + tasklet_schedule(&port->push); + else + pr_warn("ttyGS%d: RX not scheduled?\n", + port->port_num); + } + } + + /* If we're still connected, refill the USB RX queue. */ + if (!disconnect && port->port_usb) + gs_start_rx(port); + + spin_unlock_irq(&port->port_lock); +} + +static void gs_read_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct gs_port *port = ep->driver_data; + + /* Queue all received data until the tty layer is ready for it. 
*/ + spin_lock(&port->port_lock); + list_add_tail(&req->list, &port->read_queue); + tasklet_schedule(&port->push); + spin_unlock(&port->port_lock); +} + +static void gs_write_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct gs_port *port = ep->driver_data; + + spin_lock(&port->port_lock); + list_add(&req->list, &port->write_pool); + port->write_started--; + + switch (req->status) { + default: + /* presumably a transient fault */ + pr_warning("%s: unexpected %s status %d\n", + __func__, ep->name, req->status); + /* FALL THROUGH */ + case 0: + /* normal completion */ + gs_start_tx(port); + break; + + case -ESHUTDOWN: + /* disconnect */ + pr_vdebug("%s: %s shutdown\n", __func__, ep->name); + break; + } + + spin_unlock(&port->port_lock); +} + +static void gs_free_requests(struct usb_ep *ep, struct list_head *head, + int *allocated) +{ + struct usb_request *req; + + while (!list_empty(head)) { + req = list_entry(head->next, struct usb_request, list); + list_del(&req->list); + gs_free_req(ep, req); + if (allocated) + (*allocated)--; + } +} + +static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head, + void (*fn)(struct usb_ep *, struct usb_request *), + int *allocated) +{ + int i; + struct usb_request *req; + int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE; + + /* Pre-allocate up to QUEUE_SIZE transfers, but if we can't + * do quite that many this time, don't fail ... we just won't + * be as speedy as we might otherwise be. + */ + for (i = 0; i < n; i++) { + req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC); + if (!req) + return list_empty(head) ? -ENOMEM : 0; + req->complete = fn; + list_add_tail(&req->list, head); + if (allocated) + (*allocated)++; + } + return 0; +} + +/** + * gs_start_io - start USB I/O streams + * @dev: encapsulates endpoints to use + * Context: holding port_lock; port_tty and port_usb are non-null + * + * We only start I/O when something is connected to both sides of + * this port. If nothing is listening on the host side, we may + * be pointlessly filling up our TX buffers and FIFO. + */ +static int gs_start_io(struct gs_port *port) +{ + struct list_head *head = &port->read_pool; + struct usb_ep *ep = port->port_usb->out; + int status; + unsigned started; + + /* Allocate RX and TX I/O buffers. We can't easily do this much + * earlier (with GFP_KERNEL) because the requests are coupled to + * endpoints, as are the packet sizes we'll be using. Different + * configurations may use different endpoints with a given port; + * and high speed vs full speed changes packet sizes too. + */ + status = gs_alloc_requests(ep, head, gs_read_complete, + &port->read_allocated); + if (status) + return status; + + status = gs_alloc_requests(port->port_usb->in, &port->write_pool, + gs_write_complete, &port->write_allocated); + if (status) { + gs_free_requests(ep, head, &port->read_allocated); + return status; + } + + /* queue read requests */ + port->n_read = 0; + started = gs_start_rx(port); + + /* unblock any pending writes into our circular buffer */ + if (started) { + tty_wakeup(port->port.tty); + } else { + gs_free_requests(ep, head, &port->read_allocated); + gs_free_requests(port->port_usb->in, &port->write_pool, + &port->write_allocated); + status = -EIO; + } + + return status; +} + +/*-------------------------------------------------------------------------*/ + +/* TTY Driver */ + +/* + * gs_open sets up the link between a gs_port and its associated TTY. + * That link is broken *only* by TTY close(), and all driver methods + * know that. 
+ */ +static int gs_open(struct tty_struct *tty, struct file *file) +{ + int port_num = tty->index; + struct gs_port *port; + int status; + + do { + mutex_lock(&ports[port_num].lock); + port = ports[port_num].port; + if (!port) + status = -ENODEV; + else { + spin_lock_irq(&port->port_lock); + + /* already open? Great. */ + if (port->port.count) { + status = 0; + port->port.count++; + + /* currently opening/closing? wait ... */ + } else if (port->openclose) { + status = -EBUSY; + + /* ... else we do the work */ + } else { + status = -EAGAIN; + port->openclose = true; + } + spin_unlock_irq(&port->port_lock); + } + mutex_unlock(&ports[port_num].lock); + + switch (status) { + default: + /* fully handled */ + return status; + case -EAGAIN: + /* must do the work */ + break; + case -EBUSY: + /* wait for EAGAIN task to finish */ + msleep(1); + /* REVISIT could have a waitchannel here, if + * concurrent open performance is important + */ + break; + } + } while (status != -EAGAIN); + + /* Do the "real open" */ + spin_lock_irq(&port->port_lock); + + /* allocate circular buffer on first open */ + if (port->port_write_buf.buf_buf == NULL) { + + spin_unlock_irq(&port->port_lock); + status = gs_buf_alloc(&port->port_write_buf, WRITE_BUF_SIZE); + spin_lock_irq(&port->port_lock); + + if (status) { + pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n", + port->port_num, tty, file); + port->openclose = false; + goto exit_unlock_port; + } + } + + /* REVISIT if REMOVED (ports[].port NULL), abort the open + * to let rmmod work faster (but this way isn't wrong). + */ + + /* REVISIT maybe wait for "carrier detect" */ + + tty->driver_data = port; + port->port.tty = tty; + + port->port.count = 1; + port->openclose = false; + + /* if connected, start the I/O stream */ + if (port->port_usb) { + struct gserial *gser = port->port_usb; + + pr_debug("gs_open: start ttyGS%d\n", port->port_num); + gs_start_io(port); + + if (gser->connect) + gser->connect(gser); + } + + pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file); + + status = 0; + +exit_unlock_port: + spin_unlock_irq(&port->port_lock); + return status; +} + +static int gs_writes_finished(struct gs_port *p) +{ + int cond; + + /* return true on disconnect or empty buffer */ + spin_lock_irq(&p->port_lock); + cond = (p->port_usb == NULL) || !gs_buf_data_avail(&p->port_write_buf); + spin_unlock_irq(&p->port_lock); + + return cond; +} + +static void gs_close(struct tty_struct *tty, struct file *file) +{ + struct gs_port *port = tty->driver_data; + struct gserial *gser; + + spin_lock_irq(&port->port_lock); + + if (port->port.count != 1) { + if (port->port.count == 0) + WARN_ON(1); + else + --port->port.count; + goto exit; + } + + pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file); + + /* mark port as closing but in use; we can drop port lock + * and sleep if necessary + */ + port->openclose = true; + port->port.count = 0; + + gser = port->port_usb; + if (gser && gser->disconnect) + gser->disconnect(gser); + + /* wait for circular write buffer to drain, disconnect, or at + * most GS_CLOSE_TIMEOUT seconds; then discard the rest + */ + if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) { + spin_unlock_irq(&port->port_lock); + wait_event_interruptible_timeout(port->drain_wait, + gs_writes_finished(port), + GS_CLOSE_TIMEOUT * HZ); + spin_lock_irq(&port->port_lock); + gser = port->port_usb; + } + + /* Iff we're disconnected, there can be no I/O in flight so it's + * ok to free the circular buffer; else just scrub it. 
And don't + * let the push tasklet fire again until we're re-opened. + */ + if (gser == NULL) + gs_buf_free(&port->port_write_buf); + else + gs_buf_clear(&port->port_write_buf); + + tty->driver_data = NULL; + port->port.tty = NULL; + + port->openclose = false; + + pr_debug("gs_close: ttyGS%d (%p,%p) done!\n", + port->port_num, tty, file); + + wake_up(&port->port.close_wait); +exit: + spin_unlock_irq(&port->port_lock); +} + +static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count) +{ + struct gs_port *port = tty->driver_data; + unsigned long flags; + int status; + + pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n", + port->port_num, tty, count); + + spin_lock_irqsave(&port->port_lock, flags); + if (count) + count = gs_buf_put(&port->port_write_buf, buf, count); + /* treat count == 0 as flush_chars() */ + if (port->port_usb) + status = gs_start_tx(port); + spin_unlock_irqrestore(&port->port_lock, flags); + + return count; +} + +static int gs_put_char(struct tty_struct *tty, unsigned char ch) +{ + struct gs_port *port = tty->driver_data; + unsigned long flags; + int status; + + pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %ps\n", + port->port_num, tty, ch, __builtin_return_address(0)); + + spin_lock_irqsave(&port->port_lock, flags); + status = gs_buf_put(&port->port_write_buf, &ch, 1); + spin_unlock_irqrestore(&port->port_lock, flags); + + return status; +} + +static void gs_flush_chars(struct tty_struct *tty) +{ + struct gs_port *port = tty->driver_data; + unsigned long flags; + + pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty); + + spin_lock_irqsave(&port->port_lock, flags); + if (port->port_usb) + gs_start_tx(port); + spin_unlock_irqrestore(&port->port_lock, flags); +} + +static int gs_write_room(struct tty_struct *tty) +{ + struct gs_port *port = tty->driver_data; + unsigned long flags; + int room = 0; + + spin_lock_irqsave(&port->port_lock, flags); + if (port->port_usb) + room = gs_buf_space_avail(&port->port_write_buf); + spin_unlock_irqrestore(&port->port_lock, flags); + + pr_vdebug("gs_write_room: (%d,%p) room=%d\n", + port->port_num, tty, room); + + return room; +} + +static int gs_chars_in_buffer(struct tty_struct *tty) +{ + struct gs_port *port = tty->driver_data; + unsigned long flags; + int chars = 0; + + spin_lock_irqsave(&port->port_lock, flags); + chars = gs_buf_data_avail(&port->port_write_buf); + spin_unlock_irqrestore(&port->port_lock, flags); + + pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n", + port->port_num, tty, chars); + + return chars; +} + +/* undo side effects of setting TTY_THROTTLED */ +static void gs_unthrottle(struct tty_struct *tty) +{ + struct gs_port *port = tty->driver_data; + unsigned long flags; + + spin_lock_irqsave(&port->port_lock, flags); + if (port->port_usb) { + /* Kickstart read queue processing. We don't do xon/xoff, + * rts/cts, or other handshaking with the host, but if the + * read queue backs up enough we'll be NAKing OUT packets. 
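For completeness, the other end of these tty_operations is an ordinary character device on the gadget. A hypothetical gadget-side user-space peer, not part of the driver, that opens /dev/ttyGS0 in raw mode and echoes whatever the USB host sends:

/* Echo server for one ttyGS port; illustration only. */
#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	struct termios tio;
	ssize_t n;
	int fd = open("/dev/ttyGS0", O_RDWR | O_NOCTTY);

	if (fd < 0) {
		perror("open /dev/ttyGS0");
		return 1;
	}

	tcgetattr(fd, &tio);
	cfmakeraw(&tio);		/* plain byte stream, no line discipline edits */
	tcsetattr(fd, TCSANOW, &tio);

	while ((n = read(fd, buf, sizeof(buf))) > 0)
		write(fd, buf, n);	/* echo back to the USB host */

	close(fd);
	return 0;
}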
+ */ + tasklet_schedule(&port->push); + pr_vdebug("ttyGS%d: unthrottle\n", port->port_num); + } + spin_unlock_irqrestore(&port->port_lock, flags); +} + +static int gs_break_ctl(struct tty_struct *tty, int duration) +{ + struct gs_port *port = tty->driver_data; + int status = 0; + struct gserial *gser; + + pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n", + port->port_num, duration); + + spin_lock_irq(&port->port_lock); + gser = port->port_usb; + if (gser && gser->send_break) + status = gser->send_break(gser, duration); + spin_unlock_irq(&port->port_lock); + + return status; +} + +static const struct tty_operations gs_tty_ops = { + .open = gs_open, + .close = gs_close, + .write = gs_write, + .put_char = gs_put_char, + .flush_chars = gs_flush_chars, + .write_room = gs_write_room, + .chars_in_buffer = gs_chars_in_buffer, + .unthrottle = gs_unthrottle, + .break_ctl = gs_break_ctl, +}; + +/*-------------------------------------------------------------------------*/ + +static struct tty_driver *gs_tty_driver; + +static int +gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding) +{ + struct gs_port *port; + int ret = 0; + + mutex_lock(&ports[port_num].lock); + if (ports[port_num].port) { + ret = -EBUSY; + goto out; + } + + port = kzalloc(sizeof(struct gs_port), GFP_KERNEL); + if (port == NULL) { + ret = -ENOMEM; + goto out; + } + + tty_port_init(&port->port); + spin_lock_init(&port->port_lock); + init_waitqueue_head(&port->drain_wait); + + tasklet_init(&port->push, gs_rx_push, (unsigned long) port); + + INIT_LIST_HEAD(&port->read_pool); + INIT_LIST_HEAD(&port->read_queue); + INIT_LIST_HEAD(&port->write_pool); + + port->port_num = port_num; + port->port_line_coding = *coding; + + ports[port_num].port = port; +out: + mutex_unlock(&ports[port_num].lock); + return ret; +} + +static int gs_closed(struct gs_port *port) +{ + int cond; + + spin_lock_irq(&port->port_lock); + cond = (port->port.count == 0) && !port->openclose; + spin_unlock_irq(&port->port_lock); + return cond; +} + +static void gserial_free_port(struct gs_port *port) +{ + tasklet_kill(&port->push); + /* wait for old opens to finish */ + wait_event(port->port.close_wait, gs_closed(port)); + WARN_ON(port->port_usb != NULL); + tty_port_destroy(&port->port); + kfree(port); +} + +void gserial_free_line(unsigned char port_num) +{ + struct gs_port *port; + + mutex_lock(&ports[port_num].lock); + if (WARN_ON(!ports[port_num].port)) { + mutex_unlock(&ports[port_num].lock); + return; + } + port = ports[port_num].port; + ports[port_num].port = NULL; + mutex_unlock(&ports[port_num].lock); + + gserial_free_port(port); + tty_unregister_device(gs_tty_driver, port_num); +} +EXPORT_SYMBOL_GPL(gserial_free_line); + +int gserial_alloc_line(unsigned char *line_num) +{ + struct usb_cdc_line_coding coding; + struct device *tty_dev; + int ret; + int port_num; + + coding.dwDTERate = cpu_to_le32(9600); + coding.bCharFormat = 8; + coding.bParityType = USB_CDC_NO_PARITY; + coding.bDataBits = USB_CDC_1_STOP_BITS; + + for (port_num = 0; port_num < MAX_U_SERIAL_PORTS; port_num++) { + ret = gs_port_alloc(port_num, &coding); + if (ret == -EBUSY) + continue; + if (ret) + return ret; + break; + } + if (ret) + return ret; + + /* ... 
and sysfs class devices, so mdev/udev make /dev/ttyGS* */ + + tty_dev = tty_port_register_device(&ports[port_num].port->port, + gs_tty_driver, port_num, NULL); + if (IS_ERR(tty_dev)) { + struct gs_port *port; + pr_err("%s: failed to register tty for port %d, err %ld\n", + __func__, port_num, PTR_ERR(tty_dev)); + + ret = PTR_ERR(tty_dev); + port = ports[port_num].port; + ports[port_num].port = NULL; + gserial_free_port(port); + goto err; + } + *line_num = port_num; +err: + return ret; +} +EXPORT_SYMBOL_GPL(gserial_alloc_line); + +/** + * gserial_connect - notify TTY I/O glue that USB link is active + * @gser: the function, set up with endpoints and descriptors + * @port_num: which port is active + * Context: any (usually from irq) + * + * This is called activate endpoints and let the TTY layer know that + * the connection is active ... not unlike "carrier detect". It won't + * necessarily start I/O queues; unless the TTY is held open by any + * task, there would be no point. However, the endpoints will be + * activated so the USB host can perform I/O, subject to basic USB + * hardware flow control. + * + * Caller needs to have set up the endpoints and USB function in @dev + * before calling this, as well as the appropriate (speed-specific) + * endpoint descriptors, and also have allocate @port_num by calling + * @gserial_alloc_line(). + * + * Returns negative errno or zero. + * On success, ep->driver_data will be overwritten. + */ +int gserial_connect(struct gserial *gser, u8 port_num) +{ + struct gs_port *port; + unsigned long flags; + int status; + + if (port_num >= MAX_U_SERIAL_PORTS) + return -ENXIO; + + port = ports[port_num].port; + if (!port) { + pr_err("serial line %d not allocated.\n", port_num); + return -EINVAL; + } + if (port->port_usb) { + pr_err("serial line %d is in use.\n", port_num); + return -EBUSY; + } + + /* activate the endpoints */ + status = usb_ep_enable(gser->in); + if (status < 0) + return status; + gser->in->driver_data = port; + + status = usb_ep_enable(gser->out); + if (status < 0) + goto fail_out; + gser->out->driver_data = port; + + /* then tell the tty glue that I/O can work */ + spin_lock_irqsave(&port->port_lock, flags); + gser->ioport = port; + port->port_usb = gser; + + /* REVISIT unclear how best to handle this state... + * we don't really couple it with the Linux TTY. + */ + gser->port_line_coding = port->port_line_coding; + + /* REVISIT if waiting on "carrier detect", signal. */ + + /* if it's already open, start I/O ... and notify the serial + * protocol about open/close status (connect/disconnect). + */ + if (port->port.count) { + pr_debug("gserial_connect: start ttyGS%d\n", port->port_num); + gs_start_io(port); + if (gser->connect) + gser->connect(gser); + } else { + if (gser->disconnect) + gser->disconnect(gser); + } + + spin_unlock_irqrestore(&port->port_lock, flags); + + return status; + +fail_out: + usb_ep_disable(gser->in); + gser->in->driver_data = NULL; + return status; +} +EXPORT_SYMBOL_GPL(gserial_connect); +/** + * gserial_disconnect - notify TTY I/O glue that USB link is inactive + * @gser: the function, on which gserial_connect() was called + * Context: any (usually from irq) + * + * This is called to deactivate endpoints and let the TTY layer know + * that the connection went inactive ... not unlike "hangup". + * + * On return, the state is as if gserial_connect() had never been called; + * there is no active USB I/O on these endpoints. 
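Putting gserial_connect()/gserial_disconnect() in context: a function driver is expected to call them from its set_alt() and disable() methods once the endpoints have been configured for the current speed. A minimal sketch under that assumption; my_set_alt/my_disable are illustrative names and the speed-dependent endpoint setup is elided.

/* Hedged sketch of the expected caller, not a quote of f_serial.c. */
static int my_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	struct gserial *gser = container_of(f, struct gserial, func);
	u8 port_num = 0;	/* whatever gserial_alloc_line() returned earlier */

	if (gser->ioport)	/* altsetting reset while still connected */
		gserial_disconnect(gser);

	/* gser->in / gser->out are assumed to have been configured for the
	 * current speed (config_ep_by_speed() or equivalent) by this point
	 */
	return gserial_connect(gser, port_num);
}

static void my_disable(struct usb_function *f)
{
	gserial_disconnect(container_of(f, struct gserial, func));
}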
+ */ +void gserial_disconnect(struct gserial *gser) +{ + struct gs_port *port = gser->ioport; + unsigned long flags; + + if (!port) + return; + + /* tell the TTY glue not to do I/O here any more */ + spin_lock_irqsave(&port->port_lock, flags); + + /* REVISIT as above: how best to track this? */ + port->port_line_coding = gser->port_line_coding; + + port->port_usb = NULL; + gser->ioport = NULL; + if (port->port.count > 0 || port->openclose) { + wake_up_interruptible(&port->drain_wait); + if (port->port.tty) + tty_hangup(port->port.tty); + } + spin_unlock_irqrestore(&port->port_lock, flags); + + /* disable endpoints, aborting down any active I/O */ + usb_ep_disable(gser->out); + gser->out->driver_data = NULL; + + usb_ep_disable(gser->in); + gser->in->driver_data = NULL; + + /* finally, free any unused/unusable I/O buffers */ + spin_lock_irqsave(&port->port_lock, flags); + if (port->port.count == 0 && !port->openclose) + gs_buf_free(&port->port_write_buf); + gs_free_requests(gser->out, &port->read_pool, NULL); + gs_free_requests(gser->out, &port->read_queue, NULL); + gs_free_requests(gser->in, &port->write_pool, NULL); + + port->read_allocated = port->read_started = + port->write_allocated = port->write_started = 0; + + spin_unlock_irqrestore(&port->port_lock, flags); +} +EXPORT_SYMBOL_GPL(gserial_disconnect); + +static int userial_init(void) +{ + unsigned i; + int status; + + gs_tty_driver = alloc_tty_driver(MAX_U_SERIAL_PORTS); + if (!gs_tty_driver) + return -ENOMEM; + + gs_tty_driver->driver_name = "g_serial"; + gs_tty_driver->name = "ttyGS"; + /* uses dynamically assigned dev_t values */ + + gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; + gs_tty_driver->subtype = SERIAL_TYPE_NORMAL; + gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV; + gs_tty_driver->init_termios = tty_std_termios; + + /* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on + * MS-Windows. Otherwise, most of these flags shouldn't affect + * anything unless we were to actually hook up to a serial line. + */ + gs_tty_driver->init_termios.c_cflag = + B9600 | CS8 | CREAD | HUPCL | CLOCAL; + gs_tty_driver->init_termios.c_ispeed = 9600; + gs_tty_driver->init_termios.c_ospeed = 9600; + + tty_set_operations(gs_tty_driver, &gs_tty_ops); + for (i = 0; i < MAX_U_SERIAL_PORTS; i++) + mutex_init(&ports[i].lock); + + /* export the driver ... */ + status = tty_register_driver(gs_tty_driver); + if (status) { + pr_err("%s: cannot register, err %d\n", + __func__, status); + goto fail; + } + + pr_debug("%s: registered %d ttyGS* device%s\n", __func__, + MAX_U_SERIAL_PORTS, + (MAX_U_SERIAL_PORTS == 1) ? 
"" : "s"); + + return status; +fail: + put_tty_driver(gs_tty_driver); + gs_tty_driver = NULL; + return status; +} +module_init(userial_init); + +static void userial_cleanup(void) +{ + tty_unregister_driver(gs_tty_driver); + put_tty_driver(gs_tty_driver); + gs_tty_driver = NULL; +} +module_exit(userial_cleanup); + +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/gadget/function/u_serial.h b/drivers/usb/gadget/function/u_serial.h new file mode 100644 index 000000000..c20210c0b --- /dev/null +++ b/drivers/usb/gadget/function/u_serial.h @@ -0,0 +1,71 @@ +/* + * u_serial.h - interface to USB gadget "serial port"/TTY utilities + * + * Copyright (C) 2008 David Brownell + * Copyright (C) 2008 by Nokia Corporation + * + * This software is distributed under the terms of the GNU General + * Public License ("GPL") as published by the Free Software Foundation, + * either version 2 of that License or (at your option) any later version. + */ + +#ifndef __U_SERIAL_H +#define __U_SERIAL_H + +#include <linux/usb/composite.h> +#include <linux/usb/cdc.h> + +#define MAX_U_SERIAL_PORTS 4 + +struct f_serial_opts { + struct usb_function_instance func_inst; + u8 port_num; +}; + +/* + * One non-multiplexed "serial" I/O port ... there can be several of these + * on any given USB peripheral device, if it provides enough endpoints. + * + * The "u_serial" utility component exists to do one thing: manage TTY + * style I/O using the USB peripheral endpoints listed here, including + * hookups to sysfs and /dev for each logical "tty" device. + * + * REVISIT at least ACM could support tiocmget() if needed. + * + * REVISIT someday, allow multiplexing several TTYs over these endpoints. + */ +struct gserial { + struct usb_function func; + + /* port is managed by gserial_{connect,disconnect} */ + struct gs_port *ioport; + + struct usb_ep *in; + struct usb_ep *out; + + /* REVISIT avoid this CDC-ACM support harder ... */ + struct usb_cdc_line_coding port_line_coding; /* 9600-8-N-1 etc */ + + /* notification callbacks */ + void (*connect)(struct gserial *p); + void (*disconnect)(struct gserial *p); + int (*send_break)(struct gserial *p, int duration); +}; + +/* utilities to allocate/free request and buffer */ +struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags); +void gs_free_req(struct usb_ep *, struct usb_request *req); + +/* management of individual TTY ports */ +int gserial_alloc_line(unsigned char *port_line); +void gserial_free_line(unsigned char port_line); + +/* connect/disconnect is handled by individual functions */ +int gserial_connect(struct gserial *, u8 port_num); +void gserial_disconnect(struct gserial *); + +/* functions are bound to configurations by a config or gadget driver */ +int gser_bind_config(struct usb_configuration *c, u8 port_num); +int obex_bind_config(struct usb_configuration *c, u8 port_num); + +#endif /* __U_SERIAL_H */ diff --git a/drivers/usb/gadget/function/u_uac1.c b/drivers/usb/gadget/function/u_uac1.c new file mode 100644 index 000000000..c78c84138 --- /dev/null +++ b/drivers/usb/gadget/function/u_uac1.c @@ -0,0 +1,314 @@ +/* + * u_uac1.c -- ALSA audio utilities for Gadget stack + * + * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org> + * Copyright (C) 2008 Analog Devices, Inc + * + * Enter bugs at http://blackfin.uclinux.org/ + * + * Licensed under the GPL-2 or later. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/delay.h> +#include <linux/ctype.h> +#include <linux/random.h> +#include <linux/syscalls.h> + +#include "u_uac1.h" + +/* + * This component encapsulates the ALSA devices for USB audio gadget + */ + +/*-------------------------------------------------------------------------*/ + +/** + * Some ALSA internal helper functions + */ +static int snd_interval_refine_set(struct snd_interval *i, unsigned int val) +{ + struct snd_interval t; + t.empty = 0; + t.min = t.max = val; + t.openmin = t.openmax = 0; + t.integer = 1; + return snd_interval_refine(i, &t); +} + +static int _snd_pcm_hw_param_set(struct snd_pcm_hw_params *params, + snd_pcm_hw_param_t var, unsigned int val, + int dir) +{ + int changed; + if (hw_is_mask(var)) { + struct snd_mask *m = hw_param_mask(params, var); + if (val == 0 && dir < 0) { + changed = -EINVAL; + snd_mask_none(m); + } else { + if (dir > 0) + val++; + else if (dir < 0) + val--; + changed = snd_mask_refine_set( + hw_param_mask(params, var), val); + } + } else if (hw_is_interval(var)) { + struct snd_interval *i = hw_param_interval(params, var); + if (val == 0 && dir < 0) { + changed = -EINVAL; + snd_interval_none(i); + } else if (dir == 0) + changed = snd_interval_refine_set(i, val); + else { + struct snd_interval t; + t.openmin = 1; + t.openmax = 1; + t.empty = 0; + t.integer = 0; + if (dir < 0) { + t.min = val - 1; + t.max = val; + } else { + t.min = val; + t.max = val+1; + } + changed = snd_interval_refine(i, &t); + } + } else + return -EINVAL; + if (changed) { + params->cmask |= 1 << var; + params->rmask |= 1 << var; + } + return changed; +} +/*-------------------------------------------------------------------------*/ + +/** + * Set default hardware params + */ +static int playback_default_hw_params(struct gaudio_snd_dev *snd) +{ + struct snd_pcm_substream *substream = snd->substream; + struct snd_pcm_hw_params *params; + snd_pcm_sframes_t result; + + /* + * SNDRV_PCM_ACCESS_RW_INTERLEAVED, + * SNDRV_PCM_FORMAT_S16_LE + * CHANNELS: 2 + * RATE: 48000 + */ + snd->access = SNDRV_PCM_ACCESS_RW_INTERLEAVED; + snd->format = SNDRV_PCM_FORMAT_S16_LE; + snd->channels = 2; + snd->rate = 48000; + + params = kzalloc(sizeof(*params), GFP_KERNEL); + if (!params) + return -ENOMEM; + + _snd_pcm_hw_params_any(params); + _snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_ACCESS, + snd->access, 0); + _snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_FORMAT, + snd->format, 0); + _snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_CHANNELS, + snd->channels, 0); + _snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_RATE, + snd->rate, 0); + + snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL); + snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, params); + + result = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_PREPARE, NULL); + if (result < 0) { + ERROR(snd->card, + "Preparing sound card failed: %d\n", (int)result); + kfree(params); + return result; + } + + /* Store the hardware parameters */ + snd->access = params_access(params); + snd->format = params_format(params); + snd->channels = params_channels(params); + snd->rate = params_rate(params); + + kfree(params); + + INFO(snd->card, + "Hardware params: access %x, format %x, channels %d, rate %d\n", + snd->access, snd->format, snd->channels, snd->rate); + + return 0; +} + +/** + * Playback audio buffer data by ALSA PCM device + */ +size_t u_audio_playback(struct gaudio *card, void *buf, size_t count) 
+{ + struct gaudio_snd_dev *snd = &card->playback; + struct snd_pcm_substream *substream = snd->substream; + struct snd_pcm_runtime *runtime = substream->runtime; + mm_segment_t old_fs; + ssize_t result; + snd_pcm_sframes_t frames; + +try_again: + if (runtime->status->state == SNDRV_PCM_STATE_XRUN || + runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) { + result = snd_pcm_kernel_ioctl(substream, + SNDRV_PCM_IOCTL_PREPARE, NULL); + if (result < 0) { + ERROR(card, "Preparing sound card failed: %d\n", + (int)result); + return result; + } + } + + frames = bytes_to_frames(runtime, count); + old_fs = get_fs(); + set_fs(KERNEL_DS); + result = snd_pcm_lib_write(snd->substream, (void __user *)buf, frames); + if (result != frames) { + ERROR(card, "Playback error: %d\n", (int)result); + set_fs(old_fs); + goto try_again; + } + set_fs(old_fs); + + return 0; +} + +int u_audio_get_playback_channels(struct gaudio *card) +{ + return card->playback.channels; +} + +int u_audio_get_playback_rate(struct gaudio *card) +{ + return card->playback.rate; +} + +/** + * Open ALSA PCM and control device files + * Initial the PCM or control device + */ +static int gaudio_open_snd_dev(struct gaudio *card) +{ + struct snd_pcm_file *pcm_file; + struct gaudio_snd_dev *snd; + struct f_uac1_opts *opts; + char *fn_play, *fn_cap, *fn_cntl; + + opts = container_of(card->func.fi, struct f_uac1_opts, func_inst); + fn_play = opts->fn_play; + fn_cap = opts->fn_cap; + fn_cntl = opts->fn_cntl; + + /* Open control device */ + snd = &card->control; + snd->filp = filp_open(fn_cntl, O_RDWR, 0); + if (IS_ERR(snd->filp)) { + int ret = PTR_ERR(snd->filp); + ERROR(card, "unable to open sound control device file: %s\n", + fn_cntl); + snd->filp = NULL; + return ret; + } + snd->card = card; + + /* Open PCM playback device and setup substream */ + snd = &card->playback; + snd->filp = filp_open(fn_play, O_WRONLY, 0); + if (IS_ERR(snd->filp)) { + int ret = PTR_ERR(snd->filp); + + ERROR(card, "No such PCM playback device: %s\n", fn_play); + snd->filp = NULL; + return ret; + } + pcm_file = snd->filp->private_data; + snd->substream = pcm_file->substream; + snd->card = card; + playback_default_hw_params(snd); + + /* Open PCM capture device and setup substream */ + snd = &card->capture; + snd->filp = filp_open(fn_cap, O_RDONLY, 0); + if (IS_ERR(snd->filp)) { + ERROR(card, "No such PCM capture device: %s\n", fn_cap); + snd->substream = NULL; + snd->card = NULL; + snd->filp = NULL; + } else { + pcm_file = snd->filp->private_data; + snd->substream = pcm_file->substream; + snd->card = card; + } + + return 0; +} + +/** + * Close ALSA PCM and control device files + */ +static int gaudio_close_snd_dev(struct gaudio *gau) +{ + struct gaudio_snd_dev *snd; + + /* Close control device */ + snd = &gau->control; + if (snd->filp) + filp_close(snd->filp, NULL); + + /* Close PCM playback device and setup substream */ + snd = &gau->playback; + if (snd->filp) + filp_close(snd->filp, NULL); + + /* Close PCM capture device and setup substream */ + snd = &gau->capture; + if (snd->filp) + filp_close(snd->filp, NULL); + + return 0; +} + +/** + * gaudio_setup - setup ALSA interface and preparing for USB transfer + * + * This sets up PCM, mixer or MIDI ALSA devices fore USB gadget using. 
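The key constraint on u_audio_playback() above is that snd_pcm_lib_write() may sleep, so the audio function must call it from process context rather than directly from its ISO OUT completion handler. A hedged sketch of that hand-off through a work item; struct my_audio and its single pending buffer are simplifications, not the actual f_uac1.c bookkeeping.

#include <linux/workqueue.h>

#include "u_uac1.h"

/* Illustrative consumer: ALSA writes happen in a work item. */
struct my_audio {
	struct gaudio card;		/* set up once via gaudio_setup() */
	struct work_struct playback_work;
	void *pending;			/* one pending buffer, for brevity */
	size_t pending_len;
};

static void my_playback_work(struct work_struct *work)
{
	struct my_audio *audio =
		container_of(work, struct my_audio, playback_work);

	/* count is in bytes; the helper converts to ALSA frames itself */
	u_audio_playback(&audio->card, audio->pending, audio->pending_len);
}

The endpoint completion handler would only copy the request data aside and call schedule_work(&audio->playback_work).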
+ * + * Returns negative errno, or zero on success + */ +int gaudio_setup(struct gaudio *card) +{ + int ret; + + ret = gaudio_open_snd_dev(card); + if (ret) + ERROR(card, "we need at least one control device\n"); + + return ret; + +} + +/** + * gaudio_cleanup - remove ALSA device interface + * + * This is called to free all resources allocated by @gaudio_setup(). + */ +void gaudio_cleanup(struct gaudio *the_card) +{ + if (the_card) + gaudio_close_snd_dev(the_card); +} + diff --git a/drivers/usb/gadget/function/u_uac1.h b/drivers/usb/gadget/function/u_uac1.h new file mode 100644 index 000000000..fe386df6d --- /dev/null +++ b/drivers/usb/gadget/function/u_uac1.h @@ -0,0 +1,84 @@ +/* + * u_uac1.h -- interface to USB gadget "ALSA AUDIO" utilities + * + * Copyright (C) 2008 Bryan Wu <cooloney@kernel.org> + * Copyright (C) 2008 Analog Devices, Inc + * + * Enter bugs at http://blackfin.uclinux.org/ + * + * Licensed under the GPL-2 or later. + */ + +#ifndef __U_AUDIO_H +#define __U_AUDIO_H + +#include <linux/device.h> +#include <linux/err.h> +#include <linux/usb/audio.h> +#include <linux/usb/composite.h> + +#include <sound/core.h> +#include <sound/pcm.h> +#include <sound/pcm_params.h> + +#include "gadget_chips.h" + +#define FILE_PCM_PLAYBACK "/dev/snd/pcmC0D0p" +#define FILE_PCM_CAPTURE "/dev/snd/pcmC0D0c" +#define FILE_CONTROL "/dev/snd/controlC0" + +#define UAC1_OUT_EP_MAX_PACKET_SIZE 200 +#define UAC1_REQ_COUNT 256 +#define UAC1_AUDIO_BUF_SIZE 48000 + +/* + * This represents the USB side of an audio card device, managed by a USB + * function which provides control and stream interfaces. + */ + +struct gaudio_snd_dev { + struct gaudio *card; + struct file *filp; + struct snd_pcm_substream *substream; + int access; + int format; + int channels; + int rate; +}; + +struct gaudio { + struct usb_function func; + struct usb_gadget *gadget; + + /* ALSA sound device interfaces */ + struct gaudio_snd_dev control; + struct gaudio_snd_dev playback; + struct gaudio_snd_dev capture; + + /* TODO */ +}; + +struct f_uac1_opts { + struct usb_function_instance func_inst; + int req_buf_size; + int req_count; + int audio_buf_size; + char *fn_play; + char *fn_cap; + char *fn_cntl; + unsigned bound:1; + unsigned fn_play_alloc:1; + unsigned fn_cap_alloc:1; + unsigned fn_cntl_alloc:1; + struct mutex lock; + int refcnt; +}; + +int gaudio_setup(struct gaudio *card); +void gaudio_cleanup(struct gaudio *the_card); + +size_t u_audio_playback(struct gaudio *card, void *buf, size_t count); +int u_audio_get_playback_channels(struct gaudio *card); +int u_audio_get_playback_rate(struct gaudio *card); + +#endif /* __U_AUDIO_H */ diff --git a/drivers/usb/gadget/function/u_uac2.h b/drivers/usb/gadget/function/u_uac2.h new file mode 100644 index 000000000..78dd37279 --- /dev/null +++ b/drivers/usb/gadget/function/u_uac2.h @@ -0,0 +1,42 @@ +/* + * u_uac2.h + * + * Utility definitions for UAC2 function + * + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef U_UAC2_H +#define U_UAC2_H + +#include <linux/usb/composite.h> + +#define UAC2_DEF_PCHMASK 0x3 +#define UAC2_DEF_PSRATE 48000 +#define UAC2_DEF_PSSIZE 2 +#define UAC2_DEF_CCHMASK 0x3 +#define UAC2_DEF_CSRATE 64000 +#define UAC2_DEF_CSSIZE 2 + +struct f_uac2_opts { + struct usb_function_instance func_inst; + int p_chmask; + int p_srate; + int p_ssize; + int c_chmask; + int c_srate; + int c_ssize; + bool bound; + + struct mutex lock; + int refcnt; +}; + +#endif diff --git a/drivers/usb/gadget/function/u_uvc.h b/drivers/usb/gadget/function/u_uvc.h new file mode 100644 index 000000000..4676b60a5 --- /dev/null +++ b/drivers/usb/gadget/function/u_uvc.h @@ -0,0 +1,89 @@ +/* + * u_uvc.h + * + * Utility definitions for the uvc function + * + * Copyright (c) 2013-2014 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef U_UVC_H +#define U_UVC_H + +#include <linux/usb/composite.h> +#include <linux/usb/video.h> + +#define fi_to_f_uvc_opts(f) container_of(f, struct f_uvc_opts, func_inst) + +struct f_uvc_opts { + struct usb_function_instance func_inst; + unsigned int uvc_gadget_trace_param; + unsigned int streaming_interval; + unsigned int streaming_maxpacket; + unsigned int streaming_maxburst; + + /* + * Control descriptors array pointers for full-/high-speed and + * super-speed. They point by default to the uvc_fs_control_cls and + * uvc_ss_control_cls arrays respectively. Legacy gadgets must + * override them in their gadget bind callback. + */ + const struct uvc_descriptor_header * const *fs_control; + const struct uvc_descriptor_header * const *ss_control; + + /* + * Streaming descriptors array pointers for full-speed, high-speed and + * super-speed. They will point to the uvc_[fhs]s_streaming_cls arrays + * for configfs-based gadgets. Legacy gadgets must initialize them in + * their gadget bind callback. + */ + const struct uvc_descriptor_header * const *fs_streaming; + const struct uvc_descriptor_header * const *hs_streaming; + const struct uvc_descriptor_header * const *ss_streaming; + + /* Default control descriptors for configfs-based gadgets. */ + struct uvc_camera_terminal_descriptor uvc_camera_terminal; + struct uvc_processing_unit_descriptor uvc_processing; + struct uvc_output_terminal_descriptor uvc_output_terminal; + struct uvc_color_matching_descriptor uvc_color_matching; + + /* + * Control descriptors pointers arrays for full-/high-speed and + * super-speed. The first element is a configurable control header + * descriptor, the other elements point to the fixed default control + * descriptors. Used by configfs only, must not be touched by legacy + * gadgets. + */ + struct uvc_descriptor_header *uvc_fs_control_cls[5]; + struct uvc_descriptor_header *uvc_ss_control_cls[5]; + + /* + * Streaming descriptors for full-speed, high-speed and super-speed. + * Used by configfs only, must not be touched by legacy gadgets. The + * arrays are allocated at runtime as the number of descriptors isn't + * known in advance. + */ + struct uvc_descriptor_header **uvc_fs_streaming_cls; + struct uvc_descriptor_header **uvc_hs_streaming_cls; + struct uvc_descriptor_header **uvc_ss_streaming_cls; + + /* + * Read/write access to configfs attributes is handled by configfs. 
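The comments above split responsibility for the descriptor pointers between configfs-based and legacy gadgets. For the legacy case, the gadget's bind callback is expected to point the opts at descriptor tables it owns. A hedged sketch of that step; fi_uvc, my_webcam_bind and the my_*_descs arrays (of type const struct uvc_descriptor_header * const []) are placeholders for the gadget's own definitions.

static struct usb_function_instance *fi_uvc;

static int my_webcam_bind(struct usb_composite_dev *cdev)
{
	struct f_uvc_opts *uvc_opts;

	fi_uvc = usb_get_function_instance("uvc");
	if (IS_ERR(fi_uvc))
		return PTR_ERR(fi_uvc);
	uvc_opts = fi_to_f_uvc_opts(fi_uvc);

	uvc_opts->streaming_interval = 1;
	uvc_opts->streaming_maxpacket = 1024;

	/* descriptor tables owned by the gadget, not by f_uvc */
	uvc_opts->fs_control   = my_fs_control_descs;
	uvc_opts->ss_control   = my_ss_control_descs;
	uvc_opts->fs_streaming = my_fs_streaming_descs;
	uvc_opts->hs_streaming = my_hs_streaming_descs;
	uvc_opts->ss_streaming = my_ss_streaming_descs;

	/* ...then usb_add_config() / usb_get_function() as usual... */
	return 0;
}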
+ * + * This lock protects the descriptors from concurrent access by + * read/write and symlink creation/removal. + */ + struct mutex lock; + int refcnt; +}; + +void uvc_set_trace_param(unsigned int trace); + +#endif /* U_UVC_H */ + diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h new file mode 100644 index 000000000..ebe409b9e --- /dev/null +++ b/drivers/usb/gadget/function/uvc.h @@ -0,0 +1,201 @@ +/* + * uvc_gadget.h -- USB Video Class Gadget driver + * + * Copyright (C) 2009-2010 + * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef _UVC_GADGET_H_ +#define _UVC_GADGET_H_ + +#include <linux/ioctl.h> +#include <linux/types.h> +#include <linux/usb/ch9.h> + +#define UVC_EVENT_FIRST (V4L2_EVENT_PRIVATE_START + 0) +#define UVC_EVENT_CONNECT (V4L2_EVENT_PRIVATE_START + 0) +#define UVC_EVENT_DISCONNECT (V4L2_EVENT_PRIVATE_START + 1) +#define UVC_EVENT_STREAMON (V4L2_EVENT_PRIVATE_START + 2) +#define UVC_EVENT_STREAMOFF (V4L2_EVENT_PRIVATE_START + 3) +#define UVC_EVENT_SETUP (V4L2_EVENT_PRIVATE_START + 4) +#define UVC_EVENT_DATA (V4L2_EVENT_PRIVATE_START + 5) +#define UVC_EVENT_LAST (V4L2_EVENT_PRIVATE_START + 5) + +struct uvc_request_data +{ + __s32 length; + __u8 data[60]; +}; + +struct uvc_event +{ + union { + enum usb_device_speed speed; + struct usb_ctrlrequest req; + struct uvc_request_data data; + }; +}; + +#define UVCIOC_SEND_RESPONSE _IOW('U', 1, struct uvc_request_data) + +#define UVC_INTF_CONTROL 0 +#define UVC_INTF_STREAMING 1 + +/* ------------------------------------------------------------------------ + * Debugging, printing and logging + */ + +#ifdef __KERNEL__ + +#include <linux/usb.h> /* For usb_endpoint_* */ +#include <linux/usb/composite.h> +#include <linux/usb/gadget.h> +#include <linux/videodev2.h> +#include <linux/version.h> +#include <media/v4l2-fh.h> +#include <media/v4l2-device.h> + +#include "uvc_queue.h" + +#define UVC_TRACE_PROBE (1 << 0) +#define UVC_TRACE_DESCR (1 << 1) +#define UVC_TRACE_CONTROL (1 << 2) +#define UVC_TRACE_FORMAT (1 << 3) +#define UVC_TRACE_CAPTURE (1 << 4) +#define UVC_TRACE_CALLS (1 << 5) +#define UVC_TRACE_IOCTL (1 << 6) +#define UVC_TRACE_FRAME (1 << 7) +#define UVC_TRACE_SUSPEND (1 << 8) +#define UVC_TRACE_STATUS (1 << 9) + +#define UVC_WARN_MINMAX 0 +#define UVC_WARN_PROBE_DEF 1 + +extern unsigned int uvc_gadget_trace_param; + +#define uvc_trace(flag, msg...) \ + do { \ + if (uvc_gadget_trace_param & flag) \ + printk(KERN_DEBUG "uvcvideo: " msg); \ + } while (0) + +#define uvc_warn_once(dev, warn, msg...) \ + do { \ + if (!test_and_set_bit(warn, &dev->warnings)) \ + printk(KERN_INFO "uvcvideo: " msg); \ + } while (0) + +#define uvc_printk(level, msg...) 
\ + printk(level "uvcvideo: " msg) + +/* ------------------------------------------------------------------------ + * Driver specific constants + */ + +#define UVC_NUM_REQUESTS 4 +#define UVC_MAX_REQUEST_SIZE 64 +#define UVC_MAX_EVENTS 4 + +/* ------------------------------------------------------------------------ + * Structures + */ + +struct uvc_video +{ + struct usb_ep *ep; + + /* Frame parameters */ + u8 bpp; + u32 fcc; + unsigned int width; + unsigned int height; + unsigned int imagesize; + struct mutex mutex; /* protects frame parameters */ + + /* Requests */ + unsigned int req_size; + struct usb_request *req[UVC_NUM_REQUESTS]; + __u8 *req_buffer[UVC_NUM_REQUESTS]; + struct list_head req_free; + spinlock_t req_lock; + + void (*encode) (struct usb_request *req, struct uvc_video *video, + struct uvc_buffer *buf); + + /* Context data used by the completion handler */ + __u32 payload_size; + __u32 max_payload_size; + + struct uvc_video_queue queue; + unsigned int fid; +}; + +enum uvc_state +{ + UVC_STATE_DISCONNECTED, + UVC_STATE_CONNECTED, + UVC_STATE_STREAMING, +}; + +struct uvc_device +{ + struct video_device vdev; + struct v4l2_device v4l2_dev; + enum uvc_state state; + struct usb_function func; + struct uvc_video video; + + /* Descriptors */ + struct { + const struct uvc_descriptor_header * const *fs_control; + const struct uvc_descriptor_header * const *ss_control; + const struct uvc_descriptor_header * const *fs_streaming; + const struct uvc_descriptor_header * const *hs_streaming; + const struct uvc_descriptor_header * const *ss_streaming; + } desc; + + unsigned int control_intf; + struct usb_ep *control_ep; + struct usb_request *control_req; + void *control_buf; + + unsigned int streaming_intf; + + /* Events */ + unsigned int event_length; + unsigned int event_setup_out : 1; +}; + +static inline struct uvc_device *to_uvc(struct usb_function *f) +{ + return container_of(f, struct uvc_device, func); +} + +struct uvc_file_handle +{ + struct v4l2_fh vfh; + struct uvc_video *device; +}; + +#define to_uvc_file_handle(handle) \ + container_of(handle, struct uvc_file_handle, vfh) + +/* ------------------------------------------------------------------------ + * Functions + */ + +extern void uvc_function_setup_continue(struct uvc_device *uvc); +extern void uvc_endpoint_stream(struct uvc_device *dev); + +extern void uvc_function_connect(struct uvc_device *uvc); +extern void uvc_function_disconnect(struct uvc_device *uvc); + +#endif /* __KERNEL__ */ + +#endif /* _UVC_GADGET_H_ */ + diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c new file mode 100644 index 000000000..3c0467bcb --- /dev/null +++ b/drivers/usb/gadget/function/uvc_configfs.c @@ -0,0 +1,2468 @@ +/* + * uvc_configfs.c + * + * Configfs support for the uvc function. + * + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
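Stepping back to the UVC_EVENT_* codes, struct uvc_request_data and UVCIOC_SEND_RESPONSE exposed by uvc.h above: together they form the user-space contract of this function, historically consumed by the uvc-gadget test application. A hedged user-space sketch of that event loop, assuming those definitions are available to the application; request decoding, buffer handling, the GET_INFO reply value and all error paths are only token examples.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/usb/video.h>
#include <linux/videodev2.h>
#include "uvc.h"		/* UVC_EVENT_*, UVCIOC_SEND_RESPONSE */

static void uvc_subscribe_events(int fd)
{
	struct v4l2_event_subscription sub;
	unsigned int type;

	for (type = UVC_EVENT_FIRST; type <= UVC_EVENT_LAST; type++) {
		memset(&sub, 0, sizeof(sub));
		sub.type = type;
		ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
	}
}

static void uvc_handle_event(int fd)
{
	struct v4l2_event event;
	struct uvc_event *uvc_event = (void *)&event.u.data;
	struct uvc_request_data resp;

	if (ioctl(fd, VIDIOC_DQEVENT, &event) < 0)
		return;

	switch (event.type) {
	case UVC_EVENT_SETUP:
		/* the class-specific SETUP packet is in uvc_event->req;
		 * fill resp.data/resp.length for GET requests (a negative
		 * length asks the driver to stall ep0 instead)
		 */
		memset(&resp, 0, sizeof(resp));
		if (uvc_event->req.bRequest == UVC_GET_INFO) {
			resp.data[0] = 0x03;	/* GET and SET supported */
			resp.length = 1;
		}
		ioctl(fd, UVCIOC_SEND_RESPONSE, &resp);
		break;
	case UVC_EVENT_STREAMON:
	case UVC_EVENT_STREAMOFF:
		/* start or stop queuing V4L2 buffers on the same fd */
		break;
	}
}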
+ */ +#include "u_uvc.h" +#include "uvc_configfs.h" + +#define UVCG_STREAMING_CONTROL_SIZE 1 + +#define CONFIGFS_ATTR_OPS_RO(_item) \ +static ssize_t _item##_attr_show(struct config_item *item, \ + struct configfs_attribute *attr, \ + char *page) \ +{ \ + struct _item *_item = to_##_item(item); \ + struct _item##_attribute *_item##_attr = \ + container_of(attr, struct _item##_attribute, attr); \ + ssize_t ret = 0; \ + \ + if (_item##_attr->show) \ + ret = _item##_attr->show(_item, page); \ + return ret; \ +} + +static inline struct f_uvc_opts *to_f_uvc_opts(struct config_item *item); + +/* control/header/<NAME> */ +DECLARE_UVC_HEADER_DESCRIPTOR(1); + +struct uvcg_control_header { + struct config_item item; + struct UVC_HEADER_DESCRIPTOR(1) desc; + unsigned linked; +}; + +static struct uvcg_control_header *to_uvcg_control_header(struct config_item *item) +{ + return container_of(item, struct uvcg_control_header, item); +} + +CONFIGFS_ATTR_STRUCT(uvcg_control_header); +CONFIGFS_ATTR_OPS(uvcg_control_header); + +static struct configfs_item_operations uvcg_control_header_item_ops = { + .show_attribute = uvcg_control_header_attr_show, + .store_attribute = uvcg_control_header_attr_store, +}; + +#define UVCG_CTRL_HDR_ATTR(cname, aname, conv, str2u, uxx, vnoc, limit) \ +static ssize_t uvcg_control_header_##cname##_show( \ + struct uvcg_control_header *ch, char *page) \ +{ \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &ch->item.ci_group->cg_subsys->su_mutex;\ + int result; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = ch->item.ci_parent->ci_parent->ci_parent; \ + opts = to_f_uvc_opts(opts_item); \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%d\n", conv(ch->desc.aname)); \ + mutex_unlock(&opts->lock); \ + \ + mutex_unlock(su_mutex); \ + return result; \ +} \ + \ +static ssize_t \ +uvcg_control_header_##cname##_store(struct uvcg_control_header *ch, \ + const char *page, size_t len) \ +{ \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &ch->item.ci_group->cg_subsys->su_mutex;\ + int ret; \ + uxx num; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = ch->item.ci_parent->ci_parent->ci_parent; \ + opts = to_f_uvc_opts(opts_item); \ + \ + mutex_lock(&opts->lock); \ + if (ch->linked || opts->refcnt) { \ + ret = -EBUSY; \ + goto end; \ + } \ + \ + ret = str2u(page, 0, &num); \ + if (ret) \ + goto end; \ + \ + if (num > limit) { \ + ret = -EINVAL; \ + goto end; \ + } \ + ch->desc.aname = vnoc(num); \ + ret = len; \ +end: \ + mutex_unlock(&opts->lock); \ + mutex_unlock(su_mutex); \ + return ret; \ +} \ + \ +static struct uvcg_control_header_attribute \ + uvcg_control_header_##cname = \ + __CONFIGFS_ATTR(aname, S_IRUGO | S_IWUSR, \ + uvcg_control_header_##cname##_show, \ + uvcg_control_header_##cname##_store) + +UVCG_CTRL_HDR_ATTR(bcd_uvc, bcdUVC, le16_to_cpu, kstrtou16, u16, cpu_to_le16, + 0xffff); + +UVCG_CTRL_HDR_ATTR(dw_clock_frequency, dwClockFrequency, le32_to_cpu, kstrtou32, + u32, cpu_to_le32, 0x7fffffff); + +#undef UVCG_CTRL_HDR_ATTR + +static struct configfs_attribute *uvcg_control_header_attrs[] = { + &uvcg_control_header_bcd_uvc.attr, + &uvcg_control_header_dw_clock_frequency.attr, + NULL, +}; + +static struct config_item_type uvcg_control_header_type = { + .ct_item_ops = &uvcg_control_header_item_ops, + .ct_attrs = uvcg_control_header_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct config_item 
*uvcg_control_header_make(struct config_group *group, + const char *name) +{ + struct uvcg_control_header *h; + + h = kzalloc(sizeof(*h), GFP_KERNEL); + if (!h) + return ERR_PTR(-ENOMEM); + + h->desc.bLength = UVC_DT_HEADER_SIZE(1); + h->desc.bDescriptorType = USB_DT_CS_INTERFACE; + h->desc.bDescriptorSubType = UVC_VC_HEADER; + h->desc.bcdUVC = cpu_to_le16(0x0100); + h->desc.dwClockFrequency = cpu_to_le32(48000000); + + config_item_init_type_name(&h->item, name, &uvcg_control_header_type); + + return &h->item; +} + +static void uvcg_control_header_drop(struct config_group *group, + struct config_item *item) +{ + struct uvcg_control_header *h = to_uvcg_control_header(item); + + kfree(h); +} + +/* control/header */ +static struct uvcg_control_header_grp { + struct config_group group; +} uvcg_control_header_grp; + +static struct configfs_group_operations uvcg_control_header_grp_ops = { + .make_item = uvcg_control_header_make, + .drop_item = uvcg_control_header_drop, +}; + +static struct config_item_type uvcg_control_header_grp_type = { + .ct_group_ops = &uvcg_control_header_grp_ops, + .ct_owner = THIS_MODULE, +}; + +/* control/processing/default */ +static struct uvcg_default_processing { + struct config_group group; +} uvcg_default_processing; + +static inline struct uvcg_default_processing +*to_uvcg_default_processing(struct config_item *item) +{ + return container_of(to_config_group(item), + struct uvcg_default_processing, group); +} + +CONFIGFS_ATTR_STRUCT(uvcg_default_processing); +CONFIGFS_ATTR_OPS_RO(uvcg_default_processing); + +static struct configfs_item_operations uvcg_default_processing_item_ops = { + .show_attribute = uvcg_default_processing_attr_show, +}; + +#define UVCG_DEFAULT_PROCESSING_ATTR(cname, aname, conv) \ +static ssize_t uvcg_default_processing_##cname##_show( \ + struct uvcg_default_processing *dp, char *page) \ +{ \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &dp->group.cg_subsys->su_mutex; \ + struct uvc_processing_unit_descriptor *pd; \ + int result; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = dp->group.cg_item.ci_parent->ci_parent->ci_parent; \ + opts = to_f_uvc_opts(opts_item); \ + pd = &opts->uvc_processing; \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%d\n", conv(pd->aname)); \ + mutex_unlock(&opts->lock); \ + \ + mutex_unlock(su_mutex); \ + return result; \ +} \ + \ +static struct uvcg_default_processing_attribute \ + uvcg_default_processing_##cname = \ + __CONFIGFS_ATTR_RO(aname, uvcg_default_processing_##cname##_show) + +#define identity_conv(x) (x) + +UVCG_DEFAULT_PROCESSING_ATTR(b_unit_id, bUnitID, identity_conv); +UVCG_DEFAULT_PROCESSING_ATTR(b_source_id, bSourceID, identity_conv); +UVCG_DEFAULT_PROCESSING_ATTR(w_max_multiplier, wMaxMultiplier, le16_to_cpu); +UVCG_DEFAULT_PROCESSING_ATTR(i_processing, iProcessing, identity_conv); + +#undef identity_conv + +#undef UVCG_DEFAULT_PROCESSING_ATTR + +static ssize_t uvcg_default_processing_bm_controls_show( + struct uvcg_default_processing *dp, char *page) +{ + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct mutex *su_mutex = &dp->group.cg_subsys->su_mutex; + struct uvc_processing_unit_descriptor *pd; + int result, i; + char *pg = page; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = dp->group.cg_item.ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + pd = &opts->uvc_processing; + + mutex_lock(&opts->lock); + for (result = 0, i = 0; i 
< pd->bControlSize; ++i) { + result += sprintf(pg, "%d\n", pd->bmControls[i]); + pg = page + result; + } + mutex_unlock(&opts->lock); + + mutex_unlock(su_mutex); + + return result; +} + +static struct uvcg_default_processing_attribute + uvcg_default_processing_bm_controls = + __CONFIGFS_ATTR_RO(bmControls, + uvcg_default_processing_bm_controls_show); + +static struct configfs_attribute *uvcg_default_processing_attrs[] = { + &uvcg_default_processing_b_unit_id.attr, + &uvcg_default_processing_b_source_id.attr, + &uvcg_default_processing_w_max_multiplier.attr, + &uvcg_default_processing_bm_controls.attr, + &uvcg_default_processing_i_processing.attr, + NULL, +}; + +static struct config_item_type uvcg_default_processing_type = { + .ct_item_ops = &uvcg_default_processing_item_ops, + .ct_attrs = uvcg_default_processing_attrs, + .ct_owner = THIS_MODULE, +}; + +/* struct uvcg_processing {}; */ + +static struct config_group *uvcg_processing_default_groups[] = { + &uvcg_default_processing.group, + NULL, +}; + +/* control/processing */ +static struct uvcg_processing_grp { + struct config_group group; +} uvcg_processing_grp; + +static struct config_item_type uvcg_processing_grp_type = { + .ct_owner = THIS_MODULE, +}; + +/* control/terminal/camera/default */ +static struct uvcg_default_camera { + struct config_group group; +} uvcg_default_camera; + +static inline struct uvcg_default_camera +*to_uvcg_default_camera(struct config_item *item) +{ + return container_of(to_config_group(item), + struct uvcg_default_camera, group); +} + +CONFIGFS_ATTR_STRUCT(uvcg_default_camera); +CONFIGFS_ATTR_OPS_RO(uvcg_default_camera); + +static struct configfs_item_operations uvcg_default_camera_item_ops = { + .show_attribute = uvcg_default_camera_attr_show, +}; + +#define UVCG_DEFAULT_CAMERA_ATTR(cname, aname, conv) \ +static ssize_t uvcg_default_camera_##cname##_show( \ + struct uvcg_default_camera *dc, char *page) \ +{ \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &dc->group.cg_subsys->su_mutex; \ + struct uvc_camera_terminal_descriptor *cd; \ + int result; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = dc->group.cg_item.ci_parent->ci_parent->ci_parent-> \ + ci_parent; \ + opts = to_f_uvc_opts(opts_item); \ + cd = &opts->uvc_camera_terminal; \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%d\n", conv(cd->aname)); \ + mutex_unlock(&opts->lock); \ + \ + mutex_unlock(su_mutex); \ + \ + return result; \ +} \ + \ +static struct uvcg_default_camera_attribute \ + uvcg_default_camera_##cname = \ + __CONFIGFS_ATTR_RO(aname, uvcg_default_camera_##cname##_show) + +#define identity_conv(x) (x) + +UVCG_DEFAULT_CAMERA_ATTR(b_terminal_id, bTerminalID, identity_conv); +UVCG_DEFAULT_CAMERA_ATTR(w_terminal_type, wTerminalType, le16_to_cpu); +UVCG_DEFAULT_CAMERA_ATTR(b_assoc_terminal, bAssocTerminal, identity_conv); +UVCG_DEFAULT_CAMERA_ATTR(i_terminal, iTerminal, identity_conv); +UVCG_DEFAULT_CAMERA_ATTR(w_objective_focal_length_min, wObjectiveFocalLengthMin, + le16_to_cpu); +UVCG_DEFAULT_CAMERA_ATTR(w_objective_focal_length_max, wObjectiveFocalLengthMax, + le16_to_cpu); +UVCG_DEFAULT_CAMERA_ATTR(w_ocular_focal_length, wOcularFocalLength, + le16_to_cpu); + +#undef identity_conv + +#undef UVCG_DEFAULT_CAMERA_ATTR + +static ssize_t uvcg_default_camera_bm_controls_show( + struct uvcg_default_camera *dc, char *page) +{ + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct mutex *su_mutex = &dc->group.cg_subsys->su_mutex; + 
struct uvc_camera_terminal_descriptor *cd; + int result, i; + char *pg = page; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = dc->group.cg_item.ci_parent->ci_parent->ci_parent-> + ci_parent; + opts = to_f_uvc_opts(opts_item); + cd = &opts->uvc_camera_terminal; + + mutex_lock(&opts->lock); + for (result = 0, i = 0; i < cd->bControlSize; ++i) { + result += sprintf(pg, "%d\n", cd->bmControls[i]); + pg = page + result; + } + mutex_unlock(&opts->lock); + + mutex_unlock(su_mutex); + return result; +} + +static struct uvcg_default_camera_attribute + uvcg_default_camera_bm_controls = + __CONFIGFS_ATTR_RO(bmControls, uvcg_default_camera_bm_controls_show); + +static struct configfs_attribute *uvcg_default_camera_attrs[] = { + &uvcg_default_camera_b_terminal_id.attr, + &uvcg_default_camera_w_terminal_type.attr, + &uvcg_default_camera_b_assoc_terminal.attr, + &uvcg_default_camera_i_terminal.attr, + &uvcg_default_camera_w_objective_focal_length_min.attr, + &uvcg_default_camera_w_objective_focal_length_max.attr, + &uvcg_default_camera_w_ocular_focal_length.attr, + &uvcg_default_camera_bm_controls.attr, + NULL, +}; + +static struct config_item_type uvcg_default_camera_type = { + .ct_item_ops = &uvcg_default_camera_item_ops, + .ct_attrs = uvcg_default_camera_attrs, + .ct_owner = THIS_MODULE, +}; + +/* struct uvcg_camera {}; */ + +static struct config_group *uvcg_camera_default_groups[] = { + &uvcg_default_camera.group, + NULL, +}; + +/* control/terminal/camera */ +static struct uvcg_camera_grp { + struct config_group group; +} uvcg_camera_grp; + +static struct config_item_type uvcg_camera_grp_type = { + .ct_owner = THIS_MODULE, +}; + +/* control/terminal/output/default */ +static struct uvcg_default_output { + struct config_group group; +} uvcg_default_output; + +static inline struct uvcg_default_output +*to_uvcg_default_output(struct config_item *item) +{ + return container_of(to_config_group(item), + struct uvcg_default_output, group); +} + +CONFIGFS_ATTR_STRUCT(uvcg_default_output); +CONFIGFS_ATTR_OPS_RO(uvcg_default_output); + +static struct configfs_item_operations uvcg_default_output_item_ops = { + .show_attribute = uvcg_default_output_attr_show, +}; + +#define UVCG_DEFAULT_OUTPUT_ATTR(cname, aname, conv) \ +static ssize_t uvcg_default_output_##cname##_show( \ + struct uvcg_default_output *dout, char *page) \ +{ \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &dout->group.cg_subsys->su_mutex; \ + struct uvc_output_terminal_descriptor *cd; \ + int result; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = dout->group.cg_item.ci_parent->ci_parent-> \ + ci_parent->ci_parent; \ + opts = to_f_uvc_opts(opts_item); \ + cd = &opts->uvc_output_terminal; \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%d\n", conv(cd->aname)); \ + mutex_unlock(&opts->lock); \ + \ + mutex_unlock(su_mutex); \ + \ + return result; \ +} \ + \ +static struct uvcg_default_output_attribute \ + uvcg_default_output_##cname = \ + __CONFIGFS_ATTR_RO(aname, uvcg_default_output_##cname##_show) + +#define identity_conv(x) (x) + +UVCG_DEFAULT_OUTPUT_ATTR(b_terminal_id, bTerminalID, identity_conv); +UVCG_DEFAULT_OUTPUT_ATTR(w_terminal_type, wTerminalType, le16_to_cpu); +UVCG_DEFAULT_OUTPUT_ATTR(b_assoc_terminal, bAssocTerminal, identity_conv); +UVCG_DEFAULT_OUTPUT_ATTR(b_source_id, bSourceID, identity_conv); +UVCG_DEFAULT_OUTPUT_ATTR(i_terminal, iTerminal, identity_conv); + +#undef identity_conv + +#undef 
UVCG_DEFAULT_OUTPUT_ATTR + +static struct configfs_attribute *uvcg_default_output_attrs[] = { + &uvcg_default_output_b_terminal_id.attr, + &uvcg_default_output_w_terminal_type.attr, + &uvcg_default_output_b_assoc_terminal.attr, + &uvcg_default_output_b_source_id.attr, + &uvcg_default_output_i_terminal.attr, + NULL, +}; + +static struct config_item_type uvcg_default_output_type = { + .ct_item_ops = &uvcg_default_output_item_ops, + .ct_attrs = uvcg_default_output_attrs, + .ct_owner = THIS_MODULE, +}; + +/* struct uvcg_output {}; */ + +static struct config_group *uvcg_output_default_groups[] = { + &uvcg_default_output.group, + NULL, +}; + +/* control/terminal/output */ +static struct uvcg_output_grp { + struct config_group group; +} uvcg_output_grp; + +static struct config_item_type uvcg_output_grp_type = { + .ct_owner = THIS_MODULE, +}; + +static struct config_group *uvcg_terminal_default_groups[] = { + &uvcg_camera_grp.group, + &uvcg_output_grp.group, + NULL, +}; + +/* control/terminal */ +static struct uvcg_terminal_grp { + struct config_group group; +} uvcg_terminal_grp; + +static struct config_item_type uvcg_terminal_grp_type = { + .ct_owner = THIS_MODULE, +}; + +/* control/class/{fs} */ +static struct uvcg_control_class { + struct config_group group; +} uvcg_control_class_fs, uvcg_control_class_ss; + + +static inline struct uvc_descriptor_header +**uvcg_get_ctl_class_arr(struct config_item *i, struct f_uvc_opts *o) +{ + struct uvcg_control_class *cl = container_of(to_config_group(i), + struct uvcg_control_class, group); + + if (cl == &uvcg_control_class_fs) + return o->uvc_fs_control_cls; + + if (cl == &uvcg_control_class_ss) + return o->uvc_ss_control_cls; + + return NULL; +} + +static int uvcg_control_class_allow_link(struct config_item *src, + struct config_item *target) +{ + struct config_item *control, *header; + struct f_uvc_opts *opts; + struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex; + struct uvc_descriptor_header **class_array; + struct uvcg_control_header *target_hdr; + int ret = -EINVAL; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + control = src->ci_parent->ci_parent; + header = config_group_find_item(to_config_group(control), "header"); + if (!header || target->ci_parent != header) + goto out; + + opts = to_f_uvc_opts(control->ci_parent); + + mutex_lock(&opts->lock); + + class_array = uvcg_get_ctl_class_arr(src, opts); + if (!class_array) + goto unlock; + if (opts->refcnt || class_array[0]) { + ret = -EBUSY; + goto unlock; + } + + target_hdr = to_uvcg_control_header(target); + ++target_hdr->linked; + class_array[0] = (struct uvc_descriptor_header *)&target_hdr->desc; + ret = 0; + +unlock: + mutex_unlock(&opts->lock); +out: + mutex_unlock(su_mutex); + return ret; +} + +static int uvcg_control_class_drop_link(struct config_item *src, + struct config_item *target) +{ + struct config_item *control, *header; + struct f_uvc_opts *opts; + struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex; + struct uvc_descriptor_header **class_array; + struct uvcg_control_header *target_hdr; + int ret = -EINVAL; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + control = src->ci_parent->ci_parent; + header = config_group_find_item(to_config_group(control), "header"); + if (!header || target->ci_parent != header) + goto out; + + opts = to_f_uvc_opts(control->ci_parent); + + mutex_lock(&opts->lock); + + class_array = uvcg_get_ctl_class_arr(src, opts); + if (!class_array) + goto unlock; + if (opts->refcnt) { + ret = -EBUSY; + goto 
unlock; + } + + target_hdr = to_uvcg_control_header(target); + --target_hdr->linked; + class_array[0] = NULL; + ret = 0; + +unlock: + mutex_unlock(&opts->lock); +out: + mutex_unlock(su_mutex); + return ret; +} + +static struct configfs_item_operations uvcg_control_class_item_ops = { + .allow_link = uvcg_control_class_allow_link, + .drop_link = uvcg_control_class_drop_link, +}; + +static struct config_item_type uvcg_control_class_type = { + .ct_item_ops = &uvcg_control_class_item_ops, + .ct_owner = THIS_MODULE, +}; + +static struct config_group *uvcg_control_class_default_groups[] = { + &uvcg_control_class_fs.group, + &uvcg_control_class_ss.group, + NULL, +}; + +/* control/class */ +static struct uvcg_control_class_grp { + struct config_group group; +} uvcg_control_class_grp; + +static struct config_item_type uvcg_control_class_grp_type = { + .ct_owner = THIS_MODULE, +}; + +static struct config_group *uvcg_control_default_groups[] = { + &uvcg_control_header_grp.group, + &uvcg_processing_grp.group, + &uvcg_terminal_grp.group, + &uvcg_control_class_grp.group, + NULL, +}; + +/* control */ +static struct uvcg_control_grp { + struct config_group group; +} uvcg_control_grp; + +static struct config_item_type uvcg_control_grp_type = { + .ct_owner = THIS_MODULE, +}; + +/* streaming/uncompressed */ +static struct uvcg_uncompressed_grp { + struct config_group group; +} uvcg_uncompressed_grp; + +/* streaming/mjpeg */ +static struct uvcg_mjpeg_grp { + struct config_group group; +} uvcg_mjpeg_grp; + +static struct config_item *fmt_parent[] = { + &uvcg_uncompressed_grp.group.cg_item, + &uvcg_mjpeg_grp.group.cg_item, +}; + +enum uvcg_format_type { + UVCG_UNCOMPRESSED = 0, + UVCG_MJPEG, +}; + +struct uvcg_format { + struct config_group group; + enum uvcg_format_type type; + unsigned linked; + unsigned num_frames; + __u8 bmaControls[UVCG_STREAMING_CONTROL_SIZE]; +}; + +static struct uvcg_format *to_uvcg_format(struct config_item *item) +{ + return container_of(to_config_group(item), struct uvcg_format, group); +} + +static ssize_t uvcg_format_bma_controls_show(struct uvcg_format *f, char *page) +{ + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct mutex *su_mutex = &f->group.cg_subsys->su_mutex; + int result, i; + char *pg = page; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = f->group.cg_item.ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + + mutex_lock(&opts->lock); + result = sprintf(pg, "0x"); + pg += result; + for (i = 0; i < UVCG_STREAMING_CONTROL_SIZE; ++i) { + result += sprintf(pg, "%x\n", f->bmaControls[i]); + pg = page + result; + } + mutex_unlock(&opts->lock); + + mutex_unlock(su_mutex); + return result; +} + +static ssize_t uvcg_format_bma_controls_store(struct uvcg_format *ch, + const char *page, size_t len) +{ + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct mutex *su_mutex = &ch->group.cg_subsys->su_mutex; + int ret = -EINVAL; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = ch->group.cg_item.ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + + mutex_lock(&opts->lock); + if (ch->linked || opts->refcnt) { + ret = -EBUSY; + goto end; + } + + if (len < 4 || *page != '0' || + (*(page + 1) != 'x' && *(page + 1) != 'X')) + goto end; + ret = hex2bin(ch->bmaControls, page + 2, 1); + if (ret < 0) + goto end; + ret = len; +end: + mutex_unlock(&opts->lock); + mutex_unlock(su_mutex); + return ret; +} + +struct uvcg_format_ptr { + struct uvcg_format *fmt; + 
struct list_head entry; +}; + +/* streaming/header/<NAME> */ +struct uvcg_streaming_header { + struct config_item item; + struct uvc_input_header_descriptor desc; + unsigned linked; + struct list_head formats; + unsigned num_fmt; +}; + +static struct uvcg_streaming_header *to_uvcg_streaming_header(struct config_item *item) +{ + return container_of(item, struct uvcg_streaming_header, item); +} + +CONFIGFS_ATTR_STRUCT(uvcg_streaming_header); +CONFIGFS_ATTR_OPS(uvcg_streaming_header); + +static int uvcg_streaming_header_allow_link(struct config_item *src, + struct config_item *target) +{ + struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex; + struct config_item *opts_item; + struct f_uvc_opts *opts; + struct uvcg_streaming_header *src_hdr; + struct uvcg_format *target_fmt = NULL; + struct uvcg_format_ptr *format_ptr; + int i, ret = -EINVAL; + + src_hdr = to_uvcg_streaming_header(src); + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = src->ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + + mutex_lock(&opts->lock); + + if (src_hdr->linked) { + ret = -EBUSY; + goto out; + } + + for (i = 0; i < ARRAY_SIZE(fmt_parent); ++i) + if (target->ci_parent == fmt_parent[i]) + break; + if (i == ARRAY_SIZE(fmt_parent)) + goto out; + + target_fmt = container_of(to_config_group(target), struct uvcg_format, + group); + if (!target_fmt) + goto out; + + format_ptr = kzalloc(sizeof(*format_ptr), GFP_KERNEL); + if (!format_ptr) { + ret = -ENOMEM; + goto out; + } + ret = 0; + format_ptr->fmt = target_fmt; + list_add_tail(&format_ptr->entry, &src_hdr->formats); + ++src_hdr->num_fmt; + +out: + mutex_unlock(&opts->lock); + mutex_unlock(su_mutex); + return ret; +} + +static int uvcg_streaming_header_drop_link(struct config_item *src, + struct config_item *target) +{ + struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex; + struct config_item *opts_item; + struct f_uvc_opts *opts; + struct uvcg_streaming_header *src_hdr; + struct uvcg_format *target_fmt = NULL; + struct uvcg_format_ptr *format_ptr, *tmp; + int ret = -EINVAL; + + src_hdr = to_uvcg_streaming_header(src); + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = src->ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + + mutex_lock(&opts->lock); + target_fmt = container_of(to_config_group(target), struct uvcg_format, + group); + if (!target_fmt) + goto out; + + list_for_each_entry_safe(format_ptr, tmp, &src_hdr->formats, entry) + if (format_ptr->fmt == target_fmt) { + list_del(&format_ptr->entry); + kfree(format_ptr); + --src_hdr->num_fmt; + break; + } + +out: + mutex_unlock(&opts->lock); + mutex_unlock(su_mutex); + return ret; + +} + +static struct configfs_item_operations uvcg_streaming_header_item_ops = { + .show_attribute = uvcg_streaming_header_attr_show, + .store_attribute = uvcg_streaming_header_attr_store, + .allow_link = uvcg_streaming_header_allow_link, + .drop_link = uvcg_streaming_header_drop_link, +}; + +#define UVCG_STREAMING_HEADER_ATTR(cname, aname, conv) \ +static ssize_t uvcg_streaming_header_##cname##_show( \ + struct uvcg_streaming_header *sh, char *page) \ +{ \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &sh->item.ci_group->cg_subsys->su_mutex;\ + int result; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = sh->item.ci_parent->ci_parent->ci_parent; \ + opts = to_f_uvc_opts(opts_item); \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, 
"%d\n", conv(sh->desc.aname)); \ + mutex_unlock(&opts->lock); \ + \ + mutex_unlock(su_mutex); \ + return result; \ +} \ + \ +static struct uvcg_streaming_header_attribute \ + uvcg_streaming_header_##cname = \ + __CONFIGFS_ATTR_RO(aname, uvcg_streaming_header_##cname##_show) + +#define identity_conv(x) (x) + +UVCG_STREAMING_HEADER_ATTR(bm_info, bmInfo, identity_conv); +UVCG_STREAMING_HEADER_ATTR(b_terminal_link, bTerminalLink, identity_conv); +UVCG_STREAMING_HEADER_ATTR(b_still_capture_method, bStillCaptureMethod, + identity_conv); +UVCG_STREAMING_HEADER_ATTR(b_trigger_support, bTriggerSupport, identity_conv); +UVCG_STREAMING_HEADER_ATTR(b_trigger_usage, bTriggerUsage, identity_conv); + +#undef identity_conv + +#undef UVCG_STREAMING_HEADER_ATTR + +static struct configfs_attribute *uvcg_streaming_header_attrs[] = { + &uvcg_streaming_header_bm_info.attr, + &uvcg_streaming_header_b_terminal_link.attr, + &uvcg_streaming_header_b_still_capture_method.attr, + &uvcg_streaming_header_b_trigger_support.attr, + &uvcg_streaming_header_b_trigger_usage.attr, + NULL, +}; + +static struct config_item_type uvcg_streaming_header_type = { + .ct_item_ops = &uvcg_streaming_header_item_ops, + .ct_attrs = uvcg_streaming_header_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct config_item +*uvcg_streaming_header_make(struct config_group *group, const char *name) +{ + struct uvcg_streaming_header *h; + + h = kzalloc(sizeof(*h), GFP_KERNEL); + if (!h) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&h->formats); + h->desc.bDescriptorType = USB_DT_CS_INTERFACE; + h->desc.bDescriptorSubType = UVC_VS_INPUT_HEADER; + h->desc.bTerminalLink = 3; + h->desc.bControlSize = UVCG_STREAMING_CONTROL_SIZE; + + config_item_init_type_name(&h->item, name, &uvcg_streaming_header_type); + + return &h->item; +} + +static void uvcg_streaming_header_drop(struct config_group *group, + struct config_item *item) +{ + struct uvcg_streaming_header *h = to_uvcg_streaming_header(item); + + kfree(h); +} + +/* streaming/header */ +static struct uvcg_streaming_header_grp { + struct config_group group; +} uvcg_streaming_header_grp; + +static struct configfs_group_operations uvcg_streaming_header_grp_ops = { + .make_item = uvcg_streaming_header_make, + .drop_item = uvcg_streaming_header_drop, +}; + +static struct config_item_type uvcg_streaming_header_grp_type = { + .ct_group_ops = &uvcg_streaming_header_grp_ops, + .ct_owner = THIS_MODULE, +}; + +/* streaming/<mode>/<format>/<NAME> */ +struct uvcg_frame { + struct { + u8 b_length; + u8 b_descriptor_type; + u8 b_descriptor_subtype; + u8 b_frame_index; + u8 bm_capabilities; + u16 w_width; + u16 w_height; + u32 dw_min_bit_rate; + u32 dw_max_bit_rate; + u32 dw_max_video_frame_buffer_size; + u32 dw_default_frame_interval; + u8 b_frame_interval_type; + } __attribute__((packed)) frame; + u32 *dw_frame_interval; + enum uvcg_format_type fmt_type; + struct config_item item; +}; + +static struct uvcg_frame *to_uvcg_frame(struct config_item *item) +{ + return container_of(item, struct uvcg_frame, item); +} + +CONFIGFS_ATTR_STRUCT(uvcg_frame); +CONFIGFS_ATTR_OPS(uvcg_frame); + +static struct configfs_item_operations uvcg_frame_item_ops = { + .show_attribute = uvcg_frame_attr_show, + .store_attribute = uvcg_frame_attr_store, +}; + +#define UVCG_FRAME_ATTR(cname, aname, to_cpu_endian, to_little_endian, bits) \ +static ssize_t uvcg_frame_##cname##_show(struct uvcg_frame *f, char *page)\ +{ \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = 
&f->item.ci_group->cg_subsys->su_mutex;\ + int result; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = f->item.ci_parent->ci_parent->ci_parent->ci_parent; \ + opts = to_f_uvc_opts(opts_item); \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%d\n", to_cpu_endian(f->frame.cname)); \ + mutex_unlock(&opts->lock); \ + \ + mutex_unlock(su_mutex); \ + return result; \ +} \ + \ +static ssize_t uvcg_frame_##cname##_store(struct uvcg_frame *f, \ + const char *page, size_t len)\ +{ \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct uvcg_format *fmt; \ + struct mutex *su_mutex = &f->item.ci_group->cg_subsys->su_mutex;\ + int ret; \ + u##bits num; \ + \ + ret = kstrtou##bits(page, 0, &num); \ + if (ret) \ + return ret; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = f->item.ci_parent->ci_parent->ci_parent->ci_parent; \ + opts = to_f_uvc_opts(opts_item); \ + fmt = to_uvcg_format(f->item.ci_parent); \ + \ + mutex_lock(&opts->lock); \ + if (fmt->linked || opts->refcnt) { \ + ret = -EBUSY; \ + goto end; \ + } \ + \ + f->frame.cname = to_little_endian(num); \ + ret = len; \ +end: \ + mutex_unlock(&opts->lock); \ + mutex_unlock(su_mutex); \ + return ret; \ +} \ + \ +static struct uvcg_frame_attribute \ + uvcg_frame_##cname = \ + __CONFIGFS_ATTR(aname, S_IRUGO | S_IWUSR, \ + uvcg_frame_##cname##_show, \ + uvcg_frame_##cname##_store) + +#define noop_conversion(x) (x) + +UVCG_FRAME_ATTR(bm_capabilities, bmCapabilities, noop_conversion, + noop_conversion, 8); +UVCG_FRAME_ATTR(w_width, wWidth, le16_to_cpu, cpu_to_le16, 16); +UVCG_FRAME_ATTR(w_height, wHeight, le16_to_cpu, cpu_to_le16, 16); +UVCG_FRAME_ATTR(dw_min_bit_rate, dwMinBitRate, le32_to_cpu, cpu_to_le32, 32); +UVCG_FRAME_ATTR(dw_max_bit_rate, dwMaxBitRate, le32_to_cpu, cpu_to_le32, 32); +UVCG_FRAME_ATTR(dw_max_video_frame_buffer_size, dwMaxVideoFrameBufferSize, + le32_to_cpu, cpu_to_le32, 32); +UVCG_FRAME_ATTR(dw_default_frame_interval, dwDefaultFrameInterval, + le32_to_cpu, cpu_to_le32, 32); + +#undef noop_conversion + +#undef UVCG_FRAME_ATTR + +static ssize_t uvcg_frame_dw_frame_interval_show(struct uvcg_frame *frm, + char *page) +{ + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct mutex *su_mutex = &frm->item.ci_group->cg_subsys->su_mutex; + int result, i; + char *pg = page; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = frm->item.ci_parent->ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + + mutex_lock(&opts->lock); + for (result = 0, i = 0; i < frm->frame.b_frame_interval_type; ++i) { + result += sprintf(pg, "%d\n", + le32_to_cpu(frm->dw_frame_interval[i])); + pg = page + result; + } + mutex_unlock(&opts->lock); + + mutex_unlock(su_mutex); + return result; +} + +static inline int __uvcg_count_frm_intrv(char *buf, void *priv) +{ + ++*((int *)priv); + return 0; +} + +static inline int __uvcg_fill_frm_intrv(char *buf, void *priv) +{ + u32 num, **interv; + int ret; + + ret = kstrtou32(buf, 0, &num); + if (ret) + return ret; + + interv = priv; + **interv = cpu_to_le32(num); + ++*interv; + + return 0; +} + +static int __uvcg_iter_frm_intrv(const char *page, size_t len, + int (*fun)(char *, void *), void *priv) +{ + /* sign, base 2 representation, newline, terminator */ + char buf[1 + sizeof(u32) * 8 + 1 + 1]; + const char *pg = page; + int i, ret; + + if (!fun) + return -EINVAL; + + while (pg - page < len) { + i = 0; + while (i < sizeof(buf) && (pg - page < len) && 
+ *pg != '\0' && *pg != '\n') + buf[i++] = *pg++; + if (i == sizeof(buf)) + return -EINVAL; + while ((pg - page < len) && (*pg == '\0' || *pg == '\n')) + ++pg; + buf[i] = '\0'; + ret = fun(buf, priv); + if (ret) + return ret; + } + + return 0; +} + +static ssize_t uvcg_frame_dw_frame_interval_store(struct uvcg_frame *ch, + const char *page, size_t len) +{ + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct uvcg_format *fmt; + struct mutex *su_mutex = &ch->item.ci_group->cg_subsys->su_mutex; + int ret = 0, n = 0; + u32 *frm_intrv, *tmp; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = ch->item.ci_parent->ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + fmt = to_uvcg_format(ch->item.ci_parent); + + mutex_lock(&opts->lock); + if (fmt->linked || opts->refcnt) { + ret = -EBUSY; + goto end; + } + + ret = __uvcg_iter_frm_intrv(page, len, __uvcg_count_frm_intrv, &n); + if (ret) + goto end; + + tmp = frm_intrv = kcalloc(n, sizeof(u32), GFP_KERNEL); + if (!frm_intrv) { + ret = -ENOMEM; + goto end; + } + + ret = __uvcg_iter_frm_intrv(page, len, __uvcg_fill_frm_intrv, &tmp); + if (ret) { + kfree(frm_intrv); + goto end; + } + + kfree(ch->dw_frame_interval); + ch->dw_frame_interval = frm_intrv; + ch->frame.b_frame_interval_type = n; + ret = len; + +end: + mutex_unlock(&opts->lock); + mutex_unlock(su_mutex); + return ret; +} + +static struct uvcg_frame_attribute + uvcg_frame_dw_frame_interval = + __CONFIGFS_ATTR(dwFrameInterval, S_IRUGO | S_IWUSR, + uvcg_frame_dw_frame_interval_show, + uvcg_frame_dw_frame_interval_store); + +static struct configfs_attribute *uvcg_frame_attrs[] = { + &uvcg_frame_bm_capabilities.attr, + &uvcg_frame_w_width.attr, + &uvcg_frame_w_height.attr, + &uvcg_frame_dw_min_bit_rate.attr, + &uvcg_frame_dw_max_bit_rate.attr, + &uvcg_frame_dw_max_video_frame_buffer_size.attr, + &uvcg_frame_dw_default_frame_interval.attr, + &uvcg_frame_dw_frame_interval.attr, + NULL, +}; + +static struct config_item_type uvcg_frame_type = { + .ct_item_ops = &uvcg_frame_item_ops, + .ct_attrs = uvcg_frame_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct config_item *uvcg_frame_make(struct config_group *group, + const char *name) +{ + struct uvcg_frame *h; + struct uvcg_format *fmt; + struct f_uvc_opts *opts; + struct config_item *opts_item; + + h = kzalloc(sizeof(*h), GFP_KERNEL); + if (!h) + return ERR_PTR(-ENOMEM); + + h->frame.b_descriptor_type = USB_DT_CS_INTERFACE; + h->frame.b_frame_index = 1; + h->frame.w_width = cpu_to_le16(640); + h->frame.w_height = cpu_to_le16(360); + h->frame.dw_min_bit_rate = cpu_to_le32(18432000); + h->frame.dw_max_bit_rate = cpu_to_le32(55296000); + h->frame.dw_max_video_frame_buffer_size = cpu_to_le32(460800); + h->frame.dw_default_frame_interval = cpu_to_le32(666666); + + opts_item = group->cg_item.ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + + mutex_lock(&opts->lock); + fmt = to_uvcg_format(&group->cg_item); + if (fmt->type == UVCG_UNCOMPRESSED) { + h->frame.b_descriptor_subtype = UVC_VS_FRAME_UNCOMPRESSED; + h->fmt_type = UVCG_UNCOMPRESSED; + } else if (fmt->type == UVCG_MJPEG) { + h->frame.b_descriptor_subtype = UVC_VS_FRAME_MJPEG; + h->fmt_type = UVCG_MJPEG; + } else { + mutex_unlock(&opts->lock); + kfree(h); + return ERR_PTR(-EINVAL); + } + ++fmt->num_frames; + mutex_unlock(&opts->lock); + + config_item_init_type_name(&h->item, name, &uvcg_frame_type); + + return &h->item; +} + +static void uvcg_frame_drop(struct config_group *group, struct config_item *item) +{ + 
struct uvcg_frame *h = to_uvcg_frame(item); + struct uvcg_format *fmt; + struct f_uvc_opts *opts; + struct config_item *opts_item; + + opts_item = group->cg_item.ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + + mutex_lock(&opts->lock); + fmt = to_uvcg_format(&group->cg_item); + --fmt->num_frames; + kfree(h); + mutex_unlock(&opts->lock); +} + +/* streaming/uncompressed/<NAME> */ +struct uvcg_uncompressed { + struct uvcg_format fmt; + struct uvc_format_uncompressed desc; +}; + +static struct uvcg_uncompressed *to_uvcg_uncompressed(struct config_item *item) +{ + return container_of( + container_of(to_config_group(item), struct uvcg_format, group), + struct uvcg_uncompressed, fmt); +} + +CONFIGFS_ATTR_STRUCT(uvcg_uncompressed); +CONFIGFS_ATTR_OPS(uvcg_uncompressed); + +static struct configfs_item_operations uvcg_uncompressed_item_ops = { + .show_attribute = uvcg_uncompressed_attr_show, + .store_attribute = uvcg_uncompressed_attr_store, +}; + +static struct configfs_group_operations uvcg_uncompressed_group_ops = { + .make_item = uvcg_frame_make, + .drop_item = uvcg_frame_drop, +}; + +static ssize_t uvcg_uncompressed_guid_format_show(struct uvcg_uncompressed *ch, + char *page) +{ + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct mutex *su_mutex = &ch->fmt.group.cg_subsys->su_mutex; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = ch->fmt.group.cg_item.ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + + mutex_lock(&opts->lock); + memcpy(page, ch->desc.guidFormat, sizeof(ch->desc.guidFormat)); + mutex_unlock(&opts->lock); + + mutex_unlock(su_mutex); + + return sizeof(ch->desc.guidFormat); +} + +static ssize_t uvcg_uncompressed_guid_format_store(struct uvcg_uncompressed *ch, + const char *page, size_t len) +{ + struct f_uvc_opts *opts; + struct config_item *opts_item; + struct mutex *su_mutex = &ch->fmt.group.cg_subsys->su_mutex; + int ret; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + opts_item = ch->fmt.group.cg_item.ci_parent->ci_parent->ci_parent; + opts = to_f_uvc_opts(opts_item); + + mutex_lock(&opts->lock); + if (ch->fmt.linked || opts->refcnt) { + ret = -EBUSY; + goto end; + } + + memcpy(ch->desc.guidFormat, page, + min(sizeof(ch->desc.guidFormat), len)); + ret = sizeof(ch->desc.guidFormat); + +end: + mutex_unlock(&opts->lock); + mutex_unlock(su_mutex); + return ret; +} + +static struct uvcg_uncompressed_attribute uvcg_uncompressed_guid_format = + __CONFIGFS_ATTR(guidFormat, S_IRUGO | S_IWUSR, + uvcg_uncompressed_guid_format_show, + uvcg_uncompressed_guid_format_store); + + +#define UVCG_UNCOMPRESSED_ATTR_RO(cname, aname, conv) \ +static ssize_t uvcg_uncompressed_##cname##_show( \ + struct uvcg_uncompressed *u, char *page) \ +{ \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \ + int result; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\ + opts = to_f_uvc_opts(opts_item); \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%d\n", conv(u->desc.aname)); \ + mutex_unlock(&opts->lock); \ + \ + mutex_unlock(su_mutex); \ + return result; \ +} \ + \ +static struct uvcg_uncompressed_attribute \ + uvcg_uncompressed_##cname = \ + __CONFIGFS_ATTR_RO(aname, uvcg_uncompressed_##cname##_show) + +#define UVCG_UNCOMPRESSED_ATTR(cname, aname, conv) \ +static ssize_t uvcg_uncompressed_##cname##_show( \ + 
struct uvcg_uncompressed *u, char *page) \ +{ \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \ + int result; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\ + opts = to_f_uvc_opts(opts_item); \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%d\n", conv(u->desc.aname)); \ + mutex_unlock(&opts->lock); \ + \ + mutex_unlock(su_mutex); \ + return result; \ +} \ + \ +static ssize_t \ +uvcg_uncompressed_##cname##_store(struct uvcg_uncompressed *u, \ + const char *page, size_t len) \ +{ \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \ + int ret; \ + u8 num; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\ + opts = to_f_uvc_opts(opts_item); \ + \ + mutex_lock(&opts->lock); \ + if (u->fmt.linked || opts->refcnt) { \ + ret = -EBUSY; \ + goto end; \ + } \ + \ + ret = kstrtou8(page, 0, &num); \ + if (ret) \ + goto end; \ + \ + if (num > 255) { \ + ret = -EINVAL; \ + goto end; \ + } \ + u->desc.aname = num; \ + ret = len; \ +end: \ + mutex_unlock(&opts->lock); \ + mutex_unlock(su_mutex); \ + return ret; \ +} \ + \ +static struct uvcg_uncompressed_attribute \ + uvcg_uncompressed_##cname = \ + __CONFIGFS_ATTR(aname, S_IRUGO | S_IWUSR, \ + uvcg_uncompressed_##cname##_show, \ + uvcg_uncompressed_##cname##_store) + +#define identity_conv(x) (x) + +UVCG_UNCOMPRESSED_ATTR(b_bits_per_pixel, bBitsPerPixel, identity_conv); +UVCG_UNCOMPRESSED_ATTR(b_default_frame_index, bDefaultFrameIndex, + identity_conv); +UVCG_UNCOMPRESSED_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, identity_conv); +UVCG_UNCOMPRESSED_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, identity_conv); +UVCG_UNCOMPRESSED_ATTR_RO(bm_interface_flags, bmInterfaceFlags, identity_conv); + +#undef identity_conv + +#undef UVCG_UNCOMPRESSED_ATTR +#undef UVCG_UNCOMPRESSED_ATTR_RO + +static inline ssize_t +uvcg_uncompressed_bma_controls_show(struct uvcg_uncompressed *unc, char *page) +{ + return uvcg_format_bma_controls_show(&unc->fmt, page); +} + +static inline ssize_t +uvcg_uncompressed_bma_controls_store(struct uvcg_uncompressed *ch, + const char *page, size_t len) +{ + return uvcg_format_bma_controls_store(&ch->fmt, page, len); +} + +static struct uvcg_uncompressed_attribute uvcg_uncompressed_bma_controls = + __CONFIGFS_ATTR(bmaControls, S_IRUGO | S_IWUSR, + uvcg_uncompressed_bma_controls_show, + uvcg_uncompressed_bma_controls_store); + +static struct configfs_attribute *uvcg_uncompressed_attrs[] = { + &uvcg_uncompressed_guid_format.attr, + &uvcg_uncompressed_b_bits_per_pixel.attr, + &uvcg_uncompressed_b_default_frame_index.attr, + &uvcg_uncompressed_b_aspect_ratio_x.attr, + &uvcg_uncompressed_b_aspect_ratio_y.attr, + &uvcg_uncompressed_bm_interface_flags.attr, + &uvcg_uncompressed_bma_controls.attr, + NULL, +}; + +static struct config_item_type uvcg_uncompressed_type = { + .ct_item_ops = &uvcg_uncompressed_item_ops, + .ct_group_ops = &uvcg_uncompressed_group_ops, + .ct_attrs = uvcg_uncompressed_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct config_group *uvcg_uncompressed_make(struct config_group *group, + const char *name) +{ + static char guid[] = { + 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71 + }; + struct uvcg_uncompressed *h; + + h = 
kzalloc(sizeof(*h), GFP_KERNEL); + if (!h) + return ERR_PTR(-ENOMEM); + + h->desc.bLength = UVC_DT_FORMAT_UNCOMPRESSED_SIZE; + h->desc.bDescriptorType = USB_DT_CS_INTERFACE; + h->desc.bDescriptorSubType = UVC_VS_FORMAT_UNCOMPRESSED; + memcpy(h->desc.guidFormat, guid, sizeof(guid)); + h->desc.bBitsPerPixel = 16; + h->desc.bDefaultFrameIndex = 1; + h->desc.bAspectRatioX = 0; + h->desc.bAspectRatioY = 0; + h->desc.bmInterfaceFlags = 0; + h->desc.bCopyProtect = 0; + + h->fmt.type = UVCG_UNCOMPRESSED; + config_group_init_type_name(&h->fmt.group, name, + &uvcg_uncompressed_type); + + return &h->fmt.group; +} + +static void uvcg_uncompressed_drop(struct config_group *group, + struct config_item *item) +{ + struct uvcg_uncompressed *h = to_uvcg_uncompressed(item); + + kfree(h); +} + +static struct configfs_group_operations uvcg_uncompressed_grp_ops = { + .make_group = uvcg_uncompressed_make, + .drop_item = uvcg_uncompressed_drop, +}; + +static struct config_item_type uvcg_uncompressed_grp_type = { + .ct_group_ops = &uvcg_uncompressed_grp_ops, + .ct_owner = THIS_MODULE, +}; + +/* streaming/mjpeg/<NAME> */ +struct uvcg_mjpeg { + struct uvcg_format fmt; + struct uvc_format_mjpeg desc; +}; + +static struct uvcg_mjpeg *to_uvcg_mjpeg(struct config_item *item) +{ + return container_of( + container_of(to_config_group(item), struct uvcg_format, group), + struct uvcg_mjpeg, fmt); +} + +CONFIGFS_ATTR_STRUCT(uvcg_mjpeg); +CONFIGFS_ATTR_OPS(uvcg_mjpeg); + +static struct configfs_item_operations uvcg_mjpeg_item_ops = { + .show_attribute = uvcg_mjpeg_attr_show, + .store_attribute = uvcg_mjpeg_attr_store, +}; + +static struct configfs_group_operations uvcg_mjpeg_group_ops = { + .make_item = uvcg_frame_make, + .drop_item = uvcg_frame_drop, +}; + +#define UVCG_MJPEG_ATTR_RO(cname, aname, conv) \ +static ssize_t uvcg_mjpeg_##cname##_show(struct uvcg_mjpeg *u, char *page)\ +{ \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \ + int result; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\ + opts = to_f_uvc_opts(opts_item); \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%d\n", conv(u->desc.aname)); \ + mutex_unlock(&opts->lock); \ + \ + mutex_unlock(su_mutex); \ + return result; \ +} \ + \ +static struct uvcg_mjpeg_attribute \ + uvcg_mjpeg_##cname = \ + __CONFIGFS_ATTR_RO(aname, uvcg_mjpeg_##cname##_show) + +#define UVCG_MJPEG_ATTR(cname, aname, conv) \ +static ssize_t uvcg_mjpeg_##cname##_show(struct uvcg_mjpeg *u, char *page)\ +{ \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \ + int result; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\ + opts = to_f_uvc_opts(opts_item); \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%d\n", conv(u->desc.aname)); \ + mutex_unlock(&opts->lock); \ + \ + mutex_unlock(su_mutex); \ + return result; \ +} \ + \ +static ssize_t \ +uvcg_mjpeg_##cname##_store(struct uvcg_mjpeg *u, \ + const char *page, size_t len) \ +{ \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &u->fmt.group.cg_subsys->su_mutex; \ + int ret; \ + u8 num; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = u->fmt.group.cg_item.ci_parent->ci_parent->ci_parent;\ + opts 
= to_f_uvc_opts(opts_item); \ + \ + mutex_lock(&opts->lock); \ + if (u->fmt.linked || opts->refcnt) { \ + ret = -EBUSY; \ + goto end; \ + } \ + \ + ret = kstrtou8(page, 0, &num); \ + if (ret) \ + goto end; \ + \ + if (num > 255) { \ + ret = -EINVAL; \ + goto end; \ + } \ + u->desc.aname = num; \ + ret = len; \ +end: \ + mutex_unlock(&opts->lock); \ + mutex_unlock(su_mutex); \ + return ret; \ +} \ + \ +static struct uvcg_mjpeg_attribute \ + uvcg_mjpeg_##cname = \ + __CONFIGFS_ATTR(aname, S_IRUGO | S_IWUSR, \ + uvcg_mjpeg_##cname##_show, \ + uvcg_mjpeg_##cname##_store) + +#define identity_conv(x) (x) + +UVCG_MJPEG_ATTR(b_default_frame_index, bDefaultFrameIndex, + identity_conv); +UVCG_MJPEG_ATTR_RO(bm_flags, bmFlags, identity_conv); +UVCG_MJPEG_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, identity_conv); +UVCG_MJPEG_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, identity_conv); +UVCG_MJPEG_ATTR_RO(bm_interface_flags, bmInterfaceFlags, identity_conv); + +#undef identity_conv + +#undef UVCG_MJPEG_ATTR +#undef UVCG_MJPEG_ATTR_RO + +static inline ssize_t +uvcg_mjpeg_bma_controls_show(struct uvcg_mjpeg *unc, char *page) +{ + return uvcg_format_bma_controls_show(&unc->fmt, page); +} + +static inline ssize_t +uvcg_mjpeg_bma_controls_store(struct uvcg_mjpeg *ch, + const char *page, size_t len) +{ + return uvcg_format_bma_controls_store(&ch->fmt, page, len); +} + +static struct uvcg_mjpeg_attribute uvcg_mjpeg_bma_controls = + __CONFIGFS_ATTR(bmaControls, S_IRUGO | S_IWUSR, + uvcg_mjpeg_bma_controls_show, + uvcg_mjpeg_bma_controls_store); + +static struct configfs_attribute *uvcg_mjpeg_attrs[] = { + &uvcg_mjpeg_b_default_frame_index.attr, + &uvcg_mjpeg_bm_flags.attr, + &uvcg_mjpeg_b_aspect_ratio_x.attr, + &uvcg_mjpeg_b_aspect_ratio_y.attr, + &uvcg_mjpeg_bm_interface_flags.attr, + &uvcg_mjpeg_bma_controls.attr, + NULL, +}; + +static struct config_item_type uvcg_mjpeg_type = { + .ct_item_ops = &uvcg_mjpeg_item_ops, + .ct_group_ops = &uvcg_mjpeg_group_ops, + .ct_attrs = uvcg_mjpeg_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct config_group *uvcg_mjpeg_make(struct config_group *group, + const char *name) +{ + struct uvcg_mjpeg *h; + + h = kzalloc(sizeof(*h), GFP_KERNEL); + if (!h) + return ERR_PTR(-ENOMEM); + + h->desc.bLength = UVC_DT_FORMAT_MJPEG_SIZE; + h->desc.bDescriptorType = USB_DT_CS_INTERFACE; + h->desc.bDescriptorSubType = UVC_VS_FORMAT_MJPEG; + h->desc.bDefaultFrameIndex = 1; + h->desc.bAspectRatioX = 0; + h->desc.bAspectRatioY = 0; + h->desc.bmInterfaceFlags = 0; + h->desc.bCopyProtect = 0; + + h->fmt.type = UVCG_MJPEG; + config_group_init_type_name(&h->fmt.group, name, + &uvcg_mjpeg_type); + + return &h->fmt.group; +} + +static void uvcg_mjpeg_drop(struct config_group *group, + struct config_item *item) +{ + struct uvcg_mjpeg *h = to_uvcg_mjpeg(item); + + kfree(h); +} + +static struct configfs_group_operations uvcg_mjpeg_grp_ops = { + .make_group = uvcg_mjpeg_make, + .drop_item = uvcg_mjpeg_drop, +}; + +static struct config_item_type uvcg_mjpeg_grp_type = { + .ct_group_ops = &uvcg_mjpeg_grp_ops, + .ct_owner = THIS_MODULE, +}; + +/* streaming/color_matching/default */ +static struct uvcg_default_color_matching { + struct config_group group; +} uvcg_default_color_matching; + +static inline struct uvcg_default_color_matching +*to_uvcg_default_color_matching(struct config_item *item) +{ + return container_of(to_config_group(item), + struct uvcg_default_color_matching, group); +} + +CONFIGFS_ATTR_STRUCT(uvcg_default_color_matching); +CONFIGFS_ATTR_OPS_RO(uvcg_default_color_matching); + +static 
struct configfs_item_operations uvcg_default_color_matching_item_ops = { + .show_attribute = uvcg_default_color_matching_attr_show, +}; + +#define UVCG_DEFAULT_COLOR_MATCHING_ATTR(cname, aname, conv) \ +static ssize_t uvcg_default_color_matching_##cname##_show( \ + struct uvcg_default_color_matching *dc, char *page) \ +{ \ + struct f_uvc_opts *opts; \ + struct config_item *opts_item; \ + struct mutex *su_mutex = &dc->group.cg_subsys->su_mutex; \ + struct uvc_color_matching_descriptor *cd; \ + int result; \ + \ + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \ + \ + opts_item = dc->group.cg_item.ci_parent->ci_parent->ci_parent; \ + opts = to_f_uvc_opts(opts_item); \ + cd = &opts->uvc_color_matching; \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%d\n", conv(cd->aname)); \ + mutex_unlock(&opts->lock); \ + \ + mutex_unlock(su_mutex); \ + return result; \ +} \ + \ +static struct uvcg_default_color_matching_attribute \ + uvcg_default_color_matching_##cname = \ + __CONFIGFS_ATTR_RO(aname, uvcg_default_color_matching_##cname##_show) + +#define identity_conv(x) (x) + +UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_color_primaries, bColorPrimaries, + identity_conv); +UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_transfer_characteristics, + bTransferCharacteristics, identity_conv); +UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_matrix_coefficients, bMatrixCoefficients, + identity_conv); + +#undef identity_conv + +#undef UVCG_DEFAULT_COLOR_MATCHING_ATTR + +static struct configfs_attribute *uvcg_default_color_matching_attrs[] = { + &uvcg_default_color_matching_b_color_primaries.attr, + &uvcg_default_color_matching_b_transfer_characteristics.attr, + &uvcg_default_color_matching_b_matrix_coefficients.attr, + NULL, +}; + +static struct config_item_type uvcg_default_color_matching_type = { + .ct_item_ops = &uvcg_default_color_matching_item_ops, + .ct_attrs = uvcg_default_color_matching_attrs, + .ct_owner = THIS_MODULE, +}; + +/* struct uvcg_color_matching {}; */ + +static struct config_group *uvcg_color_matching_default_groups[] = { + &uvcg_default_color_matching.group, + NULL, +}; + +/* streaming/color_matching */ +static struct uvcg_color_matching_grp { + struct config_group group; +} uvcg_color_matching_grp; + +static struct config_item_type uvcg_color_matching_grp_type = { + .ct_owner = THIS_MODULE, +}; + +/* streaming/class/{fs|hs|ss} */ +static struct uvcg_streaming_class { + struct config_group group; +} uvcg_streaming_class_fs, uvcg_streaming_class_hs, uvcg_streaming_class_ss; + + +static inline struct uvc_descriptor_header +***__uvcg_get_stream_class_arr(struct config_item *i, struct f_uvc_opts *o) +{ + struct uvcg_streaming_class *cl = container_of(to_config_group(i), + struct uvcg_streaming_class, group); + + if (cl == &uvcg_streaming_class_fs) + return &o->uvc_fs_streaming_cls; + + if (cl == &uvcg_streaming_class_hs) + return &o->uvc_hs_streaming_cls; + + if (cl == &uvcg_streaming_class_ss) + return &o->uvc_ss_streaming_cls; + + return NULL; +} + +enum uvcg_strm_type { + UVCG_HEADER = 0, + UVCG_FORMAT, + UVCG_FRAME +}; + +/* + * Iterate over a hierarchy of streaming descriptors' config items. + * The items are created by the user with configfs. + * + * It "processes" the header pointed to by @priv1, then for each format + * that follows the header "processes" the format itself and then for + * each frame inside a format "processes" the frame. + * + * As a "processing" function the @fun is used. 
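+ *
+ * For example, a header linked to a single uncompressed format that
+ * contains two frames is walked as:
+ *
+ *	fun(header, priv2, priv3, 0, UVCG_HEADER)
+ *	fun(format, priv2, priv3, 0, UVCG_FORMAT)
+ *	fun(first frame, priv2, priv3, 0, UVCG_FRAME)
+ *	fun(second frame, priv2, priv3, 1, UVCG_FRAME)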
+ *
+ * __uvcg_iter_strm_cls() is used in two contexts: first, to calculate
+ * the amount of memory needed for an array of streaming descriptors
+ * and second, to actually fill the array.
+ *
+ * @h: streaming header pointer
+ * @priv2: an "inout" parameter (the caller might want to see the changes to it)
+ * @priv3: an "inout" parameter (the caller might want to see the changes to it)
+ * @fun: callback function for processing each level of the hierarchy
+ */
+static int __uvcg_iter_strm_cls(struct uvcg_streaming_header *h,
+	void *priv2, void *priv3,
+	int (*fun)(void *, void *, void *, int, enum uvcg_strm_type type))
+{
+	struct uvcg_format_ptr *f;
+	struct config_group *grp;
+	struct config_item *item;
+	struct uvcg_frame *frm;
+	int ret, i, j;
+
+	if (!fun)
+		return -EINVAL;
+
+	i = j = 0;
+	ret = fun(h, priv2, priv3, 0, UVCG_HEADER);
+	if (ret)
+		return ret;
+	list_for_each_entry(f, &h->formats, entry) {
+		ret = fun(f->fmt, priv2, priv3, i++, UVCG_FORMAT);
+		if (ret)
+			return ret;
+		grp = &f->fmt->group;
+		list_for_each_entry(item, &grp->cg_children, ci_entry) {
+			frm = to_uvcg_frame(item);
+			ret = fun(frm, priv2, priv3, j++, UVCG_FRAME);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Count how many bytes are needed for an array of streaming descriptors.
+ *
+ * @priv1: pointer to a header, format or frame
+ * @priv2: inout parameter, accumulated size of the array
+ * @priv3: inout parameter, accumulated number of the array elements
+ * @n: unused, this function's prototype must match @fun in __uvcg_iter_strm_cls
+ */
+static int __uvcg_cnt_strm(void *priv1, void *priv2, void *priv3, int n,
+			   enum uvcg_strm_type type)
+{
+	size_t *size = priv2;
+	size_t *count = priv3;
+
+	switch (type) {
+	case UVCG_HEADER: {
+		struct uvcg_streaming_header *h = priv1;
+
+		*size += sizeof(h->desc);
+		/* bmaControls */
+		*size += h->num_fmt * UVCG_STREAMING_CONTROL_SIZE;
+	}
+	break;
+	case UVCG_FORMAT: {
+		struct uvcg_format *fmt = priv1;
+
+		if (fmt->type == UVCG_UNCOMPRESSED) {
+			struct uvcg_uncompressed *u =
+				container_of(fmt, struct uvcg_uncompressed,
+					     fmt);
+
+			*size += sizeof(u->desc);
+		} else if (fmt->type == UVCG_MJPEG) {
+			struct uvcg_mjpeg *m =
+				container_of(fmt, struct uvcg_mjpeg, fmt);
+
+			*size += sizeof(m->desc);
+		} else {
+			return -EINVAL;
+		}
+	}
+	break;
+	case UVCG_FRAME: {
+		struct uvcg_frame *frm = priv1;
+		int sz = sizeof(frm->dw_frame_interval);
+
+		*size += sizeof(frm->frame);
+		*size += frm->frame.b_frame_interval_type * sz;
+	}
+	break;
+	}
+
+	++*count;
+
+	return 0;
+}
+
+/*
+ * Fill an array of streaming descriptors.
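+ *
+ * Second pass of uvcg_streaming_class_allow_link(): fills the buffer
+ * sized by __uvcg_cnt_strm() and records the start address of each
+ * descriptor in the per-speed class array.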
+ * + * @priv1: pointer to a header, format or frame + * @priv2: inout parameter, pointer into a block of memory + * @priv3: inout parameter, pointer to a 2-dimensional array + */ +static int __uvcg_fill_strm(void *priv1, void *priv2, void *priv3, int n, + enum uvcg_strm_type type) +{ + void **dest = priv2; + struct uvc_descriptor_header ***array = priv3; + size_t sz; + + **array = *dest; + ++*array; + + switch (type) { + case UVCG_HEADER: { + struct uvc_input_header_descriptor *ihdr = *dest; + struct uvcg_streaming_header *h = priv1; + struct uvcg_format_ptr *f; + + memcpy(*dest, &h->desc, sizeof(h->desc)); + *dest += sizeof(h->desc); + sz = UVCG_STREAMING_CONTROL_SIZE; + list_for_each_entry(f, &h->formats, entry) { + memcpy(*dest, f->fmt->bmaControls, sz); + *dest += sz; + } + ihdr->bLength = sizeof(h->desc) + h->num_fmt * sz; + ihdr->bNumFormats = h->num_fmt; + } + break; + case UVCG_FORMAT: { + struct uvcg_format *fmt = priv1; + + if (fmt->type == UVCG_UNCOMPRESSED) { + struct uvc_format_uncompressed *unc = *dest; + struct uvcg_uncompressed *u = + container_of(fmt, struct uvcg_uncompressed, + fmt); + + memcpy(*dest, &u->desc, sizeof(u->desc)); + *dest += sizeof(u->desc); + unc->bNumFrameDescriptors = fmt->num_frames; + unc->bFormatIndex = n + 1; + } else if (fmt->type == UVCG_MJPEG) { + struct uvc_format_mjpeg *mjp = *dest; + struct uvcg_mjpeg *m = + container_of(fmt, struct uvcg_mjpeg, fmt); + + memcpy(*dest, &m->desc, sizeof(m->desc)); + *dest += sizeof(m->desc); + mjp->bNumFrameDescriptors = fmt->num_frames; + mjp->bFormatIndex = n + 1; + } else { + return -EINVAL; + } + } + break; + case UVCG_FRAME: { + struct uvcg_frame *frm = priv1; + struct uvc_descriptor_header *h = *dest; + + sz = sizeof(frm->frame); + memcpy(*dest, &frm->frame, sz); + *dest += sz; + sz = frm->frame.b_frame_interval_type * + sizeof(*frm->dw_frame_interval); + memcpy(*dest, frm->dw_frame_interval, sz); + *dest += sz; + if (frm->fmt_type == UVCG_UNCOMPRESSED) + h->bLength = UVC_DT_FRAME_UNCOMPRESSED_SIZE( + frm->frame.b_frame_interval_type); + else if (frm->fmt_type == UVCG_MJPEG) + h->bLength = UVC_DT_FRAME_MJPEG_SIZE( + frm->frame.b_frame_interval_type); + } + break; + } + + return 0; +} + +static int uvcg_streaming_class_allow_link(struct config_item *src, + struct config_item *target) +{ + struct config_item *streaming, *header; + struct f_uvc_opts *opts; + struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex; + struct uvc_descriptor_header ***class_array, **cl_arr; + struct uvcg_streaming_header *target_hdr; + void *data, *data_save; + size_t size = 0, count = 0; + int ret = -EINVAL; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + streaming = src->ci_parent->ci_parent; + header = config_group_find_item(to_config_group(streaming), "header"); + if (!header || target->ci_parent != header) + goto out; + + opts = to_f_uvc_opts(streaming->ci_parent); + + mutex_lock(&opts->lock); + + class_array = __uvcg_get_stream_class_arr(src, opts); + if (!class_array || *class_array || opts->refcnt) { + ret = -EBUSY; + goto unlock; + } + + target_hdr = to_uvcg_streaming_header(target); + ret = __uvcg_iter_strm_cls(target_hdr, &size, &count, __uvcg_cnt_strm); + if (ret) + goto unlock; + + count += 2; /* color_matching, NULL */ + *class_array = kcalloc(count, sizeof(void *), GFP_KERNEL); + if (!*class_array) { + ret = -ENOMEM; + goto unlock; + } + + data = data_save = kzalloc(size, GFP_KERNEL); + if (!data) { + kfree(*class_array); + *class_array = NULL; + ret = PTR_ERR(data); + goto unlock; + } + 
cl_arr = *class_array; + ret = __uvcg_iter_strm_cls(target_hdr, &data, &cl_arr, + __uvcg_fill_strm); + if (ret) { + kfree(*class_array); + *class_array = NULL; + /* + * __uvcg_fill_strm() called from __uvcg_iter_stream_cls() + * might have advanced the "data", so use a backup copy + */ + kfree(data_save); + goto unlock; + } + *cl_arr = (struct uvc_descriptor_header *)&opts->uvc_color_matching; + + ++target_hdr->linked; + ret = 0; + +unlock: + mutex_unlock(&opts->lock); +out: + mutex_unlock(su_mutex); + return ret; +} + +static int uvcg_streaming_class_drop_link(struct config_item *src, + struct config_item *target) +{ + struct config_item *streaming, *header; + struct f_uvc_opts *opts; + struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex; + struct uvc_descriptor_header ***class_array; + struct uvcg_streaming_header *target_hdr; + int ret = -EINVAL; + + mutex_lock(su_mutex); /* for navigating configfs hierarchy */ + + streaming = src->ci_parent->ci_parent; + header = config_group_find_item(to_config_group(streaming), "header"); + if (!header || target->ci_parent != header) + goto out; + + opts = to_f_uvc_opts(streaming->ci_parent); + + mutex_lock(&opts->lock); + + class_array = __uvcg_get_stream_class_arr(src, opts); + if (!class_array || !*class_array) + goto unlock; + + if (opts->refcnt) { + ret = -EBUSY; + goto unlock; + } + + target_hdr = to_uvcg_streaming_header(target); + --target_hdr->linked; + kfree(**class_array); + kfree(*class_array); + *class_array = NULL; + ret = 0; + +unlock: + mutex_unlock(&opts->lock); +out: + mutex_unlock(su_mutex); + return ret; +} + +static struct configfs_item_operations uvcg_streaming_class_item_ops = { + .allow_link = uvcg_streaming_class_allow_link, + .drop_link = uvcg_streaming_class_drop_link, +}; + +static struct config_item_type uvcg_streaming_class_type = { + .ct_item_ops = &uvcg_streaming_class_item_ops, + .ct_owner = THIS_MODULE, +}; + +static struct config_group *uvcg_streaming_class_default_groups[] = { + &uvcg_streaming_class_fs.group, + &uvcg_streaming_class_hs.group, + &uvcg_streaming_class_ss.group, + NULL, +}; + +/* streaming/class */ +static struct uvcg_streaming_class_grp { + struct config_group group; +} uvcg_streaming_class_grp; + +static struct config_item_type uvcg_streaming_class_grp_type = { + .ct_owner = THIS_MODULE, +}; + +static struct config_group *uvcg_streaming_default_groups[] = { + &uvcg_streaming_header_grp.group, + &uvcg_uncompressed_grp.group, + &uvcg_mjpeg_grp.group, + &uvcg_color_matching_grp.group, + &uvcg_streaming_class_grp.group, + NULL, +}; + +/* streaming */ +static struct uvcg_streaming_grp { + struct config_group group; +} uvcg_streaming_grp; + +static struct config_item_type uvcg_streaming_grp_type = { + .ct_owner = THIS_MODULE, +}; + +static struct config_group *uvcg_default_groups[] = { + &uvcg_control_grp.group, + &uvcg_streaming_grp.group, + NULL, +}; + +static inline struct f_uvc_opts *to_f_uvc_opts(struct config_item *item) +{ + return container_of(to_config_group(item), struct f_uvc_opts, + func_inst.group); +} + +CONFIGFS_ATTR_STRUCT(f_uvc_opts); +CONFIGFS_ATTR_OPS(f_uvc_opts); + +static void uvc_attr_release(struct config_item *item) +{ + struct f_uvc_opts *opts = to_f_uvc_opts(item); + + usb_put_function_instance(&opts->func_inst); +} + +static struct configfs_item_operations uvc_item_ops = { + .release = uvc_attr_release, + .show_attribute = f_uvc_opts_attr_show, + .store_attribute = f_uvc_opts_attr_store, +}; + +#define UVCG_OPTS_ATTR(cname, conv, str2u, uxx, vnoc, limit) \ +static 
ssize_t f_uvc_opts_##cname##_show( \ + struct f_uvc_opts *opts, char *page) \ +{ \ + int result; \ + \ + mutex_lock(&opts->lock); \ + result = sprintf(page, "%d\n", conv(opts->cname)); \ + mutex_unlock(&opts->lock); \ + \ + return result; \ +} \ + \ +static ssize_t \ +f_uvc_opts_##cname##_store(struct f_uvc_opts *opts, \ + const char *page, size_t len) \ +{ \ + int ret; \ + uxx num; \ + \ + mutex_lock(&opts->lock); \ + if (opts->refcnt) { \ + ret = -EBUSY; \ + goto end; \ + } \ + \ + ret = str2u(page, 0, &num); \ + if (ret) \ + goto end; \ + \ + if (num > limit) { \ + ret = -EINVAL; \ + goto end; \ + } \ + opts->cname = vnoc(num); \ + ret = len; \ +end: \ + mutex_unlock(&opts->lock); \ + return ret; \ +} \ + \ +static struct f_uvc_opts_attribute \ + f_uvc_opts_attribute_##cname = \ + __CONFIGFS_ATTR(cname, S_IRUGO | S_IWUSR, \ + f_uvc_opts_##cname##_show, \ + f_uvc_opts_##cname##_store) + +#define identity_conv(x) (x) + +UVCG_OPTS_ATTR(streaming_interval, identity_conv, kstrtou8, u8, identity_conv, + 16); +UVCG_OPTS_ATTR(streaming_maxpacket, le16_to_cpu, kstrtou16, u16, le16_to_cpu, + 3072); +UVCG_OPTS_ATTR(streaming_maxburst, identity_conv, kstrtou8, u8, identity_conv, + 15); + +#undef identity_conv + +#undef UVCG_OPTS_ATTR + +static struct configfs_attribute *uvc_attrs[] = { + &f_uvc_opts_attribute_streaming_interval.attr, + &f_uvc_opts_attribute_streaming_maxpacket.attr, + &f_uvc_opts_attribute_streaming_maxburst.attr, + NULL, +}; + +static struct config_item_type uvc_func_type = { + .ct_item_ops = &uvc_item_ops, + .ct_attrs = uvc_attrs, + .ct_owner = THIS_MODULE, +}; + +static inline void uvcg_init_group(struct config_group *g, + struct config_group **default_groups, + const char *name, + struct config_item_type *type) +{ + g->default_groups = default_groups; + config_group_init_type_name(g, name, type); +} + +int uvcg_attach_configfs(struct f_uvc_opts *opts) +{ + config_group_init_type_name(&uvcg_control_header_grp.group, + "header", + &uvcg_control_header_grp_type); + config_group_init_type_name(&uvcg_default_processing.group, + "default", + &uvcg_default_processing_type); + uvcg_init_group(&uvcg_processing_grp.group, + uvcg_processing_default_groups, + "processing", + &uvcg_processing_grp_type); + config_group_init_type_name(&uvcg_default_camera.group, + "default", + &uvcg_default_camera_type); + uvcg_init_group(&uvcg_camera_grp.group, + uvcg_camera_default_groups, + "camera", + &uvcg_camera_grp_type); + config_group_init_type_name(&uvcg_default_output.group, + "default", + &uvcg_default_output_type); + uvcg_init_group(&uvcg_output_grp.group, + uvcg_output_default_groups, + "output", + &uvcg_output_grp_type); + uvcg_init_group(&uvcg_terminal_grp.group, + uvcg_terminal_default_groups, + "terminal", + &uvcg_terminal_grp_type); + config_group_init_type_name(&uvcg_control_class_fs.group, + "fs", + &uvcg_control_class_type); + config_group_init_type_name(&uvcg_control_class_ss.group, + "ss", + &uvcg_control_class_type); + uvcg_init_group(&uvcg_control_class_grp.group, + uvcg_control_class_default_groups, + "class", + &uvcg_control_class_grp_type); + uvcg_init_group(&uvcg_control_grp.group, + uvcg_control_default_groups, + "control", + &uvcg_control_grp_type); + config_group_init_type_name(&uvcg_streaming_header_grp.group, + "header", + &uvcg_streaming_header_grp_type); + config_group_init_type_name(&uvcg_uncompressed_grp.group, + "uncompressed", + &uvcg_uncompressed_grp_type); + config_group_init_type_name(&uvcg_mjpeg_grp.group, + "mjpeg", + &uvcg_mjpeg_grp_type); + 
config_group_init_type_name(&uvcg_default_color_matching.group, + "default", + &uvcg_default_color_matching_type); + uvcg_init_group(&uvcg_color_matching_grp.group, + uvcg_color_matching_default_groups, + "color_matching", + &uvcg_color_matching_grp_type); + config_group_init_type_name(&uvcg_streaming_class_fs.group, + "fs", + &uvcg_streaming_class_type); + config_group_init_type_name(&uvcg_streaming_class_hs.group, + "hs", + &uvcg_streaming_class_type); + config_group_init_type_name(&uvcg_streaming_class_ss.group, + "ss", + &uvcg_streaming_class_type); + uvcg_init_group(&uvcg_streaming_class_grp.group, + uvcg_streaming_class_default_groups, + "class", + &uvcg_streaming_class_grp_type); + uvcg_init_group(&uvcg_streaming_grp.group, + uvcg_streaming_default_groups, + "streaming", + &uvcg_streaming_grp_type); + uvcg_init_group(&opts->func_inst.group, + uvcg_default_groups, + "", + &uvc_func_type); + return 0; +} diff --git a/drivers/usb/gadget/function/uvc_configfs.h b/drivers/usb/gadget/function/uvc_configfs.h new file mode 100644 index 000000000..085e67be7 --- /dev/null +++ b/drivers/usb/gadget/function/uvc_configfs.h @@ -0,0 +1,22 @@ +/* + * uvc_configfs.h + * + * Configfs support for the uvc function. + * + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef UVC_CONFIGFS_H +#define UVC_CONFIGFS_H + +struct f_uvc_opts; + +int uvcg_attach_configfs(struct f_uvc_opts *opts); + +#endif /* UVC_CONFIGFS_H */ diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c new file mode 100644 index 000000000..d617c39a0 --- /dev/null +++ b/drivers/usb/gadget/function/uvc_queue.c @@ -0,0 +1,350 @@ +/* + * uvc_queue.c -- USB Video Class driver - Buffers management + * + * Copyright (C) 2005-2010 + * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/atomic.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/usb.h> +#include <linux/videodev2.h> +#include <linux/vmalloc.h> +#include <linux/wait.h> + +#include <media/v4l2-common.h> +#include <media/videobuf2-vmalloc.h> + +#include "uvc.h" + +/* ------------------------------------------------------------------------ + * Video buffers queue management. + * + * Video queues is initialized by uvcg_queue_init(). The function performs + * basic initialization of the uvc_video_queue struct and never fails. + * + * Video buffers are managed by videobuf2. The driver uses a mutex to protect + * the videobuf2 queue operations by serializing calls to videobuf2 and a + * spinlock to protect the IRQ queue that holds the buffers to be processed by + * the driver. 
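+ * The buffers themselves are vmalloc()-backed (vb2_vmalloc_memops), and the
+ * irqqueue list holds the buffers waiting to be sent over USB; it is drained
+ * by the request completion handler, which runs in interrupt context, hence
+ * the spinlock.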
+ */ + +/* ----------------------------------------------------------------------------- + * videobuf2 queue operations + */ + +static int uvc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt, + unsigned int *nbuffers, unsigned int *nplanes, + unsigned int sizes[], void *alloc_ctxs[]) +{ + struct uvc_video_queue *queue = vb2_get_drv_priv(vq); + struct uvc_video *video = container_of(queue, struct uvc_video, queue); + + if (*nbuffers > UVC_MAX_VIDEO_BUFFERS) + *nbuffers = UVC_MAX_VIDEO_BUFFERS; + + *nplanes = 1; + + sizes[0] = video->imagesize; + + return 0; +} + +static int uvc_buffer_prepare(struct vb2_buffer *vb) +{ + struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); + struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf); + + if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT && + vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) { + uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n"); + return -EINVAL; + } + + if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED)) + return -ENODEV; + + buf->state = UVC_BUF_STATE_QUEUED; + buf->mem = vb2_plane_vaddr(vb, 0); + buf->length = vb2_plane_size(vb, 0); + if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) + buf->bytesused = 0; + else + buf->bytesused = vb2_get_plane_payload(vb, 0); + + return 0; +} + +static void uvc_buffer_queue(struct vb2_buffer *vb) +{ + struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); + struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf); + unsigned long flags; + + spin_lock_irqsave(&queue->irqlock, flags); + + if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) { + list_add_tail(&buf->queue, &queue->irqqueue); + } else { + /* If the device is disconnected return the buffer to userspace + * directly. The next QBUF call will fail with -ENODEV. + */ + buf->state = UVC_BUF_STATE_ERROR; + vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR); + } + + spin_unlock_irqrestore(&queue->irqlock, flags); +} + +static struct vb2_ops uvc_queue_qops = { + .queue_setup = uvc_queue_setup, + .buf_prepare = uvc_buffer_prepare, + .buf_queue = uvc_buffer_queue, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, +}; + +int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type, + struct mutex *lock) +{ + int ret; + + queue->queue.type = type; + queue->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; + queue->queue.drv_priv = queue; + queue->queue.buf_struct_size = sizeof(struct uvc_buffer); + queue->queue.ops = &uvc_queue_qops; + queue->queue.lock = lock; + queue->queue.mem_ops = &vb2_vmalloc_memops; + queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC + | V4L2_BUF_FLAG_TSTAMP_SRC_EOF; + ret = vb2_queue_init(&queue->queue); + if (ret) + return ret; + + spin_lock_init(&queue->irqlock); + INIT_LIST_HEAD(&queue->irqqueue); + queue->flags = 0; + + return 0; +} + +/* + * Free the video buffers. + */ +void uvcg_free_buffers(struct uvc_video_queue *queue) +{ + vb2_queue_release(&queue->queue); +} + +/* + * Allocate the video buffers. + */ +int uvcg_alloc_buffers(struct uvc_video_queue *queue, + struct v4l2_requestbuffers *rb) +{ + int ret; + + ret = vb2_reqbufs(&queue->queue, rb); + + return ret ? 
ret : rb->count; +} + +int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf) +{ + return vb2_querybuf(&queue->queue, buf); +} + +int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf) +{ + unsigned long flags; + int ret; + + ret = vb2_qbuf(&queue->queue, buf); + if (ret < 0) + return ret; + + spin_lock_irqsave(&queue->irqlock, flags); + ret = (queue->flags & UVC_QUEUE_PAUSED) != 0; + queue->flags &= ~UVC_QUEUE_PAUSED; + spin_unlock_irqrestore(&queue->irqlock, flags); + return ret; +} + +/* + * Dequeue a video buffer. If nonblocking is false, block until a buffer is + * available. + */ +int uvcg_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf, + int nonblocking) +{ + return vb2_dqbuf(&queue->queue, buf, nonblocking); +} + +/* + * Poll the video queue. + * + * This function implements video queue polling and is intended to be used by + * the device poll handler. + */ +unsigned int uvcg_queue_poll(struct uvc_video_queue *queue, struct file *file, + poll_table *wait) +{ + return vb2_poll(&queue->queue, file, wait); +} + +int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma) +{ + return vb2_mmap(&queue->queue, vma); +} + +#ifndef CONFIG_MMU +/* + * Get unmapped area. + * + * NO-MMU arch need this function to make mmap() work correctly. + */ +unsigned long uvcg_queue_get_unmapped_area(struct uvc_video_queue *queue, + unsigned long pgoff) +{ + return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0); +} +#endif + +/* + * Cancel the video buffers queue. + * + * Cancelling the queue marks all buffers on the irq queue as erroneous, + * wakes them up and removes them from the queue. + * + * If the disconnect parameter is set, further calls to uvc_queue_buffer will + * fail with -ENODEV. + * + * This function acquires the irq spinlock and can be called from interrupt + * context. + */ +void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect) +{ + struct uvc_buffer *buf; + unsigned long flags; + + spin_lock_irqsave(&queue->irqlock, flags); + while (!list_empty(&queue->irqqueue)) { + buf = list_first_entry(&queue->irqqueue, struct uvc_buffer, + queue); + list_del(&buf->queue); + buf->state = UVC_BUF_STATE_ERROR; + vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR); + } + /* This must be protected by the irqlock spinlock to avoid race + * conditions between uvc_queue_buffer and the disconnection event that + * could result in an interruptible wait in uvc_dequeue_buffer. Do not + * blindly replace this logic by checking for the UVC_DEV_DISCONNECTED + * state outside the queue code. + */ + if (disconnect) + queue->flags |= UVC_QUEUE_DISCONNECTED; + spin_unlock_irqrestore(&queue->irqlock, flags); +} + +/* + * Enable or disable the video buffers queue. + * + * The queue must be enabled before starting video acquisition and must be + * disabled after stopping it. This ensures that the video buffers queue + * state can be properly initialized before buffers are accessed from the + * interrupt handler. + * + * Enabling the video queue initializes parameters (such as sequence number, + * sync pattern, ...). If the queue is already enabled, return -EBUSY. + * + * Disabling the video queue cancels the queue and removes all buffers from + * the main queue. + * + * This function can't be called from interrupt context. Use + * uvcg_queue_cancel() instead. 
+ */ +int uvcg_queue_enable(struct uvc_video_queue *queue, int enable) +{ + unsigned long flags; + int ret = 0; + + if (enable) { + ret = vb2_streamon(&queue->queue, queue->queue.type); + if (ret < 0) + return ret; + + queue->sequence = 0; + queue->buf_used = 0; + } else { + ret = vb2_streamoff(&queue->queue, queue->queue.type); + if (ret < 0) + return ret; + + spin_lock_irqsave(&queue->irqlock, flags); + INIT_LIST_HEAD(&queue->irqqueue); + + /* + * FIXME: We need to clear the DISCONNECTED flag to ensure that + * applications will be able to queue buffers for the next + * streaming run. However, clearing it here doesn't guarantee + * that the device will be reconnected in the meantime. + */ + queue->flags &= ~UVC_QUEUE_DISCONNECTED; + spin_unlock_irqrestore(&queue->irqlock, flags); + } + + return ret; +} + +/* called with &queue_irqlock held.. */ +struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue, + struct uvc_buffer *buf) +{ + struct uvc_buffer *nextbuf; + + if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) && + buf->length != buf->bytesused) { + buf->state = UVC_BUF_STATE_QUEUED; + vb2_set_plane_payload(&buf->buf, 0, 0); + return buf; + } + + list_del(&buf->queue); + if (!list_empty(&queue->irqqueue)) + nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer, + queue); + else + nextbuf = NULL; + + buf->buf.v4l2_buf.field = V4L2_FIELD_NONE; + buf->buf.v4l2_buf.sequence = queue->sequence++; + v4l2_get_timestamp(&buf->buf.v4l2_buf.timestamp); + + vb2_set_plane_payload(&buf->buf, 0, buf->bytesused); + vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE); + + return nextbuf; +} + +struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue) +{ + struct uvc_buffer *buf = NULL; + + if (!list_empty(&queue->irqqueue)) + buf = list_first_entry(&queue->irqqueue, struct uvc_buffer, + queue); + else + queue->flags |= UVC_QUEUE_PAUSED; + + return buf; +} + diff --git a/drivers/usb/gadget/function/uvc_queue.h b/drivers/usb/gadget/function/uvc_queue.h new file mode 100644 index 000000000..01ca9eab3 --- /dev/null +++ b/drivers/usb/gadget/function/uvc_queue.h @@ -0,0 +1,96 @@ +#ifndef _UVC_QUEUE_H_ +#define _UVC_QUEUE_H_ + +#ifdef __KERNEL__ + +#include <linux/kernel.h> +#include <linux/poll.h> +#include <linux/videodev2.h> +#include <media/videobuf2-core.h> + +/* Maximum frame size in bytes, for sanity checking. */ +#define UVC_MAX_FRAME_SIZE (16*1024*1024) +/* Maximum number of video buffers. */ +#define UVC_MAX_VIDEO_BUFFERS 32 + +/* ------------------------------------------------------------------------ + * Structures. 
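+ * A uvc_buffer embeds the videobuf2 buffer seen by userspace; its state
+ * normally goes from QUEUED (owned by the driver) to DONE once the USB
+ * transfer completes, or to ERROR on cancellation or disconnect.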
+ */ + +enum uvc_buffer_state { + UVC_BUF_STATE_IDLE = 0, + UVC_BUF_STATE_QUEUED = 1, + UVC_BUF_STATE_ACTIVE = 2, + UVC_BUF_STATE_DONE = 3, + UVC_BUF_STATE_ERROR = 4, +}; + +struct uvc_buffer { + struct vb2_buffer buf; + struct list_head queue; + + enum uvc_buffer_state state; + void *mem; + unsigned int length; + unsigned int bytesused; +}; + +#define UVC_QUEUE_DISCONNECTED (1 << 0) +#define UVC_QUEUE_DROP_INCOMPLETE (1 << 1) +#define UVC_QUEUE_PAUSED (1 << 2) + +struct uvc_video_queue { + struct vb2_queue queue; + + unsigned int flags; + __u32 sequence; + + unsigned int buf_used; + + spinlock_t irqlock; /* Protects flags and irqqueue */ + struct list_head irqqueue; +}; + +static inline int uvc_queue_streaming(struct uvc_video_queue *queue) +{ + return vb2_is_streaming(&queue->queue); +} + +int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type, + struct mutex *lock); + +void uvcg_free_buffers(struct uvc_video_queue *queue); + +int uvcg_alloc_buffers(struct uvc_video_queue *queue, + struct v4l2_requestbuffers *rb); + +int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf); + +int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf); + +int uvcg_dequeue_buffer(struct uvc_video_queue *queue, + struct v4l2_buffer *buf, int nonblocking); + +unsigned int uvcg_queue_poll(struct uvc_video_queue *queue, + struct file *file, poll_table *wait); + +int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma); + +#ifndef CONFIG_MMU +unsigned long uvcg_queue_get_unmapped_area(struct uvc_video_queue *queue, + unsigned long pgoff); +#endif /* CONFIG_MMU */ + +void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect); + +int uvcg_queue_enable(struct uvc_video_queue *queue, int enable); + +struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue, + struct uvc_buffer *buf); + +struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue); + +#endif /* __KERNEL__ */ + +#endif /* _UVC_QUEUE_H_ */ + diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c new file mode 100644 index 000000000..f4ccbd56f --- /dev/null +++ b/drivers/usb/gadget/function/uvc_v4l2.c @@ -0,0 +1,369 @@ +/* + * uvc_v4l2.c -- USB Video Class Gadget driver + * + * Copyright (C) 2009-2010 + * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/videodev2.h> +#include <linux/vmalloc.h> +#include <linux/wait.h> + +#include <media/v4l2-dev.h> +#include <media/v4l2-event.h> +#include <media/v4l2-ioctl.h> + +#include "f_uvc.h" +#include "uvc.h" +#include "uvc_queue.h" +#include "uvc_video.h" +#include "uvc_v4l2.h" + +/* -------------------------------------------------------------------------- + * Requests handling + */ + +static int +uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data) +{ + struct usb_composite_dev *cdev = uvc->func.config->cdev; + struct usb_request *req = uvc->control_req; + + if (data->length < 0) + return usb_ep_set_halt(cdev->gadget->ep0); + + req->length = min_t(unsigned int, uvc->event_length, data->length); + req->zero = data->length < uvc->event_length; + + memcpy(req->buf, data->data, req->length); + + return usb_ep_queue(cdev->gadget->ep0, req, GFP_KERNEL); +} + +/* -------------------------------------------------------------------------- + * V4L2 ioctls + */ + +struct uvc_format +{ + u8 bpp; + u32 fcc; +}; + +static struct uvc_format uvc_formats[] = { + { 16, V4L2_PIX_FMT_YUYV }, + { 0, V4L2_PIX_FMT_MJPEG }, +}; + +static int +uvc_v4l2_querycap(struct file *file, void *fh, struct v4l2_capability *cap) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct usb_composite_dev *cdev = uvc->func.config->cdev; + + strlcpy(cap->driver, "g_uvc", sizeof(cap->driver)); + strlcpy(cap->card, cdev->gadget->name, sizeof(cap->card)); + strlcpy(cap->bus_info, dev_name(&cdev->gadget->dev), + sizeof(cap->bus_info)); + + cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; + cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; + + return 0; +} + +static int +uvc_v4l2_get_format(struct file *file, void *fh, struct v4l2_format *fmt) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvc_video *video = &uvc->video; + + fmt->fmt.pix.pixelformat = video->fcc; + fmt->fmt.pix.width = video->width; + fmt->fmt.pix.height = video->height; + fmt->fmt.pix.field = V4L2_FIELD_NONE; + fmt->fmt.pix.bytesperline = video->bpp * video->width / 8; + fmt->fmt.pix.sizeimage = video->imagesize; + fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB; + fmt->fmt.pix.priv = 0; + + return 0; +} + +static int +uvc_v4l2_set_format(struct file *file, void *fh, struct v4l2_format *fmt) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvc_video *video = &uvc->video; + struct uvc_format *format; + unsigned int imagesize; + unsigned int bpl; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(uvc_formats); ++i) { + format = &uvc_formats[i]; + if (format->fcc == fmt->fmt.pix.pixelformat) + break; + } + + if (i == ARRAY_SIZE(uvc_formats)) { + printk(KERN_INFO "Unsupported format 0x%08x.\n", + fmt->fmt.pix.pixelformat); + return -EINVAL; + } + + bpl = format->bpp * fmt->fmt.pix.width / 8; + imagesize = bpl ? 
bpl * fmt->fmt.pix.height : fmt->fmt.pix.sizeimage; + + video->fcc = format->fcc; + video->bpp = format->bpp; + video->width = fmt->fmt.pix.width; + video->height = fmt->fmt.pix.height; + video->imagesize = imagesize; + + fmt->fmt.pix.field = V4L2_FIELD_NONE; + fmt->fmt.pix.bytesperline = bpl; + fmt->fmt.pix.sizeimage = imagesize; + fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB; + fmt->fmt.pix.priv = 0; + + return 0; +} + +static int +uvc_v4l2_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *b) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvc_video *video = &uvc->video; + + if (b->type != video->queue.queue.type) + return -EINVAL; + + return uvcg_alloc_buffers(&video->queue, b); +} + +static int +uvc_v4l2_querybuf(struct file *file, void *fh, struct v4l2_buffer *b) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvc_video *video = &uvc->video; + + return uvcg_query_buffer(&video->queue, b); +} + +static int +uvc_v4l2_qbuf(struct file *file, void *fh, struct v4l2_buffer *b) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvc_video *video = &uvc->video; + int ret; + + ret = uvcg_queue_buffer(&video->queue, b); + if (ret < 0) + return ret; + + return uvcg_video_pump(video); +} + +static int +uvc_v4l2_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvc_video *video = &uvc->video; + + return uvcg_dequeue_buffer(&video->queue, b, file->f_flags & O_NONBLOCK); +} + +static int +uvc_v4l2_streamon(struct file *file, void *fh, enum v4l2_buf_type type) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvc_video *video = &uvc->video; + int ret; + + if (type != video->queue.queue.type) + return -EINVAL; + + /* Enable UVC video. */ + ret = uvcg_video_enable(video, 1); + if (ret < 0) + return ret; + + /* + * Complete the alternate setting selection setup phase now that + * userspace is ready to provide video frames. 
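+	 * The alternate setting request was answered with
+	 * USB_GADGET_DELAYED_STATUS (see uvc_function_setup_continue() in
+	 * f_uvc.c), so its status stage only completes here, once userspace
+	 * has started streaming.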
+ */ + uvc_function_setup_continue(uvc); + uvc->state = UVC_STATE_STREAMING; + + return 0; +} + +static int +uvc_v4l2_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvc_video *video = &uvc->video; + + if (type != video->queue.queue.type) + return -EINVAL; + + return uvcg_video_enable(video, 0); +} + +static int +uvc_v4l2_subscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + if (sub->type < UVC_EVENT_FIRST || sub->type > UVC_EVENT_LAST) + return -EINVAL; + + return v4l2_event_subscribe(fh, sub, 2, NULL); +} + +static int +uvc_v4l2_unsubscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + return v4l2_event_unsubscribe(fh, sub); +} + +static long +uvc_v4l2_ioctl_default(struct file *file, void *fh, bool valid_prio, + unsigned int cmd, void *arg) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + + switch (cmd) { + case UVCIOC_SEND_RESPONSE: + return uvc_send_response(uvc, arg); + + default: + return -ENOIOCTLCMD; + } +} + +const struct v4l2_ioctl_ops uvc_v4l2_ioctl_ops = { + .vidioc_querycap = uvc_v4l2_querycap, + .vidioc_g_fmt_vid_out = uvc_v4l2_get_format, + .vidioc_s_fmt_vid_out = uvc_v4l2_set_format, + .vidioc_reqbufs = uvc_v4l2_reqbufs, + .vidioc_querybuf = uvc_v4l2_querybuf, + .vidioc_qbuf = uvc_v4l2_qbuf, + .vidioc_dqbuf = uvc_v4l2_dqbuf, + .vidioc_streamon = uvc_v4l2_streamon, + .vidioc_streamoff = uvc_v4l2_streamoff, + .vidioc_subscribe_event = uvc_v4l2_subscribe_event, + .vidioc_unsubscribe_event = uvc_v4l2_unsubscribe_event, + .vidioc_default = uvc_v4l2_ioctl_default, +}; + +/* -------------------------------------------------------------------------- + * V4L2 + */ + +static int +uvc_v4l2_open(struct file *file) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvc_file_handle *handle; + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (handle == NULL) + return -ENOMEM; + + v4l2_fh_init(&handle->vfh, vdev); + v4l2_fh_add(&handle->vfh); + + handle->device = &uvc->video; + file->private_data = &handle->vfh; + + uvc_function_connect(uvc); + return 0; +} + +static int +uvc_v4l2_release(struct file *file) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data); + struct uvc_video *video = handle->device; + + uvc_function_disconnect(uvc); + + mutex_lock(&video->mutex); + uvcg_video_enable(video, 0); + uvcg_free_buffers(&video->queue); + mutex_unlock(&video->mutex); + + file->private_data = NULL; + v4l2_fh_del(&handle->vfh); + v4l2_fh_exit(&handle->vfh); + kfree(handle); + + return 0; +} + +static int +uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + + return uvcg_queue_mmap(&uvc->video.queue, vma); +} + +static unsigned int +uvc_v4l2_poll(struct file *file, poll_table *wait) +{ + struct video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + + return uvcg_queue_poll(&uvc->video.queue, file, wait); +} + +#ifndef CONFIG_MMU +static unsigned long uvcg_v4l2_get_unmapped_area(struct file *file, + unsigned long addr, unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + struct 
video_device *vdev = video_devdata(file); + struct uvc_device *uvc = video_get_drvdata(vdev); + + return uvcg_queue_get_unmapped_area(&uvc->video.queue, pgoff); +} +#endif + +struct v4l2_file_operations uvc_v4l2_fops = { + .owner = THIS_MODULE, + .open = uvc_v4l2_open, + .release = uvc_v4l2_release, + .unlocked_ioctl = video_ioctl2, + .mmap = uvc_v4l2_mmap, + .poll = uvc_v4l2_poll, +#ifndef CONFIG_MMU + .get_unmapped_area = uvcg_v4l2_get_unmapped_area, +#endif +}; + diff --git a/drivers/usb/gadget/function/uvc_v4l2.h b/drivers/usb/gadget/function/uvc_v4l2.h new file mode 100644 index 000000000..2683b92fd --- /dev/null +++ b/drivers/usb/gadget/function/uvc_v4l2.h @@ -0,0 +1,22 @@ +/* + * uvc_v4l2.h -- USB Video Class Gadget driver + * + * Copyright (C) 2009-2010 + * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __UVC_V4L2_H__ +#define __UVC_V4L2_H__ + +extern const struct v4l2_ioctl_ops uvc_v4l2_ioctl_ops; +extern struct v4l2_file_operations uvc_v4l2_fops; + +#endif /* __UVC_V4L2_H__ */ diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c new file mode 100644 index 000000000..3d0d5d94a --- /dev/null +++ b/drivers/usb/gadget/function/uvc_video.c @@ -0,0 +1,398 @@ +/* + * uvc_video.c -- USB Video Class Gadget driver + * + * Copyright (C) 2009-2010 + * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/usb/ch9.h> +#include <linux/usb/gadget.h> +#include <linux/usb/video.h> + +#include <media/v4l2-dev.h> + +#include "uvc.h" +#include "uvc_queue.h" +#include "uvc_video.h" + +/* -------------------------------------------------------------------------- + * Video codecs + */ + +static int +uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf, + u8 *data, int len) +{ + data[0] = 2; + data[1] = UVC_STREAM_EOH | video->fid; + + if (buf->bytesused - video->queue.buf_used <= len - 2) + data[1] |= UVC_STREAM_EOF; + + return 2; +} + +static int +uvc_video_encode_data(struct uvc_video *video, struct uvc_buffer *buf, + u8 *data, int len) +{ + struct uvc_video_queue *queue = &video->queue; + unsigned int nbytes; + void *mem; + + /* Copy video data to the USB buffer. */ + mem = buf->mem + queue->buf_used; + nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used); + + memcpy(data, mem, nbytes); + queue->buf_used += nbytes; + + return nbytes; +} + +static void +uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video, + struct uvc_buffer *buf) +{ + void *mem = req->buf; + int len = video->req_size; + int ret; + + /* Add a header at the beginning of the payload. */ + if (video->payload_size == 0) { + ret = uvc_video_encode_header(video, buf, mem, len); + video->payload_size += ret; + mem += ret; + len -= ret; + } + + /* Process video data. 
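+	 * The amount copied below is clamped both to the room left in the
+	 * current payload (max_payload_size - payload_size) and to what
+	 * remains of the video buffer.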
*/ + len = min((int)(video->max_payload_size - video->payload_size), len); + ret = uvc_video_encode_data(video, buf, mem, len); + + video->payload_size += ret; + len -= ret; + + req->length = video->req_size - len; + req->zero = video->payload_size == video->max_payload_size; + + if (buf->bytesused == video->queue.buf_used) { + video->queue.buf_used = 0; + buf->state = UVC_BUF_STATE_DONE; + uvcg_queue_next_buffer(&video->queue, buf); + video->fid ^= UVC_STREAM_FID; + + video->payload_size = 0; + } + + if (video->payload_size == video->max_payload_size || + buf->bytesused == video->queue.buf_used) + video->payload_size = 0; +} + +static void +uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video, + struct uvc_buffer *buf) +{ + void *mem = req->buf; + int len = video->req_size; + int ret; + + /* Add the header. */ + ret = uvc_video_encode_header(video, buf, mem, len); + mem += ret; + len -= ret; + + /* Process video data. */ + ret = uvc_video_encode_data(video, buf, mem, len); + len -= ret; + + req->length = video->req_size - len; + + if (buf->bytesused == video->queue.buf_used) { + video->queue.buf_used = 0; + buf->state = UVC_BUF_STATE_DONE; + uvcg_queue_next_buffer(&video->queue, buf); + video->fid ^= UVC_STREAM_FID; + } +} + +/* -------------------------------------------------------------------------- + * Request handling + */ + +/* + * I somehow feel that synchronisation won't be easy to achieve here. We have + * three events that control USB requests submission: + * + * - USB request completion: the completion handler will resubmit the request + * if a video buffer is available. + * + * - USB interface setting selection: in response to a SET_INTERFACE request, + * the handler will start streaming if a video buffer is available and if + * video is not currently streaming. + * + * - V4L2 buffer queueing: the driver will start streaming if video is not + * currently streaming. + * + * Race conditions between those 3 events might lead to deadlocks or other + * nasty side effects. + * + * The "video currently streaming" condition can't be detected by the irqqueue + * being empty, as a request can still be in flight. A separate "queue paused" + * flag is thus needed. + * + * The paused flag will be set when we try to retrieve the irqqueue head if the + * queue is empty, and cleared when we queue a buffer. + * + * The USB request completion handler will get the buffer at the irqqueue head + * under protection of the queue spinlock. If the queue is empty, the streaming + * paused flag will be set. Right after releasing the spinlock a userspace + * application can queue a buffer. The flag will then cleared, and the ioctl + * handler will restart the video stream. + */ +static void +uvc_video_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct uvc_video *video = req->context; + struct uvc_video_queue *queue = &video->queue; + struct uvc_buffer *buf; + unsigned long flags; + int ret; + + switch (req->status) { + case 0: + break; + + case -ESHUTDOWN: /* disconnect from host. 
*/ + printk(KERN_DEBUG "VS request cancelled.\n"); + uvcg_queue_cancel(queue, 1); + goto requeue; + + default: + printk(KERN_INFO "VS request completed with status %d.\n", + req->status); + uvcg_queue_cancel(queue, 0); + goto requeue; + } + + spin_lock_irqsave(&video->queue.irqlock, flags); + buf = uvcg_queue_head(&video->queue); + if (buf == NULL) { + spin_unlock_irqrestore(&video->queue.irqlock, flags); + goto requeue; + } + + video->encode(req, video, buf); + + if ((ret = usb_ep_queue(ep, req, GFP_ATOMIC)) < 0) { + printk(KERN_INFO "Failed to queue request (%d).\n", ret); + usb_ep_set_halt(ep); + spin_unlock_irqrestore(&video->queue.irqlock, flags); + uvcg_queue_cancel(queue, 0); + goto requeue; + } + spin_unlock_irqrestore(&video->queue.irqlock, flags); + + return; + +requeue: + spin_lock_irqsave(&video->req_lock, flags); + list_add_tail(&req->list, &video->req_free); + spin_unlock_irqrestore(&video->req_lock, flags); +} + +static int +uvc_video_free_requests(struct uvc_video *video) +{ + unsigned int i; + + for (i = 0; i < UVC_NUM_REQUESTS; ++i) { + if (video->req[i]) { + usb_ep_free_request(video->ep, video->req[i]); + video->req[i] = NULL; + } + + if (video->req_buffer[i]) { + kfree(video->req_buffer[i]); + video->req_buffer[i] = NULL; + } + } + + INIT_LIST_HEAD(&video->req_free); + video->req_size = 0; + return 0; +} + +static int +uvc_video_alloc_requests(struct uvc_video *video) +{ + unsigned int req_size; + unsigned int i; + int ret = -ENOMEM; + + BUG_ON(video->req_size); + + req_size = video->ep->maxpacket + * max_t(unsigned int, video->ep->maxburst, 1) + * (video->ep->mult + 1); + + for (i = 0; i < UVC_NUM_REQUESTS; ++i) { + video->req_buffer[i] = kmalloc(req_size, GFP_KERNEL); + if (video->req_buffer[i] == NULL) + goto error; + + video->req[i] = usb_ep_alloc_request(video->ep, GFP_KERNEL); + if (video->req[i] == NULL) + goto error; + + video->req[i]->buf = video->req_buffer[i]; + video->req[i]->length = 0; + video->req[i]->complete = uvc_video_complete; + video->req[i]->context = video; + + list_add_tail(&video->req[i]->list, &video->req_free); + } + + video->req_size = req_size; + + return 0; + +error: + uvc_video_free_requests(video); + return ret; +} + +/* -------------------------------------------------------------------------- + * Video streaming + */ + +/* + * uvcg_video_pump - Pump video data into the USB requests + * + * This function fills the available USB requests (listed in req_free) with + * video data from the queued buffers. + */ +int uvcg_video_pump(struct uvc_video *video) +{ + struct uvc_video_queue *queue = &video->queue; + struct usb_request *req; + struct uvc_buffer *buf; + unsigned long flags; + int ret; + + /* FIXME TODO Race between uvcg_video_pump and requests completion + * handler ??? + */ + + while (1) { + /* Retrieve the first available USB request, protected by the + * request lock. + */ + spin_lock_irqsave(&video->req_lock, flags); + if (list_empty(&video->req_free)) { + spin_unlock_irqrestore(&video->req_lock, flags); + return 0; + } + req = list_first_entry(&video->req_free, struct usb_request, + list); + list_del(&req->list); + spin_unlock_irqrestore(&video->req_lock, flags); + + /* Retrieve the first available video buffer and fill the + * request, protected by the video queue irqlock. 
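+		 * The irqlock is held across encode and usb_ep_queue() so the
+		 * completion handler and uvcg_queue_cancel() cannot touch the
+		 * same buffer at the same time.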
+ */ + spin_lock_irqsave(&queue->irqlock, flags); + buf = uvcg_queue_head(queue); + if (buf == NULL) { + spin_unlock_irqrestore(&queue->irqlock, flags); + break; + } + + video->encode(req, video, buf); + + /* Queue the USB request */ + ret = usb_ep_queue(video->ep, req, GFP_ATOMIC); + if (ret < 0) { + printk(KERN_INFO "Failed to queue request (%d)\n", ret); + usb_ep_set_halt(video->ep); + spin_unlock_irqrestore(&queue->irqlock, flags); + uvcg_queue_cancel(queue, 0); + break; + } + spin_unlock_irqrestore(&queue->irqlock, flags); + } + + spin_lock_irqsave(&video->req_lock, flags); + list_add_tail(&req->list, &video->req_free); + spin_unlock_irqrestore(&video->req_lock, flags); + return 0; +} + +/* + * Enable or disable the video stream. + */ +int uvcg_video_enable(struct uvc_video *video, int enable) +{ + unsigned int i; + int ret; + + if (video->ep == NULL) { + printk(KERN_INFO "Video enable failed, device is " + "uninitialized.\n"); + return -ENODEV; + } + + if (!enable) { + for (i = 0; i < UVC_NUM_REQUESTS; ++i) + if (video->req[i]) + usb_ep_dequeue(video->ep, video->req[i]); + + uvc_video_free_requests(video); + uvcg_queue_enable(&video->queue, 0); + return 0; + } + + if ((ret = uvcg_queue_enable(&video->queue, 1)) < 0) + return ret; + + if ((ret = uvc_video_alloc_requests(video)) < 0) + return ret; + + if (video->max_payload_size) { + video->encode = uvc_video_encode_bulk; + video->payload_size = 0; + } else + video->encode = uvc_video_encode_isoc; + + return uvcg_video_pump(video); +} + +/* + * Initialize the UVC video stream. + */ +int uvcg_video_init(struct uvc_video *video) +{ + INIT_LIST_HEAD(&video->req_free); + spin_lock_init(&video->req_lock); + + video->fcc = V4L2_PIX_FMT_YUYV; + video->bpp = 16; + video->width = 320; + video->height = 240; + video->imagesize = 320 * 240 * 2; + + /* Initialize the video buffers queue. */ + uvcg_queue_init(&video->queue, V4L2_BUF_TYPE_VIDEO_OUTPUT, + &video->mutex); + return 0; +} + diff --git a/drivers/usb/gadget/function/uvc_video.h b/drivers/usb/gadget/function/uvc_video.h new file mode 100644 index 000000000..ef00f06fa --- /dev/null +++ b/drivers/usb/gadget/function/uvc_video.h @@ -0,0 +1,24 @@ +/* + * uvc_video.h -- USB Video Class Gadget driver + * + * Copyright (C) 2009-2010 + * Laurent Pinchart (laurent.pinchart@ideasonboard.com) + * + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __UVC_VIDEO_H__ +#define __UVC_VIDEO_H__ + +int uvcg_video_pump(struct uvc_video *video); + +int uvcg_video_enable(struct uvc_video *video, int enable); + +int uvcg_video_init(struct uvc_video *video); + +#endif /* __UVC_VIDEO_H__ */ |
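For reference, the uvc_v4l2.c handlers above expose the gadget as a V4L2 output device, so userspace drives it with the standard output-streaming sequence: set the format, request and map buffers, queue them, start streaming, then keep dequeuing and requeuing. The sketch below is illustrative only and is not part of this patch; the device node path (/dev/video0), the buffer count and the 320x240 YUYV format (the defaults set in uvcg_video_init()) are assumptions, error handling is trimmed, and a complete application would also subscribe to the UVC_EVENT_* events and answer class requests via UVCIOC_SEND_RESPONSE.

/* Minimal, illustrative userspace streaming loop for the UVC gadget node. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/videodev2.h>

#define NBUFS 4

int main(void)
{
	struct v4l2_format fmt = {0};
	struct v4l2_requestbuffers req = {0};
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	void *mem[NBUFS];
	size_t length[NBUFS];
	unsigned int i;
	int fd = open("/dev/video0", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return 1;

	/* Negotiate the frame format (matches the driver defaults). */
	fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	fmt.fmt.pix.width = 320;
	fmt.fmt.pix.height = 240;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
	fmt.fmt.pix.field = V4L2_FIELD_NONE;
	if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
		return 1;

	/* Allocate and map the buffers handled by uvc_queue.c. */
	req.count = NBUFS;
	req.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	req.memory = V4L2_MEMORY_MMAP;
	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0)
		return 1;

	for (i = 0; i < req.count; i++) {
		struct v4l2_buffer buf = {0};

		buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
		buf.memory = V4L2_MEMORY_MMAP;
		buf.index = i;
		if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0)
			return 1;
		length[i] = buf.length;
		mem[i] = mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
			      MAP_SHARED, fd, buf.m.offset);

		/* Fill with image data, then hand the buffer to the driver. */
		memset(mem[i], 0x80, length[i]);
		buf.bytesused = fmt.fmt.pix.sizeimage;
		if (ioctl(fd, VIDIOC_QBUF, &buf) < 0)
			return 1;
	}

	if (ioctl(fd, VIDIOC_STREAMON, &type) < 0)
		return 1;

	/* Keep the ring turning: dequeue, refill, requeue. */
	for (;;) {
		struct v4l2_buffer buf = {0};

		buf.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
		buf.memory = V4L2_MEMORY_MMAP;
		if (ioctl(fd, VIDIOC_DQBUF, &buf) < 0)
			break;
		memset(mem[buf.index], 0x80, length[buf.index]);
		buf.bytesused = fmt.fmt.pix.sizeimage;
		if (ioctl(fd, VIDIOC_QBUF, &buf) < 0)
			break;
	}

	ioctl(fd, VIDIOC_STREAMOFF, &type);
	close(fd);
	return 0;
}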