const u8 *bufp = tbuf;
int len = 0;
int patch_wakeup = 0;
- int status = 0;
+ int status;
int n;
might_sleep();
+ spin_lock_irq(&hcd_root_hub_lock);
+ status = usb_hcd_link_urb_to_ep(hcd, urb);
+ spin_unlock_irq(&hcd_root_hub_lock);
+ if (status)
+ return status;
+
cmd = (struct usb_ctrlrequest *) urb->setup_packet;
typeReq = (cmd->bRequestType << 8) | cmd->bRequest;
wValue = le16_to_cpu (cmd->wValue);
/* any errors get returned through the urb completion */
spin_lock_irq(&hcd_root_hub_lock);
- spin_lock(&urb->lock);
if (urb->status == -EINPROGRESS)
urb->status = status;
- spin_unlock(&urb->lock);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
/* This peculiar use of spinlocks echoes what real HC drivers do.
* Avoiding calls to local_irq_disable/enable makes the code
spin_lock_irqsave(&hcd_root_hub_lock, flags);
urb = hcd->status_urb;
if (urb) {
- spin_lock(&urb->lock);
- if (urb->status == -EINPROGRESS) {
- hcd->poll_pending = 0;
- hcd->status_urb = NULL;
- urb->status = 0;
- urb->hcpriv = NULL;
- urb->actual_length = length;
- memcpy(urb->transfer_buffer, buffer, length);
- } else /* urb has been unlinked */
- length = 0;
- spin_unlock(&urb->lock);
+ hcd->poll_pending = 0;
+ hcd->status_urb = NULL;
+ urb->status = 0;
+ urb->hcpriv = NULL;
+ urb->actual_length = length;
+ memcpy(urb->transfer_buffer, buffer, length);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock(&hcd_root_hub_lock);
usb_hcd_giveback_urb(hcd, urb);
spin_lock(&hcd_root_hub_lock);
- } else
+ } else {
length = 0;
-
- if (length <= 0)
hcd->poll_pending = 1;
+ }
spin_unlock_irqrestore(&hcd_root_hub_lock, flags);
}
int len = 1 + (urb->dev->maxchild / 8);
spin_lock_irqsave (&hcd_root_hub_lock, flags);
- if (urb->status != -EINPROGRESS) /* already unlinked */
- retval = urb->status;
- else if (hcd->status_urb || urb->transfer_buffer_length < len) {
+ if (hcd->status_urb || urb->transfer_buffer_length < len) {
dev_dbg (hcd->self.controller, "not queuing rh status urb\n");
retval = -EINVAL;
- } else {
- hcd->status_urb = urb;
- urb->hcpriv = hcd; /* indicate it's queued */
+ goto done;
+ }
- if (!hcd->uses_new_polling)
- mod_timer (&hcd->rh_timer,
- (jiffies/(HZ/4) + 1) * (HZ/4));
+ retval = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (retval)
+ goto done;
- /* If a status change has already occurred, report it ASAP */
- else if (hcd->poll_pending)
- mod_timer (&hcd->rh_timer, jiffies);
- retval = 0;
- }
+ hcd->status_urb = urb;
+ urb->hcpriv = hcd; /* indicate it's queued */
+ if (!hcd->uses_new_polling)
+ mod_timer(&hcd->rh_timer, (jiffies/(HZ/4) + 1) * (HZ/4));
+
+ /* If a status change has already occurred, report it ASAP */
+ else if (hcd->poll_pending)
+ mod_timer(&hcd->rh_timer, jiffies);
+ retval = 0;
+ done:
spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
return retval;
}
/* Unlinks of root-hub control URBs are legal, but they don't do anything
* since these URBs always execute synchronously.
*/
-static int usb_rh_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
+static int usb_rh_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
unsigned long flags;
+ int rc;
spin_lock_irqsave(&hcd_root_hub_lock, flags);
+ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (rc)
+ goto done;
+
if (usb_endpoint_num(&urb->ep->desc) == 0) { /* Control URB */
; /* Do nothing */
if (urb == hcd->status_urb) {
hcd->status_urb = NULL;
urb->hcpriv = NULL;
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock(&hcd_root_hub_lock);
usb_hcd_giveback_urb(hcd, urb);
spin_lock(&hcd_root_hub_lock);
}
}
+ done:
spin_unlock_irqrestore(&hcd_root_hub_lock, flags);
- return 0;
+ return rc;
}
/*-------------------------------------------------------------------------*/
-static int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb)
+/**
+ * usb_hcd_link_urb_to_ep - add an URB to its endpoint queue
+ * @hcd: host controller to which @urb was submitted
+ * @urb: URB being submitted
+ *
+ * Host controller drivers should call this routine in their enqueue()
+ * method. The HCD's private spinlock must be held and interrupts must
+ * be disabled. The actions carried out here are required for URB
+ * submission, as well as for endpoint shutdown and for usb_kill_urb.
+ *
+ * Returns 0 for no error, otherwise a negative error code (in which case
+ * the enqueue() method must fail). If no error occurs but enqueue() fails
+ * anyway, it must call usb_hcd_unlink_urb_from_ep() before releasing
+ * the private spinlock and returning.
+ */
+int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb)
{
- unsigned long flags;
int rc = 0;
- spin_lock_irqsave(&hcd_urb_list_lock, flags);
+ spin_lock(&hcd_urb_list_lock);
/* Check that the URB isn't being killed */
if (unlikely(urb->reject)) {
rc = -EPERM;
goto done;
}
done:
- spin_unlock_irqrestore(&hcd_urb_list_lock, flags);
+ spin_unlock(&hcd_urb_list_lock);
return rc;
}
+EXPORT_SYMBOL_GPL(usb_hcd_link_urb_to_ep);
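
To make the contract in the kernel-doc above concrete, an HCD's enqueue() method would use this routine roughly as follows. This is a minimal illustrative sketch, not part of the patch: example_hcd, hcd_to_example() and example_start_io() are hypothetical stand-ins for a driver's private state and hardware-specific setup.

static int example_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
		gfp_t mem_flags)
{
	struct example_hcd *ex = hcd_to_example(hcd);	/* hypothetical */
	unsigned long flags;
	int rc;

	/* private spinlock held, interrupts disabled */
	spin_lock_irqsave(&ex->lock, flags);
	rc = usb_hcd_link_urb_to_ep(hcd, urb);
	if (rc)
		goto done;			/* enqueue() must fail */

	rc = example_start_io(ex, urb);		/* hypothetical h/w setup */
	if (rc)
		/* enqueue() fails after a successful link: undo it
		 * before releasing the private lock */
		usb_hcd_unlink_urb_from_ep(hcd, urb);
done:
	spin_unlock_irqrestore(&ex->lock, flags);
	return rc;
}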
-static int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
+/**
+ * usb_hcd_check_unlink_urb - check whether an URB may be unlinked
+ * @hcd: host controller to which @urb was submitted
+ * @urb: URB being checked for unlinkability
+ * @status: error code to store in @urb if the unlink succeeds
+ *
+ * Host controller drivers should call this routine in their dequeue()
+ * method. The HCD's private spinlock must be held and interrupts must
+ * be disabled. The actions carried out here are required for making
+ * sure that an unlink is valid.
+ *
+ * Returns 0 for no error, otherwise a negative error code (in which case
+ * the dequeue() method must fail). The possible error codes are:
+ *
+ * -EIDRM: @urb was not submitted or has already completed.
+ * The completion function may not have been called yet.
+ *
+ * -EBUSY: @urb has already been unlinked.
+ */
+int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
int status)
{
- unsigned long flags;
struct list_head *tmp;
- int rc = 0;
-
- /*
- * we contend for urb->status with the hcd core,
- * which changes it while returning the urb.
- *
- * Caller guaranteed that the urb pointer hasn't been freed, and
- * that it was submitted. But as a rule it can't know whether or
- * not it's already been unlinked ... so we respect the reversed
- * lock sequence needed for the usb_hcd_giveback_urb() code paths
- * (urb lock, then hcd_urb_list_lock) in case some other CPU is now
- * unlinking it.
- */
- spin_lock_irqsave(&urb->lock, flags);
- spin_lock(&hcd_urb_list_lock);
/* insist the urb is still queued */
list_for_each(tmp, &urb->ep->urb_list) {
if (tmp == &urb->urb_list)
break;
}
- if (tmp != &urb->urb_list) {
- rc = -EIDRM;
- goto done;
- }
+ if (tmp != &urb->urb_list)
+ return -EIDRM;
/* Any status except -EINPROGRESS means something already started to
* unlink this URB from the hardware. So there's no more work to do.
*/
- if (urb->status != -EINPROGRESS) {
- rc = -EBUSY;
- goto done;
- }
+ if (urb->status != -EINPROGRESS)
+ return -EBUSY;
urb->status = status;
/* IRQ setup can easily be broken so that USB controllers
set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
}
- done:
- spin_unlock(&hcd_urb_list_lock);
- spin_unlock_irqrestore (&urb->lock, flags);
- return rc;
+ return 0;
}
+EXPORT_SYMBOL_GPL(usb_hcd_check_unlink_urb);
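
The corresponding dequeue() side looks roughly like this — again a hypothetical sketch, with example_cancel_io() standing in for the driver's hardware-specific cancellation:

static int example_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
		int status)
{
	struct example_hcd *ex = hcd_to_example(hcd);	/* hypothetical */
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ex->lock, flags);
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc == 0)				/* unlink is valid */
		example_cancel_io(ex, urb);	/* hypothetical cancel */
	spin_unlock_irqrestore(&ex->lock, flags);
	return rc;				/* 0, -EIDRM or -EBUSY */
}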
-static void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb)
+/**
+ * usb_hcd_unlink_urb_from_ep - remove an URB from its endpoint queue
+ * @hcd: host controller to which @urb was submitted
+ * @urb: URB being unlinked
+ *
+ * Host controller drivers should call this routine before calling
+ * usb_hcd_giveback_urb(). The HCD's private spinlock must be held and
+ * interrupts must be disabled. The actions carried out here are required
+ * for URB completion.
+ */
+void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb)
{
- unsigned long flags;
-
/* clear all state linking urb to this dev (and hcd) */
- spin_lock_irqsave(&hcd_urb_list_lock, flags);
+ spin_lock(&hcd_urb_list_lock);
list_del_init(&urb->urb_list);
- spin_unlock_irqrestore(&hcd_urb_list_lock, flags);
+ spin_unlock(&hcd_urb_list_lock);
}
+EXPORT_SYMBOL_GPL(usb_hcd_unlink_urb_from_ep);
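
Completion then follows the unlink-before-giveback pattern that every driver converted below (dummy_hcd, EHCI, ISP116x, OHCI, R8A66597, SL811, U132, UHCI) adopts: remove the URB from the endpoint queue while still holding the private lock, then drop that lock around usb_hcd_giveback_urb() because complete() may reenter the HCD. A hypothetical helper (example_to_hcd() is an assumed converter):

/* caller holds ex->lock with interrupts disabled */
static void example_finish_urb(struct example_hcd *ex, struct urb *urb)
__releases(ex->lock) __acquires(ex->lock)
{
	usb_hcd_unlink_urb_from_ep(example_to_hcd(ex), urb);
	spin_unlock(&ex->lock);
	usb_hcd_giveback_urb(example_to_hcd(ex), urb);
	spin_lock(&ex->lock);
}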
static void map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
{
* URBs must be submitted in process context with interrupts
* enabled.
*/
- status = usb_hcd_link_urb_to_ep(hcd, urb);
- if (!status) {
- map_urb_for_dma(hcd, urb);
- if (is_root_hub(urb->dev))
- status = rh_urb_enqueue(hcd, urb);
- else
- status = hcd->driver->urb_enqueue(hcd, urb->ep, urb,
- mem_flags);
- }
+ map_urb_for_dma(hcd, urb);
+ if (is_root_hub(urb->dev))
+ status = rh_urb_enqueue(hcd, urb);
+ else
+ status = hcd->driver->urb_enqueue(hcd, urb, mem_flags);
if (unlikely(status)) {
usbmon_urb_submit_error(&hcd->self, urb, status);
unmap_urb_for_dma(hcd, urb);
- usb_hcd_unlink_urb_from_ep(hcd, urb);
INIT_LIST_HEAD(&urb->urb_list);
atomic_dec(&urb->use_count);
if (urb->reject)
* soon as practical. we've already set up the urb's return status,
* but we can't know if the callback completed already.
*/
-static int
-unlink1 (struct usb_hcd *hcd, struct urb *urb)
+static int unlink1(struct usb_hcd *hcd, struct urb *urb, int status)
{
int value;
if (is_root_hub(urb->dev))
- value = usb_rh_urb_dequeue (hcd, urb);
+ value = usb_rh_urb_dequeue(hcd, urb, status);
else {
/* The only reason an HCD might fail this call is if
* it has not yet fully queued the urb to begin with.
* Such failures should be harmless. */
- value = hcd->driver->urb_dequeue (hcd, urb);
+ value = hcd->driver->urb_dequeue(hcd, urb, status);
}
-
- if (value != 0)
- dev_dbg (hcd->self.controller, "dequeue %p --> %d\n",
- urb, value);
return value;
}
int retval;
hcd = bus_to_hcd(urb->dev->bus);
-
- retval = usb_hcd_check_unlink_urb(hcd, urb, status);
- if (!retval)
- retval = unlink1(hcd, urb);
+ retval = unlink1(hcd, urb, status);
if (retval == 0)
retval = -EINPROGRESS;
- else if (retval != -EIDRM)
+ else if (retval != -EIDRM && retval != -EBUSY)
dev_dbg(&urb->dev->dev, "hcd_unlink_urb %p fail %d\n",
urb, retval);
return retval;
*/
void usb_hcd_giveback_urb (struct usb_hcd *hcd, struct urb *urb)
{
- usb_hcd_unlink_urb_from_ep(hcd, urb);
unmap_urb_for_dma(hcd, urb);
usbmon_urb_complete (&hcd->self, urb);
usb_unanchor_urb(urb);
rescan:
spin_lock_irq(&hcd_urb_list_lock);
list_for_each_entry (urb, &ep->urb_list, urb_list) {
- int tmp;
int is_in;
/* the urb may already have been unlinked */
is_in = usb_urb_dir_in(urb);
spin_unlock(&hcd_urb_list_lock);
- spin_lock (&urb->lock);
- tmp = urb->status;
- if (tmp == -EINPROGRESS)
- urb->status = -ESHUTDOWN;
- spin_unlock (&urb->lock);
-
- /* kick hcd unless it's already returning this */
- if (tmp == -EINPROGRESS) {
- unlink1 (hcd, urb);
- dev_dbg (hcd->self.controller,
- "shutdown urb %p ep%d%s%s\n",
- urb, usb_endpoint_num(&ep->desc),
- is_in ? "in" : "out",
- ({ char *s;
-
- switch (usb_endpoint_type(&ep->desc)) {
- case USB_ENDPOINT_XFER_CONTROL:
- s = ""; break;
- case USB_ENDPOINT_XFER_BULK:
- s = "-bulk"; break;
- case USB_ENDPOINT_XFER_INT:
- s = "-intr"; break;
- default:
- s = "-iso"; break;
- };
- s;
- }));
- }
+ /* kick hcd */
+ unlink1(hcd, urb, -ESHUTDOWN);
+ dev_dbg (hcd->self.controller,
+ "shutdown urb %p ep%d%s%s\n",
+ urb, usb_endpoint_num(&ep->desc),
+ is_in ? "in" : "out",
+ ({ char *s;
+
+ switch (usb_endpoint_type(&ep->desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ s = ""; break;
+ case USB_ENDPOINT_XFER_BULK:
+ s = "-bulk"; break;
+ case USB_ENDPOINT_XFER_INT:
+ s = "-intr"; break;
+ default:
+ s = "-iso"; break;
+ };
+ s;
+ }));
usb_put_urb (urb);
/* list contents may have changed */
int (*get_frame_number) (struct usb_hcd *hcd);
/* manage i/o requests, device state */
- int (*urb_enqueue) (struct usb_hcd *hcd,
- struct usb_host_endpoint *ep,
- struct urb *urb,
- gfp_t mem_flags);
- int (*urb_dequeue) (struct usb_hcd *hcd, struct urb *urb);
+ int (*urb_enqueue)(struct usb_hcd *hcd,
+ struct urb *urb, gfp_t mem_flags);
+ int (*urb_dequeue)(struct usb_hcd *hcd,
+ struct urb *urb, int status);
/* hw synch, freeing endpoint resources that urb_dequeue can't */
void (*endpoint_disable)(struct usb_hcd *hcd,
/* Needed only if port-change IRQs are level-triggered */
};
+extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
+extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
+ int status);
+extern void usb_hcd_unlink_urb_from_ep(struct usb_hcd *hcd, struct urb *urb);
+
extern int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags);
extern int usb_hcd_unlink_urb (struct urb *urb, int status);
extern void usb_hcd_giveback_urb (struct usb_hcd *hcd, struct urb *urb);
static int dummy_urb_enqueue (
struct usb_hcd *hcd,
- struct usb_host_endpoint *ep,
struct urb *urb,
gfp_t mem_flags
) {
struct dummy *dum;
struct urbp *urbp;
unsigned long flags;
+ int rc;
if (!urb->transfer_buffer && urb->transfer_buffer_length)
return -EINVAL;
dum = hcd_to_dummy (hcd);
spin_lock_irqsave (&dum->lock, flags);
+ rc = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (rc) {
+ kfree(urbp);
+ goto done;
+ }
if (!dum->udev) {
dum->udev = urb->dev;
mod_timer (&dum->timer, jiffies + 1);
+ done:
spin_unlock_irqrestore (&dum->lock, flags);
- return 0;
+ return rc;
}
-static int dummy_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
+static int dummy_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct dummy *dum;
unsigned long flags;
+ int rc;
/* giveback happens automatically in timer callback,
* so make sure the callback happens */
dum = hcd_to_dummy (hcd);
spin_lock_irqsave (&dum->lock, flags);
- if (dum->rh_state != DUMMY_RH_RUNNING && !list_empty(&dum->urbp_list))
+
+ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (!rc && dum->rh_state != DUMMY_RH_RUNNING &&
+ !list_empty(&dum->urbp_list))
mod_timer (&dum->timer, jiffies);
+
spin_unlock_irqrestore (&dum->lock, flags);
- return 0;
+ return rc;
}
static void maybe_set_status (struct urb *urb, int status)
if (ep)
ep->already_seen = ep->setup_stage = 0;
+ usb_hcd_unlink_urb_from_ep(dummy_to_hcd(dum), urb);
spin_unlock (&dum->lock);
usb_hcd_giveback_urb (dummy_to_hcd(dum), urb);
spin_lock (&dum->lock);
*/
static int ehci_urb_enqueue (
struct usb_hcd *hcd,
- struct usb_host_endpoint *ep,
struct urb *urb,
gfp_t mem_flags
) {
default:
if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
return -ENOMEM;
- return submit_async (ehci, ep, urb, &qtd_list, mem_flags);
+ return submit_async(ehci, urb, &qtd_list, mem_flags);
case PIPE_INTERRUPT:
if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags))
return -ENOMEM;
- return intr_submit (ehci, ep, urb, &qtd_list, mem_flags);
+ return intr_submit(ehci, urb, &qtd_list, mem_flags);
case PIPE_ISOCHRONOUS:
if (urb->dev->speed == USB_SPEED_HIGH)
* completions normally happen asynchronously
*/
-static int ehci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
+static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
struct ehci_qh *qh;
unsigned long flags;
+ int rc;
spin_lock_irqsave (&ehci->lock, flags);
+ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (rc)
+ goto done;
+
switch (usb_pipetype (urb->pipe)) {
// case PIPE_CONTROL:
// case PIPE_BULK:
}
done:
spin_unlock_irqrestore (&ehci->lock, flags);
- return 0;
+ return rc;
}
/*-------------------------------------------------------------------------*/
#endif
/* complete() can reenter this HCD */
+ usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
spin_unlock (&ehci->lock);
usb_hcd_giveback_urb (ehci_to_hcd(ehci), urb);
spin_lock (&ehci->lock);
static int
submit_async (
struct ehci_hcd *ehci,
- struct usb_host_endpoint *ep,
struct urb *urb,
struct list_head *qtd_list,
gfp_t mem_flags
int epnum;
unsigned long flags;
struct ehci_qh *qh = NULL;
- int rc = 0;
+ int rc;
qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
- epnum = ep->desc.bEndpointAddress;
+ epnum = urb->ep->desc.bEndpointAddress;
#ifdef EHCI_URB_TRACE
ehci_dbg (ehci,
__FUNCTION__, urb->dev->devpath, urb,
epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
urb->transfer_buffer_length,
- qtd, ep->hcpriv);
+ qtd, urb->ep->hcpriv);
#endif
spin_lock_irqsave (&ehci->lock, flags);
rc = -ESHUTDOWN;
goto done;
}
+ rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
+ if (unlikely(rc))
+ goto done;
- qh = qh_append_tds (ehci, urb, qtd_list, epnum, &ep->hcpriv);
+ qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
if (unlikely(qh == NULL)) {
+ usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
rc = -ENOMEM;
goto done;
}
static int intr_submit (
struct ehci_hcd *ehci,
- struct usb_host_endpoint *ep,
struct urb *urb,
struct list_head *qtd_list,
gfp_t mem_flags
unsigned epnum;
unsigned long flags;
struct ehci_qh *qh;
- int status = 0;
+ int status;
struct list_head empty;
/* get endpoint and transfer/schedule data */
- epnum = ep->desc.bEndpointAddress;
+ epnum = urb->ep->desc.bEndpointAddress;
spin_lock_irqsave (&ehci->lock, flags);
if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
&ehci_to_hcd(ehci)->flags))) {
status = -ESHUTDOWN;
- goto done;
+ goto done_not_linked;
}
+ status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
+ if (unlikely(status))
+ goto done_not_linked;
/* get qh and force any scheduling errors */
INIT_LIST_HEAD (&empty);
- qh = qh_append_tds (ehci, urb, &empty, epnum, &ep->hcpriv);
+ qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
if (qh == NULL) {
status = -ENOMEM;
goto done;
}
/* then queue the urb's tds to the qh */
- qh = qh_append_tds (ehci, urb, qtd_list, epnum, &ep->hcpriv);
+ qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
BUG_ON (qh == NULL);
/* ... update usbfs periodic stats */
ehci_to_hcd(ehci)->self.bandwidth_int_reqs++;
done:
+ if (unlikely(status))
+ usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
+done_not_linked:
spin_unlock_irqrestore (&ehci->lock, flags);
if (status)
qtd_list_free (ehci, urb, qtd_list);
/* schedule ... need to lock */
spin_lock_irqsave (&ehci->lock, flags);
if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
- &ehci_to_hcd(ehci)->flags)))
+ &ehci_to_hcd(ehci)->flags))) {
status = -ESHUTDOWN;
- else
- status = iso_stream_schedule (ehci, urb, stream);
+ goto done_not_linked;
+ }
+ status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
+ if (unlikely(status))
+ goto done_not_linked;
+ status = iso_stream_schedule(ehci, urb, stream);
if (likely (status == 0))
itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
+ else
+ usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
+done_not_linked:
spin_unlock_irqrestore (&ehci->lock, flags);
done:
/* schedule ... need to lock */
spin_lock_irqsave (&ehci->lock, flags);
if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE,
- &ehci_to_hcd(ehci)->flags)))
+ &ehci_to_hcd(ehci)->flags))) {
status = -ESHUTDOWN;
- else
- status = iso_stream_schedule (ehci, urb, stream);
+ goto done_not_linked;
+ }
+ status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
+ if (unlikely(status))
+ goto done_not_linked;
+ status = iso_stream_schedule(ehci, urb, stream);
if (status == 0)
sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
+ else
+ usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
+done_not_linked:
spin_unlock_irqrestore (&ehci->lock, flags);
done:
urb_dbg(urb, "Finish");
+ usb_hcd_unlink_urb_from_ep(isp116x_to_hcd(isp116x), urb);
spin_unlock(&isp116x->lock);
usb_hcd_giveback_urb(isp116x_to_hcd(isp116x), urb);
spin_lock(&isp116x->lock);
/*-----------------------------------------------------------------*/
static int isp116x_urb_enqueue(struct usb_hcd *hcd,
- struct usb_host_endpoint *hep, struct urb *urb,
+ struct urb *urb,
gfp_t mem_flags)
{
struct isp116x *isp116x = hcd_to_isp116x(hcd);
int is_out = !usb_pipein(pipe);
int type = usb_pipetype(pipe);
int epnum = usb_pipeendpoint(pipe);
+ struct usb_host_endpoint *hep = urb->ep;
struct isp116x_ep *ep = NULL;
unsigned long flags;
int i;
if (!HC_IS_RUNNING(hcd->state)) {
kfree(ep);
ret = -ENODEV;
- goto fail;
+ goto fail_not_linked;
+ }
+ ret = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (ret) {
+ kfree(ep);
+ goto fail_not_linked;
}
if (hep->hcpriv)
start_atl_transfers(isp116x);
fail:
+ if (ret)
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ fail_not_linked:
spin_unlock_irqrestore(&isp116x->lock, flags);
return ret;
}
/*
Dequeue URBs.
*/
-static int isp116x_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
+static int isp116x_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
+ int status)
{
struct isp116x *isp116x = hcd_to_isp116x(hcd);
struct usb_host_endpoint *hep;
struct isp116x_ep *ep, *ep_act;
unsigned long flags;
+ int rc;
spin_lock_irqsave(&isp116x->lock, flags);
+ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (rc)
+ goto done;
+
hep = urb->hcpriv;
- /* URB already unlinked (or never linked)? */
- if (!hep) {
- spin_unlock_irqrestore(&isp116x->lock, flags);
- return 0;
- }
ep = hep->hcpriv;
WARN_ON(hep != ep->hep);
if (urb)
finish_request(isp116x, ep, urb);
-
+ done:
spin_unlock_irqrestore(&isp116x->lock, flags);
- return 0;
+ return rc;
}
static void isp116x_endpoint_disable(struct usb_hcd *hcd,
*/
static int ohci_urb_enqueue (
struct usb_hcd *hcd,
- struct usb_host_endpoint *ep,
struct urb *urb,
gfp_t mem_flags
) {
#endif
/* every endpoint has a ed, locate and maybe (re)initialize it */
- if (! (ed = ed_get (ohci, ep, urb->dev, pipe, urb->interval)))
+ if (! (ed = ed_get (ohci, urb->ep, urb->dev, pipe, urb->interval)))
return -ENOMEM;
/* for the private part of the URB we need the number of TDs (size) */
retval = -ENODEV;
goto fail;
}
-
- /* in case of unlink-during-submit */
- spin_lock (&urb->lock);
- if (urb->status != -EINPROGRESS) {
- spin_unlock (&urb->lock);
- urb->hcpriv = urb_priv;
- finish_urb (ohci, urb);
- retval = 0;
+ retval = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (retval)
goto fail;
- }
/* schedule the ed if needed */
if (ed->state == ED_IDLE) {
retval = ed_schedule (ohci, ed);
- if (retval < 0)
- goto fail0;
+ if (retval < 0) {
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ goto fail;
+ }
if (ed->type == PIPE_ISOCHRONOUS) {
u16 frame = ohci_frame_no(ohci);
urb->hcpriv = urb_priv;
td_submit_urb (ohci, urb);
-fail0:
- spin_unlock (&urb->lock);
fail:
if (retval)
urb_free_priv (ohci, urb_priv);
* asynchronously, and we might be dealing with an urb that's
* partially transferred, or an ED with other urbs being unlinked.
*/
-static int ohci_urb_dequeue (struct usb_hcd *hcd, struct urb *urb)
+static int ohci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
unsigned long flags;
+ int rc;
#ifdef OHCI_VERBOSE_DEBUG
urb_print (urb, "UNLINK", 1);
#endif
spin_lock_irqsave (&ohci->lock, flags);
- if (HC_IS_RUNNING(hcd->state)) {
+ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (rc) {
+ ; /* Do nothing */
+ } else if (HC_IS_RUNNING(hcd->state)) {
urb_priv_t *urb_priv;
/* Unless an IRQ completed the unlink while it was being
finish_urb (ohci, urb);
}
spin_unlock_irqrestore (&ohci->lock, flags);
- return 0;
+ return rc;
}
/*-------------------------------------------------------------------------*/
#endif
/* urb->complete() can reenter this HCD */
+ usb_hcd_unlink_urb_from_ep(ohci_to_hcd(ohci), urb);
spin_unlock (&ohci->lock);
usb_hcd_giveback_urb (ohci_to_hcd(ohci), urb);
spin_lock (&ohci->lock);
if (urb) {
urb->status = -ENODEV;
urb->hcpriv = NULL;
+ usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597),
+ urb);
+
spin_unlock(&r8a66597->lock);
usb_hcd_giveback_urb(r8a66597_to_hcd(r8a66597), urb);
spin_lock(&r8a66597->lock);
urb->start_frame = r8a66597_get_frame(hcd);
urb->hcpriv = NULL;
+ usb_hcd_unlink_urb_from_ep(r8a66597_to_hcd(r8a66597), urb);
+
spin_unlock(&r8a66597->lock);
usb_hcd_giveback_urb(hcd, urb);
spin_lock(&r8a66597->lock);
}
static int r8a66597_urb_enqueue(struct usb_hcd *hcd,
- struct usb_host_endpoint *hep,
struct urb *urb,
gfp_t mem_flags)
{
+ struct usb_host_endpoint *hep = urb->ep;
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
struct r8a66597_td *td = NULL;
- int ret = 0, request = 0;
+ int ret, request = 0;
unsigned long flags;
spin_lock_irqsave(&r8a66597->lock, flags);
if (!get_urb_to_r8a66597_dev(r8a66597, urb)) {
ret = -ENODEV;
- goto error;
+ goto error_not_linked;
}
+ ret = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (ret)
+ goto error_not_linked;
+
if (!hep->hcpriv) {
hep->hcpriv = kzalloc(sizeof(struct r8a66597_pipe),
GFP_ATOMIC);
if (list_empty(&r8a66597->pipe_queue[td->pipenum]))
request = 1;
list_add_tail(&td->queue, &r8a66597->pipe_queue[td->pipenum]);
-
- spin_lock(&urb->lock);
- if (urb->status != -EINPROGRESS) {
- spin_unlock(&urb->lock);
- ret = -EPIPE;
- goto error;
- }
urb->hcpriv = td;
- spin_unlock(&urb->lock);
if (request) {
ret = start_transfer(r8a66597, td);
set_td_timer(r8a66597, td);
error:
+ if (ret)
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+error_not_linked:
spin_unlock_irqrestore(&r8a66597->lock, flags);
return ret;
}
-static int r8a66597_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
+static int r8a66597_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
+ int status)
{
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
struct r8a66597_td *td;
unsigned long flags;
+ int rc;
spin_lock_irqsave(&r8a66597->lock, flags);
+ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (rc)
+ goto done;
+
if (urb->hcpriv) {
td = urb->hcpriv;
pipe_stop(r8a66597, td->pipe);
disable_irq_empty(r8a66597, td->pipenum);
done(r8a66597, td, td->pipenum, urb);
}
+ done:
spin_unlock_irqrestore(&r8a66597->lock, flags);
- return 0;
+ return rc;
}
static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
urb->hcpriv = NULL;
spin_unlock(&urb->lock);
+ usb_hcd_unlink_urb_from_ep(sl811_to_hcd(sl811), urb);
spin_unlock(&sl811->lock);
usb_hcd_giveback_urb(sl811_to_hcd(sl811), urb);
spin_lock(&sl811->lock);
static int sl811h_urb_enqueue(
struct usb_hcd *hcd,
- struct usb_host_endpoint *hep,
struct urb *urb,
gfp_t mem_flags
) {
struct sl811h_ep *ep = NULL;
unsigned long flags;
int i;
- int retval = 0;
+ int retval;
+ struct usb_host_endpoint *hep = urb->ep;
#ifdef DISABLE_ISO
if (type == PIPE_ISOCHRONOUS)
|| !HC_IS_RUNNING(hcd->state)) {
retval = -ENODEV;
kfree(ep);
- goto fail;
+ goto fail_not_linked;
+ }
+ retval = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (retval) {
+ kfree(ep);
+ goto fail_not_linked;
}
if (hep->hcpriv) {
start_transfer(sl811);
sl811_write(sl811, SL11H_IRQ_ENABLE, sl811->irq_enable);
fail:
+ if (retval)
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+fail_not_linked:
spin_unlock_irqrestore(&sl811->lock, flags);
return retval;
}
-static int sl811h_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
+static int sl811h_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct sl811 *sl811 = hcd_to_sl811(hcd);
struct usb_host_endpoint *hep;
unsigned long flags;
struct sl811h_ep *ep;
- int retval = 0;
+ int retval;
spin_lock_irqsave(&sl811->lock, flags);
- hep = urb->hcpriv;
- if (!hep)
+ retval = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (retval)
goto fail;
+ hep = urb->hcpriv;
ep = hep->hcpriv;
if (ep) {
/* finish right away if this urb can't be active ...
VDBG("dequeue, urb %p active %s; wait4irq\n", urb,
(sl811->active_a == ep) ? "A" : "B");
} else
-fail:
retval = -EINVAL;
+ fail:
spin_unlock_irqrestore(&sl811->lock, flags);
return retval;
}
urb->status = status;
urb->hcpriv = NULL;
spin_lock_irqsave(&endp->queue_lock.slock, irqs);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
endp->queue_next += 1;
if (ENDP_QUEUE_SIZE > --endp->queue_size) {
endp->active = 0;
urb->status = status;
urb->hcpriv = NULL;
spin_lock_irqsave(&endp->queue_lock.slock, irqs);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
endp->queue_next += 1;
if (ENDP_QUEUE_SIZE > --endp->queue_size) {
endp->active = 0;
}
static int create_endpoint_and_queue_int(struct u132 *u132,
- struct u132_udev *udev, struct usb_host_endpoint *hep, struct urb *urb,
+ struct u132_udev *udev, struct urb *urb,
struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, u8 address,
gfp_t mem_flags)
{
struct u132_ring *ring;
unsigned long irqs;
- u8 endp_number = ++u132->num_endpoints;
- struct u132_endp *endp = hep->hcpriv = u132->endp[endp_number - 1] =
- kmalloc(sizeof(struct u132_endp), mem_flags);
+ int rc;
+ u8 endp_number;
+ struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
+
if (!endp) {
return -ENOMEM;
}
+
+ spin_lock_init(&endp->queue_lock.slock);
+ spin_lock_irqsave(&endp->queue_lock.slock, irqs);
+ rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
+ if (rc) {
+ spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+ kfree(endp);
+ return rc;
+ }
+
+ endp_number = ++u132->num_endpoints;
+ urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
- spin_lock_init(&endp->queue_lock.slock);
INIT_LIST_HEAD(&endp->urb_more);
ring = endp->ring = &u132->ring[0];
if (ring->curr_endp) {
endp->delayed = 0;
endp->endp_number = endp_number;
endp->u132 = u132;
- endp->hep = hep;
+ endp->hep = urb->ep;
endp->pipetype = usb_pipetype(urb->pipe);
u132_endp_init_kref(u132, endp);
if (usb_pipein(urb->pipe)) {
u132_udev_get_kref(u132, udev);
}
urb->hcpriv = u132;
- spin_lock_irqsave(&endp->queue_lock.slock, irqs);
endp->delayed = 1;
endp->jiffies = jiffies + msecs_to_jiffies(urb->interval);
endp->udev_number = address;
return 0;
}
-static int queue_int_on_old_endpoint(struct u132 *u132, struct u132_udev *udev,
- struct usb_host_endpoint *hep, struct urb *urb,
+static int queue_int_on_old_endpoint(struct u132 *u132,
+ struct u132_udev *udev, struct urb *urb,
struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
u8 usb_endp, u8 address)
{
}
static int create_endpoint_and_queue_bulk(struct u132 *u132,
- struct u132_udev *udev, struct usb_host_endpoint *hep, struct urb *urb,
+ struct u132_udev *udev, struct urb *urb,
struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp, u8 address,
gfp_t mem_flags)
{
int ring_number;
struct u132_ring *ring;
unsigned long irqs;
- u8 endp_number = ++u132->num_endpoints;
- struct u132_endp *endp = hep->hcpriv = u132->endp[endp_number - 1] =
- kmalloc(sizeof(struct u132_endp), mem_flags);
+ int rc;
+ u8 endp_number;
+ struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
+
if (!endp) {
return -ENOMEM;
}
+
+ spin_lock_init(&endp->queue_lock.slock);
+ spin_lock_irqsave(&endp->queue_lock.slock, irqs);
+ rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
+ if (rc) {
+ spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+ kfree(endp);
+ return rc;
+ }
+
+ endp_number = ++u132->num_endpoints;
+ urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
- spin_lock_init(&endp->queue_lock.slock);
INIT_LIST_HEAD(&endp->urb_more);
endp->dequeueing = 0;
endp->edset_flush = 0;
endp->delayed = 0;
endp->endp_number = endp_number;
endp->u132 = u132;
- endp->hep = hep;
+ endp->hep = urb->ep;
endp->pipetype = usb_pipetype(urb->pipe);
u132_endp_init_kref(u132, endp);
if (usb_pipein(urb->pipe)) {
}
ring->length += 1;
urb->hcpriv = u132;
- spin_lock_irqsave(&endp->queue_lock.slock, irqs);
endp->udev_number = address;
endp->usb_addr = usb_addr;
endp->usb_endp = usb_endp;
}
static int queue_bulk_on_old_endpoint(struct u132 *u132, struct u132_udev *udev,
- struct usb_host_endpoint *hep, struct urb *urb,
+ struct urb *urb,
struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
u8 usb_endp, u8 address)
{
}
static int create_endpoint_and_queue_control(struct u132 *u132,
- struct usb_host_endpoint *hep, struct urb *urb,
+ struct urb *urb,
struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp,
gfp_t mem_flags)
{
struct u132_ring *ring;
- u8 endp_number = ++u132->num_endpoints;
- struct u132_endp *endp = hep->hcpriv = u132->endp[endp_number - 1] =
- kmalloc(sizeof(struct u132_endp), mem_flags);
+ unsigned long irqs;
+ int rc;
+ u8 endp_number;
+ struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);
+
if (!endp) {
return -ENOMEM;
}
+
+ spin_lock_init(&endp->queue_lock.slock);
+ spin_lock_irqsave(&endp->queue_lock.slock, irqs);
+ rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
+ if (rc) {
+ spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+ kfree(endp);
+ return rc;
+ }
+
+ endp_number = ++u132->num_endpoints;
+ urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
- spin_lock_init(&endp->queue_lock.slock);
INIT_LIST_HEAD(&endp->urb_more);
ring = endp->ring = &u132->ring[0];
if (ring->curr_endp) {
endp->delayed = 0;
endp->endp_number = endp_number;
endp->u132 = u132;
- endp->hep = hep;
+ endp->hep = urb->ep;
u132_endp_init_kref(u132, endp);
u132_endp_get_kref(u132, endp);
if (usb_addr == 0) {
- unsigned long irqs;
u8 address = u132->addr[usb_addr].address;
struct u132_udev *udev = &u132->udev[address];
endp->udev_number = address;
udev->endp_number_in[usb_endp] = endp_number;
udev->endp_number_out[usb_endp] = endp_number;
urb->hcpriv = u132;
- spin_lock_irqsave(&endp->queue_lock.slock, irqs);
endp->queue_size = 1;
endp->queue_last = 0;
endp->queue_next = 0;
u132_endp_queue_work(u132, endp, 0);
return 0;
} else { /*(usb_addr > 0) */
- unsigned long irqs;
u8 address = u132->addr[usb_addr].address;
struct u132_udev *udev = &u132->udev[address];
endp->udev_number = address;
udev->endp_number_in[usb_endp] = endp_number;
udev->endp_number_out[usb_endp] = endp_number;
urb->hcpriv = u132;
- spin_lock_irqsave(&endp->queue_lock.slock, irqs);
endp->queue_size = 1;
endp->queue_last = 0;
endp->queue_next = 0;
}
static int queue_control_on_old_endpoint(struct u132 *u132,
- struct usb_host_endpoint *hep, struct urb *urb,
+ struct urb *urb,
struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
u8 usb_endp)
{
}
}
-static int u132_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *hep,
- struct urb *urb, gfp_t mem_flags)
+static int u132_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
{
struct u132 *u132 = hcd_to_u132(hcd);
if (irqs_disabled()) {
if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
u8 address = u132->addr[usb_addr].address;
struct u132_udev *udev = &u132->udev[address];
- struct u132_endp *endp = hep->hcpriv;
+ struct u132_endp *endp = urb->ep->hcpriv;
urb->actual_length = 0;
if (endp) {
unsigned long irqs;
int retval;
spin_lock_irqsave(&endp->queue_lock.slock,
irqs);
- retval = queue_int_on_old_endpoint(u132, udev,
- hep, urb, usb_dev, endp, usb_addr,
- usb_endp, address);
+ retval = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (retval == 0) {
+ retval = queue_int_on_old_endpoint(
+ u132, udev, urb,
+ usb_dev, endp,
+ usb_addr, usb_endp,
+ address);
+ if (retval)
+ usb_hcd_unlink_urb_from_ep(
+ hcd, urb);
+ }
spin_unlock_irqrestore(&endp->queue_lock.slock,
irqs);
if (retval) {
return -EINVAL;
} else { /*(endp == NULL) */
return create_endpoint_and_queue_int(u132, udev,
- hep, urb, usb_dev, usb_addr, usb_endp,
- address, mem_flags);
+ urb, usb_dev, usb_addr,
+ usb_endp, address, mem_flags);
}
} else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
dev_err(&u132->platform_dev->dev, "the hardware does no"
} else if (usb_pipetype(urb->pipe) == PIPE_BULK) {
u8 address = u132->addr[usb_addr].address;
struct u132_udev *udev = &u132->udev[address];
- struct u132_endp *endp = hep->hcpriv;
+ struct u132_endp *endp = urb->ep->hcpriv;
urb->actual_length = 0;
if (endp) {
unsigned long irqs;
int retval;
spin_lock_irqsave(&endp->queue_lock.slock,
irqs);
- retval = queue_bulk_on_old_endpoint(u132, udev,
- hep, urb, usb_dev, endp, usb_addr,
- usb_endp, address);
+ retval = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (retval == 0) {
+ retval = queue_bulk_on_old_endpoint(
+ u132, udev, urb,
+ usb_dev, endp,
+ usb_addr, usb_endp,
+ address);
+ if (retval)
+ usb_hcd_unlink_urb_from_ep(
+ hcd, urb);
+ }
spin_unlock_irqrestore(&endp->queue_lock.slock,
irqs);
if (retval) {
return -EINVAL;
} else
return create_endpoint_and_queue_bulk(u132,
- udev, hep, urb, usb_dev, usb_addr,
+ udev, urb, usb_dev, usb_addr,
usb_endp, address, mem_flags);
} else {
- struct u132_endp *endp = hep->hcpriv;
+ struct u132_endp *endp = urb->ep->hcpriv;
u16 urb_size = 8;
u8 *b = urb->setup_packet;
int i = 0;
int retval;
spin_lock_irqsave(&endp->queue_lock.slock,
irqs);
- retval = queue_control_on_old_endpoint(u132,
- hep, urb, usb_dev, endp, usb_addr,
- usb_endp);
+ retval = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (retval == 0) {
+ retval = queue_control_on_old_endpoint(
+ u132, urb, usb_dev,
+ endp, usb_addr,
+ usb_endp);
+ if (retval)
+ usb_hcd_unlink_urb_from_ep(
+ hcd, urb);
+ }
spin_unlock_irqrestore(&endp->queue_lock.slock,
irqs);
if (retval) {
return -EINVAL;
} else
return create_endpoint_and_queue_control(u132,
- hep, urb, usb_dev, usb_addr, usb_endp,
+ urb, usb_dev, usb_addr, usb_endp,
mem_flags);
}
}
}
static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
- struct urb *urb)
+ struct urb *urb, int status)
{
unsigned long irqs;
+ int rc;
+
spin_lock_irqsave(&endp->queue_lock.slock, irqs);
+ rc = usb_hcd_check_unlink_urb(u132_to_hcd(u132), urb, status);
+ if (rc) {
+ spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
+ return rc;
+ }
if (endp->queue_size == 0) {
dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]"
"=%p ring[%d] %c%c usb_endp=%d usb_addr=%d\n", urb,
}
if (urb_slot) {
struct usb_hcd *hcd = u132_to_hcd(u132);
+
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
endp->queue_size -= 1;
if (list_empty(&endp->urb_more)) {
spin_unlock_irqrestore(&endp->queue_lock.slock,
spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
return -EINVAL;
} else {
- int retval = dequeue_from_overflow_chain(u132, endp,
+ int retval;
+
+ usb_hcd_unlink_urb_from_ep(u132_to_hcd(u132), urb);
+ retval = dequeue_from_overflow_chain(u132, endp,
urb);
spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
return retval;
}
}
-static int u132_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
+static int u132_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct u132 *u132 = hcd_to_u132(hcd);
if (u132->going > 2) {
if (usb_pipein(urb->pipe)) {
u8 endp_number = udev->endp_number_in[usb_endp];
struct u132_endp *endp = u132->endp[endp_number - 1];
- return u132_endp_urb_dequeue(u132, endp, urb);
+ return u132_endp_urb_dequeue(u132, endp, urb, status);
} else {
u8 endp_number = udev->endp_number_out[usb_endp];
struct u132_endp *endp = u132->endp[endp_number - 1];
- return u132_endp_urb_dequeue(u132, endp, urb);
+ return u132_endp_urb_dequeue(u132, endp, urb, status);
}
}
}
}
static int uhci_urb_enqueue(struct usb_hcd *hcd,
- struct usb_host_endpoint *hep,
struct urb *urb, gfp_t mem_flags)
{
int ret;
spin_lock_irqsave(&uhci->lock, flags);
- ret = urb->status;
- if (ret != -EINPROGRESS) /* URB already unlinked! */
- goto done;
+ ret = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (ret)
+ goto done_not_linked;
ret = -ENOMEM;
urbp = uhci_alloc_urb_priv(uhci, urb);
if (!urbp)
goto done;
- if (hep->hcpriv)
- qh = (struct uhci_qh *) hep->hcpriv;
+ if (urb->ep->hcpriv)
+ qh = urb->ep->hcpriv;
else {
- qh = uhci_alloc_qh(uhci, urb->dev, hep);
+ qh = uhci_alloc_qh(uhci, urb->dev, urb->ep);
if (!qh)
goto err_no_qh;
}
err_submit_failed:
if (qh->state == QH_STATE_IDLE)
uhci_make_qh_idle(uhci, qh); /* Reclaim unused QH */
-
err_no_qh:
uhci_free_urb_priv(uhci, urbp);
-
done:
+ if (ret)
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+done_not_linked:
spin_unlock_irqrestore(&uhci->lock, flags);
return ret;
}
-static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
+static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct uhci_hcd *uhci = hcd_to_uhci(hcd);
unsigned long flags;
- struct urb_priv *urbp;
struct uhci_qh *qh;
+ int rc;
spin_lock_irqsave(&uhci->lock, flags);
- urbp = urb->hcpriv;
- if (!urbp) /* URB was never linked! */
+ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (rc)
goto done;
- qh = urbp->qh;
+
+ qh = ((struct urb_priv *) urb->hcpriv)->qh;
/* Remove Isochronous TDs from the frame list ASAP */
if (qh->type == USB_ENDPOINT_XFER_ISOC) {
done:
spin_unlock_irqrestore(&uhci->lock, flags);
- return 0;
+ return rc;
}
/*
}
uhci_free_urb_priv(uhci, urbp);
+ usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb);
spin_unlock(&uhci->lock);
usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb);