if (qh->clearing_tt)
goto idle_timeout;
if (list_empty (&qh->qtd_list)) {
- qh_put (qh);
+ qh_destroy(ehci, qh);
break;
}
/* else FALL THROUGH */
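The hunk above, in ehci_endpoint_disable() in ehci-hcd.c, sets the pattern for the whole patch: wherever a qh_put() was known to drop the last reference, it becomes a direct qh_destroy() call, and qh_destroy() now takes the ehci_hcd explicitly because the qh->ehci back-pointer is removed as well. A minimal sketch of that interface change, using invented stand-in types rather than the real driver structures:

	#include <stdlib.h>

	struct hcd { int unused; };	/* stand-in for struct ehci_hcd */

	/* before: every QH carries a back-pointer to its controller */
	struct qh_old { struct hcd *hcd; };

	static void destroy_old(struct qh_old *qh)
	{
		struct hcd *hcd = qh->hcd;	/* context recovered from the QH */

		(void)hcd;
		free(qh);
	}

	/* after: the caller, which always has the HCD in hand, passes it in */
	struct qh_new { int unused; };	/* one pointer smaller */

	static void destroy_new(struct hcd *hcd, struct qh_new *qh)
	{
		(void)hcd;	/* used for debug output and the DMA pools */
		free(qh);
	}

Besides removing the refcount itself, dropping the back-pointer shrinks every QH by one pointer.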
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
@@ ... @@
}
-static void qh_destroy(struct ehci_qh *qh)
+static void qh_destroy(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
- struct ehci_hcd *ehci = qh->ehci;
-
/* clean qtds first, and know this is not linked */
if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) {
ehci_dbg (ehci, "unused qh not empty!\n");
@@ ... @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
if (!qh->hw)
goto fail;
memset(qh->hw, 0, sizeof *qh->hw);
- qh->refcount = 1;
- qh->ehci = ehci;
qh->qh_dma = dma;
// INIT_LIST_HEAD (&qh->qh_list);
INIT_LIST_HEAD (&qh->qtd_list);
@@ ... @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
return NULL;
}
-/* to share a qh (cpu threads, or hc) */
-static inline struct ehci_qh *qh_get (struct ehci_qh *qh)
-{
- WARN_ON(!qh->refcount);
- qh->refcount++;
- return qh;
-}
-
-static inline void qh_put (struct ehci_qh *qh)
-{
- if (!--qh->refcount)
- qh_destroy(qh);
-}
-
/*-------------------------------------------------------------------------*/
/* The queue heads and transfer descriptors are managed from pools tied
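The qh_get()/qh_put() pair deleted above was the entire refcounting API. As a rough stand-alone illustration of the lifetime change (simplified fields and invented names, not the real driver code): in the old model every holder of a QH pointer pinned the object and the last put freed it; in the new model all QH fields are already serialized by the HCD's spinlock, so the QH can simply be freed at the single point where it is known to be idle, off both schedules with an empty qtd_list.

	#include <stdlib.h>

	struct sketch_qh {
		int refcount;		/* old model only */
		int on_schedule;
		int qtds_queued;
	};

	/* old model: shared ownership, the last put frees */
	static void sketch_qh_put(struct sketch_qh *qh)
	{
		if (!--qh->refcount)
			free(qh);
	}

	/* new model: one owner frees once the QH is provably idle */
	static void sketch_qh_destroy(struct sketch_qh *qh)
	{
		free(qh);
	}

	static void sketch_idle_path(struct sketch_qh *qh)
	{
		if (!qh->on_schedule && !qh->qtds_queued)
			sketch_qh_destroy(qh);	/* was: sketch_qh_put() */
	}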
@@ ... @@ static void ehci_mem_cleanup (struct ehci_hcd *ehci)
{
free_cached_lists(ehci);
if (ehci->async)
- qh_put (ehci->async);
+ qh_destroy(ehci, ehci->async);
ehci->async = NULL;
if (ehci->dummy)
- qh_put(ehci->dummy);
+ qh_destroy(ehci, ehci->dummy);
ehci->dummy = NULL;
/* DMA consistent memory and pools */
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
@@ ... @@
/* ... update hc-wide periodic stats (for usbfs) */
ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
}
- qh_put (qh);
}
if (unlikely(urb->unlinked)) {
@@ ... @@
ehci_dbg(ehci, "bogus dev %p speed %d\n", urb->dev,
urb->dev->speed);
done:
- qh_put (qh);
+ qh_destroy(ehci, qh);
return NULL;
}
@@ ... @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
head->qh_next.qh = qh;
head->hw->hw_next = dma;
- qh_get(qh);
qh->xacterrs = 0;
qh->qh_state = QH_STATE_LINKED;
/* qtd completions reported later by interrupt */
@@ ... @@
wmb ();
dummy->hw_token = token;
- urb->hcpriv = qh_get (qh);
+ urb->hcpriv = qh;
}
}
return qh;
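The qh_append_tds() change just above is the subtle one: urb->hcpriv used to hold a counted reference precisely so the QH could not vanish while its URB was in flight. Keeping a plain pointer is safe only because of an ordering invariant, sketched here with invented minimal types: an URB is given back (and its hcpriv goes dead) before the qtd it rode on is retired, and a QH can only become empty, the sole state in which it may be destroyed, after all of its qtds are retired.

	#include <stddef.h>

	struct sketch_urb {
		void *hcpriv;		/* borrowed QH pointer, not counted */
	};

	struct sketch_iqh {
		int qtds_queued;	/* stand-in for qtd_list */
	};

	static void sketch_giveback(struct sketch_urb *urb)
	{
		urb->hcpriv = NULL;	/* the URB stops using the QH... */
	}

	static void sketch_retire_qtd(struct sketch_iqh *qh, struct sketch_urb *urb)
	{
		sketch_giveback(urb);	/* ...before its qtd is retired, so an
					 * empty list implies no in-flight URB
					 * still points at this QH */
		qh->qtds_queued--;
	}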
@@ ... @@ static void end_unlink_async (struct ehci_hcd *ehci)
// qh->hw_next = cpu_to_hc32(qh->qh_dma);
qh->qh_state = QH_STATE_IDLE;
qh->qh_next.qh = NULL;
- qh_put (qh); // refcount from reclaim
/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
next = qh->reclaim;
@@ ... @@ static void end_unlink_async (struct ehci_hcd *ehci)
&& ehci->async->qh_next.qh == NULL)
timer_action (ehci, TIMER_ASYNC_OFF);
}
- qh_put(qh); /* refcount from async list */
if (next) {
ehci->reclaim = NULL;
@@ ... @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
}
qh->qh_state = QH_STATE_UNLINK;
- ehci->reclaim = qh = qh_get (qh);
+ ehci->reclaim = qh;
prev = ehci->async;
while (prev->qh_next.qh != qh)
@@ ... @@ static void scan_async (struct ehci_hcd *ehci)
* gets unlinked then ehci->qh_scan_next is adjusted
* in start_unlink_async().
*/
- qh = qh_get(qh);
temp = qh_completions(ehci, qh);
if (qh->needs_rescan)
unlink_async(ehci, qh);
qh->unlink_time = jiffies + EHCI_SHRINK_JIFFIES;
- qh_put(qh);
if (temp != 0)
goto rescan;
}
}
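The scan_async() hunk above drops the get/put pair that used to pin a QH across qh_completions(). The quoted comment carries the replacement argument: the scanner's cursor, ehci->qh_scan_next, is repaired by start_unlink_async() whenever the QH it points at is unlinked, so the loop never steps through freed memory. A minimal sketch of that cursor-repair pattern, with invented names:

	#include <stddef.h>

	struct sketch_node {
		struct sketch_node *next;
	};

	static struct sketch_node *sketch_scan_next;	/* the scanner's cursor */

	/* unlink repairs the cursor before splicing the node out */
	static void sketch_unlink(struct sketch_node **list, struct sketch_node *n)
	{
		if (sketch_scan_next == n)
			sketch_scan_next = n->next;
		while (*list != n)
			list = &(*list)->next;
		*list = n->next;	/* n may now be freed safely */
	}

	static void sketch_scan(struct sketch_node *head)
	{
		struct sketch_node *n = head;

		while (n != NULL) {
			sketch_scan_next = n->next;
			/* processing n may unlink any node, including
			 * the one sketch_scan_next points at */
			n = sketch_scan_next;
		}
	}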
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
@@ ... @@
qh->qh_state = QH_STATE_LINKED;
qh->xacterrs = 0;
- qh_get (qh);
/* update per-qh bandwidth for usbfs */
ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->period
@@ ... @@
/* qh->qh_next still "live" to HC */
qh->qh_state = QH_STATE_UNLINK;
qh->qh_next.ptr = NULL;
- qh_put (qh);
/* maybe turn off periodic schedule */
return disable_periodic(ehci);
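In the periodic-schedule hunks the reference the schedule itself used to hold is not replaced by anything: qh->qh_state already records whether the QH is linked, so link and unlink reduce to pure state transitions. A hedged sketch of that state machine; the enum values mirror the driver's QH_STATE_* names, but the helpers are invented:

	enum sketch_state { SK_QH_IDLE, SK_QH_LINKED, SK_QH_UNLINK };

	struct sketch_pqh {
		enum sketch_state state;
	};

	static void sketch_link_periodic(struct sketch_pqh *qh)
	{
		qh->state = SK_QH_LINKED;	/* was: state change + qh_get() */
	}

	static void sketch_unlink_periodic(struct sketch_pqh *qh)
	{
		qh->state = SK_QH_UNLINK;	/* was: state change + qh_put();
						 * the QH now stays allocated
						 * until it is idle and its
						 * qtd_list has emptied */
	}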
@@ ... @@ static void scan_periodic (struct ehci_hcd *ehci)
switch (hc32_to_cpu(ehci, type)) {
case Q_TYPE_QH:
/* handle any completions */
- temp.qh = qh_get (q.qh);
+ temp.qh = q.qh;
type = Q_NEXT_TYPE(ehci, q.qh->hw->hw_next);
q = q.qh->qh_next;
if (temp.qh->stamp != ehci->periodic_stamp) {
@@ ... @@ static void scan_periodic (struct ehci_hcd *ehci)
temp.qh->needs_rescan))
intr_deschedule(ehci, temp.qh);
}
- qh_put (temp.qh);
break;
case Q_TYPE_FSTN:
/* for "save place" FSTNs, look at QH entries
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
@@ ... @@ struct ehci_qh {
struct ehci_qtd *dummy;
struct ehci_qh *reclaim; /* next to reclaim */
- struct ehci_hcd *ehci;
unsigned long unlink_time;
-
- /*
- * Do NOT use atomic operations for QH refcounting. On some CPUs
- * (PPC7448 for example), atomic operations cannot be performed on
- * memory that is cache-inhibited (i.e. being used for DMA).
- * Spinlocks are used to protect all QH fields.
- */
- u32 refcount;
unsigned stamp;
u8 needs_rescan; /* Dequeue during giveback */
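The comment deleted above also explains why refcount was a plain u32 under the spinlock in the first place: QH memory comes from a DMA-coherent pool, which on parts like the PPC7448 is cache-inhibited, and atomic operations cannot be performed on such memory. Removing the field removes that constraint entirely. For orientation, a hedged sketch of how the software-only tail of the structure looks after this hunk, abridged to the fields shown above and using standard fixed-width types in place of the kernel's:

	#include <stdint.h>

	struct sketch_qh_tail {
		/* ...hardware state and qtd bookkeeping precede these... */
		void		*dummy;		/* struct ehci_qtd * in the driver */
		void		*reclaim;	/* struct ehci_qh *, next to reclaim */
		unsigned long	unlink_time;
		unsigned	stamp;		/* the ehci back-pointer and the
						 * refcount between these fields
						 * are gone */
		uint8_t		needs_rescan;	/* u8 in the driver */
	};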