Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
 drivers/usb/host/xhci-ring.c | 339 ++++++++++++++++++++++++++-----------------
 1 file changed, 212 insertions(+), 127 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index b62037b..3d9422f 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -143,17 +143,25 @@ static void next_trb(struct xhci_hcd *xhci,
* See Cycle bit rules. SW is the consumer for the event ring only.
* Don't make a ring full of link TRBs. That would be dumb and this would loop.
*/
-static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
- union xhci_trb *next = ++(ring->dequeue);
+ union xhci_trb *next;
unsigned long long addr;
ring->deq_updates++;
+
+ /* If this is not event ring, there is one more usable TRB */
+ if (ring->type != TYPE_EVENT &&
+ !last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
+ ring->num_trbs_free++;
+ next = ++(ring->dequeue);
+
/* Update the dequeue pointer further if that was a link TRB or we're at
* the end of an event ring segment (which doesn't have link TRBS)
*/
while (last_trb(xhci, ring, ring->deq_seg, next)) {
- if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
+ if (ring->type == TYPE_EVENT && last_trb_on_last_seg(xhci,
+ ring, ring->deq_seg, next)) {
ring->cycle_state = (ring->cycle_state ? 0 : 1);
}
ring->deq_seg = ring->deq_seg->next;
@@ -181,13 +189,17 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
* prepare_transfer()?
*/
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
- bool consumer, bool more_trbs_coming, bool isoc)
+ bool more_trbs_coming)
{
u32 chain;
union xhci_trb *next;
unsigned long long addr;
chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
+ /* If this is not event ring, there is one less usable TRB */
+ if (ring->type != TYPE_EVENT &&
+ !last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
+ ring->num_trbs_free--;
next = ++(ring->enqueue);
ring->enq_updates++;
@@ -195,35 +207,35 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
* the end of an event ring segment (which doesn't have link TRBS)
*/
while (last_trb(xhci, ring, ring->enq_seg, next)) {
- if (!consumer) {
- if (ring != xhci->event_ring) {
- /*
- * If the caller doesn't plan on enqueueing more
- * TDs before ringing the doorbell, then we
- * don't want to give the link TRB to the
- * hardware just yet. We'll give the link TRB
- * back in prepare_ring() just before we enqueue
- * the TD at the top of the ring.
- */
- if (!chain && !more_trbs_coming)
- break;
+ if (ring->type != TYPE_EVENT) {
+ /*
+ * If the caller doesn't plan on enqueueing more
+ * TDs before ringing the doorbell, then we
+ * don't want to give the link TRB to the
+ * hardware just yet. We'll give the link TRB
+ * back in prepare_ring() just before we enqueue
+ * the TD at the top of the ring.
+ */
+ if (!chain && !more_trbs_coming)
+ break;
- /* If we're not dealing with 0.95 hardware or
- * isoc rings on AMD 0.96 host,
- * carry over the chain bit of the previous TRB
- * (which may mean the chain bit is cleared).
- */
- if (!(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST))
+ /* If we're not dealing with 0.95 hardware or
+ * isoc rings on AMD 0.96 host,
+ * carry over the chain bit of the previous TRB
+ * (which may mean the chain bit is cleared).
+ */
+ if (!(ring->type == TYPE_ISOC &&
+ (xhci->quirks & XHCI_AMD_0x96_HOST))
&& !xhci_link_trb_quirk(xhci)) {
- next->link.control &=
- cpu_to_le32(~TRB_CHAIN);
- next->link.control |=
- cpu_to_le32(chain);
- }
- /* Give this link TRB to the hardware */
- wmb();
- next->link.control ^= cpu_to_le32(TRB_CYCLE);
+ next->link.control &=
+ cpu_to_le32(~TRB_CHAIN);
+ next->link.control |=
+ cpu_to_le32(chain);
}
+ /* Give this link TRB to the hardware */
+ wmb();
+ next->link.control ^= cpu_to_le32(TRB_CYCLE);
+
/* Toggle the cycle bit after the last ring segment. */
if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
ring->cycle_state = (ring->cycle_state ? 0 : 1);
@@ -237,55 +249,23 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
}
/*
- * Check to see if there's room to enqueue num_trbs on the ring. See rules
- * above.
- * FIXME: this would be simpler and faster if we just kept track of the number
- * of free TRBs in a ring.
+ * Check to see if there's room to enqueue num_trbs on the ring and make sure
+ * enqueue pointer will not advance into dequeue segment. See rules above.
*/
-static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
+static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
unsigned int num_trbs)
{
- int i;
- union xhci_trb *enq = ring->enqueue;
- struct xhci_segment *enq_seg = ring->enq_seg;
- struct xhci_segment *cur_seg;
- unsigned int left_on_ring;
-
- /* If we are currently pointing to a link TRB, advance the
- * enqueue pointer before checking for space */
- while (last_trb(xhci, ring, enq_seg, enq)) {
- enq_seg = enq_seg->next;
- enq = enq_seg->trbs;
- }
-
- /* Check if ring is empty */
- if (enq == ring->dequeue) {
- /* Can't use link trbs */
- left_on_ring = TRBS_PER_SEGMENT - 1;
- for (cur_seg = enq_seg->next; cur_seg != enq_seg;
- cur_seg = cur_seg->next)
- left_on_ring += TRBS_PER_SEGMENT - 1;
-
- /* Always need one TRB free in the ring. */
- left_on_ring -= 1;
- if (num_trbs > left_on_ring) {
- xhci_warn(xhci, "Not enough room on ring; "
- "need %u TRBs, %u TRBs left\n",
- num_trbs, left_on_ring);
- return 0;
- }
- return 1;
- }
- /* Make sure there's an extra empty TRB available */
- for (i = 0; i <= num_trbs; ++i) {
- if (enq == ring->dequeue)
+ int num_trbs_in_deq_seg;
+
+ if (ring->num_trbs_free < num_trbs)
+ return 0;
+
+ if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
+ num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
+ if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
return 0;
- enq++;
- while (last_trb(xhci, ring, enq_seg, enq)) {
- enq_seg = enq_seg->next;
- enq = enq_seg->trbs;
- }
}
+
return 1;
}
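
As a rough illustration of the O(1) check this hunk introduces, here is a minimal user-space sketch; toy_ring and its fields are simplified stand-ins for struct xhci_ring, not the driver's types, and the numbers are made up:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for struct xhci_ring; only what the check needs. */
struct toy_ring {
	unsigned int num_trbs_free;	/* maintained by inc_enq()/inc_deq()-style bookkeeping */
	unsigned int trbs_in_deq_seg;	/* dequeue pointer's offset into its segment */
	bool is_transfer_ring;		/* command/event rings skip the second test */
};

static bool toy_room_on_ring(const struct toy_ring *ring, unsigned int num_trbs)
{
	/* Not enough usable TRBs at all. */
	if (ring->num_trbs_free < num_trbs)
		return false;
	/* Transfer rings only: keep the enqueue pointer from advancing into
	 * the dequeue segment (the same rule as in the hunk above). */
	if (ring->is_transfer_ring &&
	    ring->num_trbs_free < num_trbs + ring->trbs_in_deq_seg)
		return false;
	return true;
}

int main(void)
{
	/* Nearly full ring: 8 usable TRBs left, dequeue is 5 TRBs into its segment. */
	struct toy_ring ring = { 8, 5, true };

	printf("%d\n", toy_room_on_ring(&ring, 10));	/* 0: not enough free TRBs */
	printf("%d\n", toy_room_on_ring(&ring, 6));	/* 0: enqueue would reach the dequeue segment */
	printf("%d\n", toy_room_on_ring(&ring, 3));	/* 1: fits */
	return 0;
}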
@@ -892,6 +872,43 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
xhci_dbg(xhci, "xHCI host controller is dead.\n");
}
+
+static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
+ struct xhci_virt_device *dev,
+ struct xhci_ring *ep_ring,
+ unsigned int ep_index)
+{
+ union xhci_trb *dequeue_temp;
+ int num_trbs_free_temp;
+ bool revert = false;
+
+ num_trbs_free_temp = ep_ring->num_trbs_free;
+ dequeue_temp = ep_ring->dequeue;
+
+ while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
+ /* We have more usable TRBs */
+ ep_ring->num_trbs_free++;
+ ep_ring->dequeue++;
+ if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
+ ep_ring->dequeue)) {
+ if (ep_ring->dequeue ==
+ dev->eps[ep_index].queued_deq_ptr)
+ break;
+ ep_ring->deq_seg = ep_ring->deq_seg->next;
+ ep_ring->dequeue = ep_ring->deq_seg->trbs;
+ }
+ if (ep_ring->dequeue == dequeue_temp) {
+ revert = true;
+ break;
+ }
+ }
+
+ if (revert) {
+ xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
+ ep_ring->num_trbs_free = num_trbs_free_temp;
+ }
+}
+
/*
* When we get a completion for a Set Transfer Ring Dequeue Pointer command,
* we need to clear the set deq pending flag in the endpoint ring state, so that
@@ -973,8 +990,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
/* Update the ring's dequeue segment and dequeue pointer
* to reflect the new position.
*/
- ep_ring->deq_seg = dev->eps[ep_index].queued_deq_seg;
- ep_ring->dequeue = dev->eps[ep_index].queued_deq_ptr;
+ update_ring_for_set_deq_completion(xhci, dev,
+ ep_ring, ep_index);
} else {
xhci_warn(xhci, "Mismatch between completed Set TR Deq "
"Ptr command & xHCI internal state.\n");
@@ -1185,7 +1202,7 @@ bandwidth_change:
xhci->error_bitmask |= 1 << 6;
break;
}
- inc_deq(xhci, xhci->cmd_ring, false);
+ inc_deq(xhci, xhci->cmd_ring);
}
static void handle_vendor_event(struct xhci_hcd *xhci,
@@ -1237,6 +1254,26 @@ static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
return num_similar_speed_ports;
}
+static void handle_device_notification(struct xhci_hcd *xhci,
+ union xhci_trb *event)
+{
+ u32 slot_id;
+ struct usb_device *udev;
+
+ slot_id = TRB_TO_SLOT_ID(event->generic.field[3]);
+ if (!xhci->devs[slot_id]) {
+ xhci_warn(xhci, "Device Notification event for "
+ "unused slot %u\n", slot_id);
+ return;
+ }
+
+ xhci_dbg(xhci, "Device Wake Notification event for slot ID %u\n",
+ slot_id);
+ udev = xhci->devs[slot_id]->udev;
+ if (udev && udev->parent)
+ usb_wakeup_notification(udev->parent, udev->portnum);
+}
+
static void handle_port_status(struct xhci_hcd *xhci,
union xhci_trb *event)
{
@@ -1321,20 +1358,21 @@ static void handle_port_status(struct xhci_hcd *xhci,
}
if (DEV_SUPERSPEED(temp)) {
- xhci_dbg(xhci, "resume SS port %d\n", port_id);
+ xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
+ /* Set a flag to say the port signaled remote wakeup,
+ * so we can tell the difference between the end of
+ * device and host initiated resume.
+ */
+ bus_state->port_remote_wakeup |= 1 << faked_port_index;
+ xhci_test_and_clear_bit(xhci, port_array,
+ faked_port_index, PORT_PLC);
xhci_set_link_state(xhci, port_array, faked_port_index,
XDEV_U0);
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- faked_port_index + 1);
- if (!slot_id) {
- xhci_dbg(xhci, "slot_id is zero\n");
- goto cleanup;
- }
- xhci_ring_device(xhci, slot_id);
- xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
- /* Clear PORT_PLC */
- xhci_test_and_clear_bit(xhci, port_array,
- faked_port_index, PORT_PLC);
+ /* Need to wait until the next link state change
+ * indicates the device is actually in U0.
+ */
+ bogus_port_status = true;
+ goto cleanup;
} else {
xhci_dbg(xhci, "resume HS port %d\n", port_id);
bus_state->resume_done[faked_port_index] = jiffies +
@@ -1345,13 +1383,39 @@ static void handle_port_status(struct xhci_hcd *xhci,
}
}
+ if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
+ DEV_SUPERSPEED(temp)) {
+ xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
+ /* We've just brought the device into U0 through either the
+ * Resume state after a device remote wakeup, or through the
+ * U3Exit state after a host-initiated resume. If it's a device
+ * initiated remote wake, don't pass up the link state change,
+ * so the roothub behavior is consistent with external
+ * USB 3.0 hub behavior.
+ */
+ slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+ faked_port_index + 1);
+ if (slot_id && xhci->devs[slot_id])
+ xhci_ring_device(xhci, slot_id);
+ if (bus_state->port_remote_wakeup & (1 << faked_port_index)) {
+ bus_state->port_remote_wakeup &=
+ ~(1 << faked_port_index);
+ xhci_test_and_clear_bit(xhci, port_array,
+ faked_port_index, PORT_PLC);
+ usb_wakeup_notification(hcd->self.root_hub,
+ faked_port_index + 1);
+ bogus_port_status = true;
+ goto cleanup;
+ }
+ }
+
if (hcd->speed != HCD_USB3)
xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
PORT_PLC);
cleanup:
/* Update event ring dequeue pointer before dropping the lock */
- inc_deq(xhci, xhci->event_ring, true);
+ inc_deq(xhci, xhci->event_ring);
/* Don't make the USB core poll the roothub if we got a bad port status
* change event. Besides, at that point we can't tell which roothub
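
A minimal sketch of the per-port remote-wakeup flag handling added above: a bit is set when the port signals a wake and consumed when the port is next seen in U0. The names and the standalone layout here are illustrative, not the driver's bus_state fields:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t port_remote_wakeup;	/* bit N set: port N signaled a remote wake */

static void port_signaled_wake(unsigned int port)
{
	port_remote_wakeup |= 1u << port;
}

/* Returns true if this U0 transition completes a device-initiated resume;
 * note the bitwise test (&), not a logical one (&&). */
static bool port_reached_u0(unsigned int port)
{
	if (port_remote_wakeup & (1u << port)) {
		port_remote_wakeup &= ~(1u << port);
		return true;
	}
	return false;
}

int main(void)
{
	port_signaled_wake(2);
	printf("port 1: %d\n", port_reached_u0(1));	/* 0: host-initiated resume */
	printf("port 2: %d\n", port_reached_u0(2));	/* 1: device-initiated remote wake */
	return 0;
}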
@@ -1546,8 +1610,8 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
} else {
/* Update ring dequeue pointer */
while (ep_ring->dequeue != td->last_trb)
- inc_deq(xhci, ep_ring, false);
- inc_deq(xhci, ep_ring, false);
+ inc_deq(xhci, ep_ring);
+ inc_deq(xhci, ep_ring);
}
td_cleanup:
@@ -1795,8 +1859,8 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
/* Update ring dequeue pointer */
while (ep_ring->dequeue != td->last_trb)
- inc_deq(xhci, ep_ring, false);
- inc_deq(xhci, ep_ring, false);
+ inc_deq(xhci, ep_ring);
+ inc_deq(xhci, ep_ring);
return finish_td(xhci, td, NULL, event, ep, status, true);
}
@@ -2183,7 +2247,7 @@ cleanup:
* Will roll back to continue process missed tds.
*/
if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
- inc_deq(xhci, xhci->event_ring, true);
+ inc_deq(xhci, xhci->event_ring);
}
if (ret) {
@@ -2277,6 +2341,9 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
else
update_ptrs = 0;
break;
+ case TRB_TYPE(TRB_DEV_NOTE):
+ handle_device_notification(xhci, event);
+ break;
default:
if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
TRB_TYPE(48))
@@ -2295,7 +2362,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
if (update_ptrs)
/* Update SW event ring dequeue pointer */
- inc_deq(xhci, xhci->event_ring, true);
+ inc_deq(xhci, xhci->event_ring);
/* Are there more items on the event ring? Caller will call us again to
* check.
@@ -2346,11 +2413,11 @@ hw_died:
/* FIXME when MSI-X is supported and there are multiple vectors */
/* Clear the MSI-X event interrupt status */
- if (hcd->irq != -1) {
+ if (hcd->irq) {
u32 irq_pending;
/* Acknowledge the PCI interrupt */
irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
- irq_pending |= 0x3;
+ irq_pending |= IMAN_IP;
xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
}
@@ -2411,7 +2478,7 @@ irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
* prepare_transfer()?
*/
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
- bool consumer, bool more_trbs_coming, bool isoc,
+ bool more_trbs_coming,
u32 field1, u32 field2, u32 field3, u32 field4)
{
struct xhci_generic_trb *trb;
@@ -2421,7 +2488,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
trb->field[1] = cpu_to_le32(field2);
trb->field[2] = cpu_to_le32(field3);
trb->field[3] = cpu_to_le32(field4);
- inc_enq(xhci, ring, consumer, more_trbs_coming, isoc);
+ inc_enq(xhci, ring, more_trbs_coming);
}
/*
@@ -2429,8 +2496,10 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
* FIXME allocate segments if the ring is full.
*/
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
- u32 ep_state, unsigned int num_trbs, bool isoc, gfp_t mem_flags)
+ u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
+ unsigned int num_trbs_needed;
+
/* Make sure the endpoint has been added to xHC schedule */
switch (ep_state) {
case EP_STATE_DISABLED:
@@ -2458,11 +2527,25 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
*/
return -EINVAL;
}
- if (!room_on_ring(xhci, ep_ring, num_trbs)) {
- /* FIXME allocate more room */
- xhci_err(xhci, "ERROR no room on ep ring\n");
- return -ENOMEM;
- }
+
+ while (1) {
+ if (room_on_ring(xhci, ep_ring, num_trbs))
+ break;
+
+ if (ep_ring == xhci->cmd_ring) {
+ xhci_err(xhci, "Do not support expand command ring\n");
+ return -ENOMEM;
+ }
+
+ xhci_dbg(xhci, "ERROR no room on ep ring, "
+ "try ring expansion\n");
+ num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
+ if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
+ mem_flags)) {
+ xhci_err(xhci, "Ring expansion failed\n");
+ return -ENOMEM;
+ }
+ };
if (enqueue_is_link_trb(ep_ring)) {
struct xhci_ring *ring = ep_ring;
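
A stripped-down sketch of the retry loop prepare_ring now uses in the hunk above; the toy_* names are stand-ins, toy_expand() merely models xhci_ring_expansion() growing the free-TRB count, and the segment bookkeeping from the earlier sketch is omitted:

#include <stdbool.h>
#include <errno.h>
#include <stdio.h>

struct toy_ring {
	unsigned int num_trbs_free;
	bool is_cmd_ring;	/* the command ring is never expanded */
};

static bool toy_room_on_ring(const struct toy_ring *ring, unsigned int num_trbs)
{
	return ring->num_trbs_free >= num_trbs;
}

/* Pretend to link in enough new segments for at least num_trbs_needed TRBs. */
static int toy_expand(struct toy_ring *ring, unsigned int num_trbs_needed)
{
	ring->num_trbs_free += num_trbs_needed;
	return 0;
}

static int toy_prepare_ring(struct toy_ring *ring, unsigned int num_trbs)
{
	while (!toy_room_on_ring(ring, num_trbs)) {
		if (ring->is_cmd_ring)
			return -ENOMEM;
		if (toy_expand(ring, num_trbs - ring->num_trbs_free))
			return -ENOMEM;
	}
	return 0;
}

int main(void)
{
	struct toy_ring ep_ring = { 4, false };
	struct toy_ring cmd_ring = { 4, true };

	printf("transfer ring: %d\n", toy_prepare_ring(&ep_ring, 20));	/* 0: expanded until it fits */
	printf("command ring:  %d\n", toy_prepare_ring(&cmd_ring, 20));	/* -ENOMEM: never expanded */
	return 0;
}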
@@ -2474,8 +2557,9 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
/* If we're not dealing with 0.95 hardware or isoc rings
* on AMD 0.96 host, clear the chain bit.
*/
- if (!xhci_link_trb_quirk(xhci) && !(isoc &&
- (xhci->quirks & XHCI_AMD_0x96_HOST)))
+ if (!xhci_link_trb_quirk(xhci) &&
+ !(ring->type == TYPE_ISOC &&
+ (xhci->quirks & XHCI_AMD_0x96_HOST)))
next->link.control &= cpu_to_le32(~TRB_CHAIN);
else
next->link.control |= cpu_to_le32(TRB_CHAIN);
@@ -2503,7 +2587,6 @@ static int prepare_transfer(struct xhci_hcd *xhci,
unsigned int num_trbs,
struct urb *urb,
unsigned int td_index,
- bool isoc,
gfp_t mem_flags)
{
int ret;
@@ -2521,7 +2604,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
ret = prepare_ring(xhci, ep_ring,
le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
- num_trbs, isoc, mem_flags);
+ num_trbs, mem_flags);
if (ret)
return ret;
@@ -2651,7 +2734,7 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
urb->dev->speed == USB_SPEED_FULL)
urb->interval /= 8;
}
- return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
+ return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
/*
@@ -2731,7 +2814,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
- num_trbs, urb, 0, false, mem_flags);
+ num_trbs, urb, 0, mem_flags);
if (trb_buff_len < 0)
return trb_buff_len;
@@ -2819,7 +2902,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
more_trbs_coming = true;
else
more_trbs_coming = false;
- queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
+ queue_trb(xhci, ep_ring, more_trbs_coming,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
@@ -2901,7 +2984,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
- num_trbs, urb, 0, false, mem_flags);
+ num_trbs, urb, 0, mem_flags);
if (ret < 0)
return ret;
@@ -2973,7 +3056,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
more_trbs_coming = true;
else
more_trbs_coming = false;
- queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
+ queue_trb(xhci, ep_ring, more_trbs_coming,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
@@ -3030,7 +3113,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
num_trbs++;
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
- num_trbs, urb, 0, false, mem_flags);
+ num_trbs, urb, 0, mem_flags);
if (ret < 0)
return ret;
@@ -3063,7 +3146,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
}
- queue_trb(xhci, ep_ring, false, true, false,
+ queue_trb(xhci, ep_ring, true,
setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
TRB_LEN(8) | TRB_INTR_TARGET(0),
@@ -3083,7 +3166,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (urb->transfer_buffer_length > 0) {
if (setup->bRequestType & USB_DIR_IN)
field |= TRB_DIR_IN;
- queue_trb(xhci, ep_ring, false, true, false,
+ queue_trb(xhci, ep_ring, true,
lower_32_bits(urb->transfer_dma),
upper_32_bits(urb->transfer_dma),
length_field,
@@ -3099,7 +3182,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field = 0;
else
field = TRB_DIR_IN;
- queue_trb(xhci, ep_ring, false, false, false,
+ queue_trb(xhci, ep_ring, false,
0,
0,
TRB_INTR_TARGET(0),
@@ -3239,8 +3322,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
- urb->stream_id, trbs_per_td, urb, i, true,
- mem_flags);
+ urb->stream_id, trbs_per_td, urb, i, mem_flags);
if (ret < 0) {
if (i == 0)
return ret;
@@ -3310,7 +3392,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
remainder |
TRB_INTR_TARGET(0);
- queue_trb(xhci, ep_ring, false, more_trbs_coming, true,
+ queue_trb(xhci, ep_ring, more_trbs_coming,
lower_32_bits(addr),
upper_32_bits(addr),
length_field,
@@ -3357,6 +3439,7 @@ cleanup:
ep_ring->enqueue = urb_priv->td[0]->first_trb;
ep_ring->enq_seg = urb_priv->td[0]->start_seg;
ep_ring->cycle_state = start_cycle;
+ ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
return ret;
}
@@ -3393,7 +3476,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
* Do not insert any td of the urb to the ring if the check failed.
*/
ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
- num_trbs, true, mem_flags);
+ num_trbs, mem_flags);
if (ret)
return ret;
@@ -3429,7 +3512,9 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
urb->dev->speed == USB_SPEED_FULL)
urb->interval /= 8;
}
- return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
+ ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
+
+ return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
}
/**** Command Ring Operations ****/
@@ -3452,7 +3537,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
reserved_trbs++;
ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
- reserved_trbs, false, GFP_ATOMIC);
+ reserved_trbs, GFP_ATOMIC);
if (ret < 0) {
xhci_err(xhci, "ERR: No room for command on command ring\n");
if (command_must_succeed)
@@ -3460,8 +3545,8 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
"unfailable commands failed.\n");
return ret;
}
- queue_trb(xhci, xhci->cmd_ring, false, false, false, field1, field2,
- field3, field4 | xhci->cmd_ring->cycle_state);
+ queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
+ field4 | xhci->cmd_ring->cycle_state);
return 0;
}