| author | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-03-25 03:53:42 -0300 |
|---|---|---|
| committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-03-25 03:53:42 -0300 |
| commit | 03dd4cb26d967f9588437b0fc9cc0e8353322bb7 (patch) | |
| tree | fa581f6dc1c0596391690d1f67eceef3af8246dc /drivers/usb/dwc2/hcd_ddma.c | |
| parent | d4e493caf788ef44982e131ff9c786546904d934 (diff) | |
Linux-libre 4.5-gnu
Diffstat (limited to 'drivers/usb/dwc2/hcd_ddma.c')
-rw-r--r--  drivers/usb/dwc2/hcd_ddma.c  253

1 file changed, 206 insertions, 47 deletions
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index 78993aba9..a41274aa5 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -87,22 +87,31 @@ static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
 static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
 				gfp_t flags)
 {
-	qh->desc_list = dma_alloc_coherent(hsotg->dev,
-					   sizeof(struct dwc2_hcd_dma_desc) *
-					   dwc2_max_desc_num(qh), &qh->desc_list_dma,
-					   flags);
+	struct kmem_cache *desc_cache;
 
+	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC
+	    && qh->dev_speed == USB_SPEED_HIGH)
+		desc_cache = hsotg->desc_hsisoc_cache;
+	else
+		desc_cache = hsotg->desc_gen_cache;
+
+	qh->desc_list_sz = sizeof(struct dwc2_hcd_dma_desc) *
+						dwc2_max_desc_num(qh);
+
+	qh->desc_list = kmem_cache_zalloc(desc_cache, flags | GFP_DMA);
 	if (!qh->desc_list)
 		return -ENOMEM;
 
-	memset(qh->desc_list, 0,
-	       sizeof(struct dwc2_hcd_dma_desc) * dwc2_max_desc_num(qh));
+	qh->desc_list_dma = dma_map_single(hsotg->dev, qh->desc_list,
+					   qh->desc_list_sz,
+					   DMA_TO_DEVICE);
 
 	qh->n_bytes = kzalloc(sizeof(u32) * dwc2_max_desc_num(qh), flags);
 	if (!qh->n_bytes) {
-		dma_free_coherent(hsotg->dev, sizeof(struct dwc2_hcd_dma_desc)
-				  * dwc2_max_desc_num(qh), qh->desc_list,
-				  qh->desc_list_dma);
+		dma_unmap_single(hsotg->dev, qh->desc_list_dma,
+				 qh->desc_list_sz,
+				 DMA_FROM_DEVICE);
+		kfree(qh->desc_list);
 		qh->desc_list = NULL;
 		return -ENOMEM;
 	}
@@ -112,10 +121,18 @@ static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
 
 static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 {
+	struct kmem_cache *desc_cache;
+
+	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC
+	    && qh->dev_speed == USB_SPEED_HIGH)
+		desc_cache = hsotg->desc_hsisoc_cache;
+	else
+		desc_cache = hsotg->desc_gen_cache;
+
 	if (qh->desc_list) {
-		dma_free_coherent(hsotg->dev, sizeof(struct dwc2_hcd_dma_desc)
-				  * dwc2_max_desc_num(qh), qh->desc_list,
-				  qh->desc_list_dma);
+		dma_unmap_single(hsotg->dev, qh->desc_list_dma,
+				 qh->desc_list_sz, DMA_FROM_DEVICE);
+		kmem_cache_free(desc_cache, qh->desc_list);
 		qh->desc_list = NULL;
 	}
 
@@ -128,21 +145,20 @@ static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
 	if (hsotg->frame_list)
 		return 0;
 
-	hsotg->frame_list = dma_alloc_coherent(hsotg->dev,
-					       4 * FRLISTEN_64_SIZE,
-					       &hsotg->frame_list_dma,
-					       mem_flags);
+	hsotg->frame_list_sz = 4 * FRLISTEN_64_SIZE;
+	hsotg->frame_list = kzalloc(hsotg->frame_list_sz, GFP_ATOMIC | GFP_DMA);
 	if (!hsotg->frame_list)
 		return -ENOMEM;
 
-	memset(hsotg->frame_list, 0, 4 * FRLISTEN_64_SIZE);
+	hsotg->frame_list_dma = dma_map_single(hsotg->dev, hsotg->frame_list,
+					       hsotg->frame_list_sz,
+					       DMA_TO_DEVICE);
+
 	return 0;
 }
 
 static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
 {
-	u32 *frame_list;
-	dma_addr_t frame_list_dma;
 	unsigned long flags;
 
 	spin_lock_irqsave(&hsotg->lock, flags);
@@ -152,14 +168,14 @@ static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
 		return;
 	}
 
-	frame_list = hsotg->frame_list;
-	frame_list_dma = hsotg->frame_list_dma;
+	dma_unmap_single(hsotg->dev, hsotg->frame_list_dma,
+			 hsotg->frame_list_sz, DMA_FROM_DEVICE);
+
+	kfree(hsotg->frame_list);
 	hsotg->frame_list = NULL;
 
 	spin_unlock_irqrestore(&hsotg->lock, flags);
-
-	dma_free_coherent(hsotg->dev, 4 * FRLISTEN_64_SIZE, frame_list,
-			  frame_list_dma);
 }
 
 static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
@@ -249,6 +265,15 @@ static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
 		j = (j + inc) & (FRLISTEN_64_SIZE - 1);
 	} while (j != i);
 
+	/*
+	 * Sync frame list since controller will access it if periodic
+	 * channel is currently enabled.
+	 */
+	dma_sync_single_for_device(hsotg->dev,
+				   hsotg->frame_list_dma,
+				   hsotg->frame_list_sz,
+				   DMA_TO_DEVICE);
+
 	if (!enable)
 		return;
 
@@ -278,6 +303,7 @@ static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
 		hsotg->non_periodic_channels--;
 	} else {
 		dwc2_update_frame_list(hsotg, qh, 0);
+		hsotg->available_host_channels++;
 	}
 
 	/*
@@ -360,6 +386,8 @@ err0:
  */
 void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 {
+	unsigned long flags;
+
 	dwc2_desc_list_free(hsotg, qh);
 
 	/*
@@ -369,8 +397,10 @@ void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
 	 * when it comes here from endpoint disable routine
 	 * channel remains assigned.
 	 */
+	spin_lock_irqsave(&hsotg->lock, flags);
 	if (qh->channel)
 		dwc2_release_channel_ddma(hsotg, qh);
+	spin_unlock_irqrestore(&hsotg->lock, flags);
 
 	if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
 	     qh->ep_type == USB_ENDPOINT_XFER_INT) &&
@@ -524,14 +554,23 @@ static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
 	dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
 			   HOST_DMA_ISOC_NBYTES_MASK;
 
+	/* Set active bit */
+	dma_desc->status |= HOST_DMA_A;
+
+	qh->ntd++;
+	qtd->isoc_frame_index_last++;
+
 #ifdef ISOC_URB_GIVEBACK_ASAP
 	/* Set IOC for each descriptor corresponding to last frame of URB */
 	if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
 		dma_desc->status |= HOST_DMA_IOC;
 #endif
 
-	qh->ntd++;
-	qtd->isoc_frame_index_last++;
+	dma_sync_single_for_device(hsotg->dev,
+				   qh->desc_list_dma +
+				   (idx * sizeof(struct dwc2_hcd_dma_desc)),
+				   sizeof(struct dwc2_hcd_dma_desc),
+				   DMA_TO_DEVICE);
 }
 
 static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
@@ -539,11 +578,32 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
 {
 	struct dwc2_qtd *qtd;
 	u32 max_xfer_size;
-	u16 idx, inc, n_desc, ntd_max = 0;
+	u16 idx, inc, n_desc = 0, ntd_max = 0;
+	u16 cur_idx;
+	u16 next_idx;
 
 	idx = qh->td_last;
 	inc = qh->interval;
-	n_desc = 0;
+	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
+	cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
+	next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);
+
+	/*
+	 * Ensure current frame number didn't overstep last scheduled
+	 * descriptor. If it happens, the only way to recover is to move
+	 * qh->td_last to current frame number + 1.
+	 * So that next isoc descriptor will be scheduled on frame number + 1
+	 * and not on a past frame.
+	 */
+	if (dwc2_frame_idx_num_gt(cur_idx, next_idx) || (cur_idx == next_idx)) {
+		if (inc < 32) {
+			dev_vdbg(hsotg->dev,
+				 "current frame number overstep last descriptor\n");
+			qh->td_last = dwc2_desclist_idx_inc(cur_idx, inc,
+							    qh->dev_speed);
+			idx = qh->td_last;
+		}
+	}
 
 	if (qh->interval) {
 		ntd_max = (dwc2_max_desc_num(qh) + qh->interval - 1) /
@@ -556,15 +616,20 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
 			MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;
 
 	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
+		if (qtd->in_process &&
+		    qtd->isoc_frame_index_last ==
+		    qtd->urb->packet_count)
+			continue;
+
+		qtd->isoc_td_first = idx;
 		while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
 						qtd->urb->packet_count) {
-			if (n_desc > 1)
-				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
 			dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
 						     max_xfer_size, idx);
 			idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
 			n_desc++;
 		}
+		qtd->isoc_td_last = idx;
 		qtd->in_process = 1;
 	}
 
@@ -575,6 +640,11 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
 	if (qh->ntd == ntd_max) {
 		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
 		qh->desc_list[idx].status |= HOST_DMA_IOC;
+		dma_sync_single_for_device(hsotg->dev,
+					   qh->desc_list_dma + (idx *
+					   sizeof(struct dwc2_hcd_dma_desc)),
+					   sizeof(struct dwc2_hcd_dma_desc),
+					   DMA_TO_DEVICE);
 	}
 #else
 	/*
@@ -604,13 +674,12 @@ static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
 
 	idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
 	qh->desc_list[idx].status |= HOST_DMA_IOC;
+	dma_sync_single_for_device(hsotg->dev,
+				   qh->desc_list_dma +
+				   (idx * sizeof(struct dwc2_hcd_dma_desc)),
+				   sizeof(struct dwc2_hcd_dma_desc),
+				   DMA_TO_DEVICE);
 #endif
-
-	if (n_desc) {
-		qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
-		if (n_desc > 1)
-			qh->desc_list[0].status |= HOST_DMA_A;
-	}
 }
 
 static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
@@ -647,6 +716,12 @@ static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
 
 	dma_desc->buf = (u32)chan->xfer_dma;
 
+	dma_sync_single_for_device(hsotg->dev,
+				   qh->desc_list_dma +
+				   (n_desc * sizeof(struct dwc2_hcd_dma_desc)),
+				   sizeof(struct dwc2_hcd_dma_desc),
+				   DMA_TO_DEVICE);
+
 	/*
	 * Last (or only) descriptor of IN transfer with actual size less
	 * than MaxPacket
@@ -697,6 +772,12 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
 				 "set A bit in desc %d (%p)\n",
 				 n_desc - 1, &qh->desc_list[n_desc - 1]);
+			dma_sync_single_for_device(hsotg->dev,
+						   qh->desc_list_dma +
+						   ((n_desc - 1) *
+						   sizeof(struct dwc2_hcd_dma_desc)),
+						   sizeof(struct dwc2_hcd_dma_desc),
+						   DMA_TO_DEVICE);
 		}
 		dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
 		dev_vdbg(hsotg->dev,
@@ -722,10 +803,19 @@ static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
 			HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
 		dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
 			 n_desc - 1, &qh->desc_list[n_desc - 1]);
+		dma_sync_single_for_device(hsotg->dev,
+					   qh->desc_list_dma + (n_desc - 1) *
+					   sizeof(struct dwc2_hcd_dma_desc),
+					   sizeof(struct dwc2_hcd_dma_desc),
+					   DMA_TO_DEVICE);
 		if (n_desc > 1) {
 			qh->desc_list[0].status |= HOST_DMA_A;
 			dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
 				 &qh->desc_list[0]);
+			dma_sync_single_for_device(hsotg->dev,
+						   qh->desc_list_dma,
+						   sizeof(struct dwc2_hcd_dma_desc),
+						   DMA_TO_DEVICE);
 		}
 		chan->ntd = n_desc;
 	}
@@ -800,7 +890,7 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
 					struct dwc2_qtd *qtd,
 					struct dwc2_qh *qh, u16 idx)
 {
-	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
+	struct dwc2_hcd_dma_desc *dma_desc;
 	struct dwc2_hcd_iso_packet_desc *frame_desc;
 	u16 remain = 0;
 	int rc = 0;
@@ -808,6 +898,13 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
 	if (!qtd->urb)
 		return -EINVAL;
 
+	dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
+				sizeof(struct dwc2_hcd_dma_desc)),
+				sizeof(struct dwc2_hcd_dma_desc),
+				DMA_FROM_DEVICE);
+
+	dma_desc = &qh->desc_list[idx];
+
 	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
 	dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
 	if (chan->ep_is_in)
@@ -911,17 +1008,51 @@ static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
 	list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
 		if (!qtd->in_process)
 			break;
+
+		/*
+		 * Ensure idx corresponds to descriptor where first urb of this
+		 * qtd was added. In fact, during isoc desc init, dwc2 may skip
+		 * an index if current frame number is already over this index.
+		 */
+		if (idx != qtd->isoc_td_first) {
+			dev_vdbg(hsotg->dev,
+				 "try to complete %d instead of %d\n",
+				 idx, qtd->isoc_td_first);
+			idx = qtd->isoc_td_first;
+		}
+
 		do {
+			struct dwc2_qtd *qtd_next;
+			u16 cur_idx;
+
 			rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd,
 							  qh, idx);
 			if (rc < 0)
 				return;
 			idx = dwc2_desclist_idx_inc(idx, qh->interval,
 						    chan->speed);
-			if (rc == DWC2_CMPL_STOP)
-				goto stop_scan;
+			if (!rc)
+				continue;
+
 			if (rc == DWC2_CMPL_DONE)
 				break;
+
+			/* rc == DWC2_CMPL_STOP */
+
+			if (qh->interval >= 32)
+				goto stop_scan;
+
+			qh->td_first = idx;
+			cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
+			qtd_next = list_first_entry(&qh->qtd_list,
+						    struct dwc2_qtd,
+						    qtd_list_entry);
+			if (dwc2_frame_idx_num_gt(cur_idx,
+						  qtd_next->isoc_td_last))
+				break;
+
+			goto stop_scan;
+
 		} while (idx != qh->td_first);
 	}
 
@@ -1029,6 +1160,12 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
 	if (!urb)
 		return -EINVAL;
 
+	dma_sync_single_for_cpu(hsotg->dev,
+				qh->desc_list_dma + (desc_num *
+				sizeof(struct dwc2_hcd_dma_desc)),
+				sizeof(struct dwc2_hcd_dma_desc),
+				DMA_FROM_DEVICE);
+
 	dma_desc = &qh->desc_list[desc_num];
 	n_bytes = qh->n_bytes[desc_num];
 	dev_vdbg(hsotg->dev,
@@ -1040,8 +1177,8 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
 	if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
 		dwc2_host_complete(hsotg, qtd, urb->status);
 		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
-		dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n",
-			 failed, *xfer_done, urb->status);
+		dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x\n",
+			 failed, *xfer_done);
 		return failed;
 	}
 
@@ -1096,21 +1233,23 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
 
 	list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
 		int i;
+		int qtd_desc_count;
 
 		qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
 		xfer_done = 0;
+		qtd_desc_count = qtd->n_desc;
 
-		for (i = 0; i < qtd->n_desc; i++) {
+		for (i = 0; i < qtd_desc_count; i++) {
 			if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
 						       desc_num, halt_status,
-						       &xfer_done)) {
-				qtd = NULL;
-				break;
-			}
+						       &xfer_done))
+				goto stop_scan;
+
 			desc_num++;
 		}
 	}
 
+stop_scan:
 	if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
 		/*
		 * Resetting the data toggle for bulk and interrupt endpoints
@@ -1118,7 +1257,7 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
 		 */
 		if (halt_status == DWC2_HC_XFER_STALL)
 			qh->data_toggle = DWC2_HC_PID_DATA0;
-		else if (qtd)
+		else
 			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
 	}
 
@@ -1165,6 +1304,21 @@ void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
 		/* Release the channel if halted or session completed */
 		if (halt_status != DWC2_HC_XFER_COMPLETE ||
 		    list_empty(&qh->qtd_list)) {
+			struct dwc2_qtd *qtd, *qtd_tmp;
+
+			/*
+			 * Kill all remainings QTDs since channel has been
+			 * halted.
+			 */
+			list_for_each_entry_safe(qtd, qtd_tmp,
+						 &qh->qtd_list,
+						 qtd_list_entry) {
+				dwc2_host_complete(hsotg, qtd,
+						   -ECONNRESET);
+				dwc2_hcd_qtd_unlink_and_free(hsotg,
+							     qtd, qh);
+			}
+
 			/* Halt the channel if session completed */
 			if (halt_status == DWC2_HC_XFER_COMPLETE)
 				dwc2_hc_halt(hsotg, chan, halt_status);
@@ -1174,7 +1328,12 @@ void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
 
 			/* Keep in assigned schedule to continue transfer */
 			list_move(&qh->qh_list_entry,
 				  &hsotg->periodic_sched_assigned);
-			continue_isoc_xfer = 1;
+			/*
+			 * If channel has been halted during giveback of urb
+			 * then prevent any new scheduling.
+			 */
+			if (!chan->halt_status)
+				continue_isoc_xfer = 1;
 		}
 		/*
		 * Todo: Consider the case when period exceeds FrameList size.
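The recurring change in this diff is a conversion from coherent DMA (`dma_alloc_coherent()`) to streaming DMA: descriptor lists and the frame list now come from ordinary kernel memory (a `kmem_cache` or `kzalloc()`, allocated with `GFP_DMA`), are mapped once with `dma_map_single()`, and every hand-off between CPU and controller is bracketed by `dma_sync_single_for_device()` / `dma_sync_single_for_cpu()`. Below is a minimal, self-contained sketch of that pattern; it is not dwc2 code, and the `my_*` names are illustrative only.

```c
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>

struct my_desc {
	u32 status;
	u32 buf;
};

struct my_ring {
	struct my_desc *descs;	/* CPU address, from the kmem cache */
	dma_addr_t descs_dma;	/* bus address, from dma_map_single() */
	size_t sz;
};

/*
 * 'cache' is assumed to have been created elsewhere with an object size
 * of at least ndesc * sizeof(struct my_desc), e.g. via kmem_cache_create().
 */
static int my_ring_alloc(struct device *dev, struct kmem_cache *cache,
			 struct my_ring *ring, unsigned int ndesc)
{
	ring->sz = ndesc * sizeof(struct my_desc);

	/* Cache-backed allocation replaces dma_alloc_coherent() */
	ring->descs = kmem_cache_zalloc(cache, GFP_KERNEL | GFP_DMA);
	if (!ring->descs)
		return -ENOMEM;

	/* One streaming mapping for the lifetime of the ring */
	ring->descs_dma = dma_map_single(dev, ring->descs, ring->sz,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ring->descs_dma)) {
		kmem_cache_free(cache, ring->descs);
		ring->descs = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* CPU fills one descriptor, then makes it visible to the device */
static void my_ring_write(struct device *dev, struct my_ring *ring,
			  unsigned int idx, u32 status, u32 buf)
{
	ring->descs[idx].status = status;
	ring->descs[idx].buf = buf;
	dma_sync_single_for_device(dev,
				   ring->descs_dma +
				   idx * sizeof(struct my_desc),
				   sizeof(struct my_desc), DMA_TO_DEVICE);
}

/* CPU reads back a descriptor the device may have written */
static u32 my_ring_read_status(struct device *dev, struct my_ring *ring,
			       unsigned int idx)
{
	dma_sync_single_for_cpu(dev,
				ring->descs_dma +
				idx * sizeof(struct my_desc),
				sizeof(struct my_desc), DMA_FROM_DEVICE);
	return ring->descs[idx].status;
}

static void my_ring_free(struct device *dev, struct kmem_cache *cache,
			 struct my_ring *ring)
{
	dma_unmap_single(dev, ring->descs_dma, ring->sz, DMA_BIDIRECTIONAL);
	kmem_cache_free(cache, ring->descs);
	ring->descs = NULL;
}
```

One detail worth noting: the patch itself maps the lists with `DMA_TO_DEVICE` and unmaps them with `DMA_FROM_DEVICE`, relying on the explicit per-access syncs in between; a single `DMA_BIDIRECTIONAL` mapping, as sketched above, is the more conventional way to express a buffer that both the CPU and the device update.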