@@ -27,6 +27,7 @@
  */
 
 #include "tusb_option.h"
+#include "common/tusb_fifo.h"
 
 #if CFG_TUSB_MCU == OPT_MCU_ESP32S2 && TUSB_OPT_DEVICE_ENABLED
 
@@ -59,6 +60,7 @@
 
 typedef struct {
   uint8_t *buffer;
+  // tu_fifo_t * ff; // TODO support dcd_edpt_xfer_fifo API
   uint16_t total_len;
   uint16_t queued_len;
   uint16_t max_size;
@@ -371,6 +373,7 @@ bool dcd_edpt_xfer(uint8_t rhport, uint8_t ep_addr, uint8_t *buffer, uint16_t to
 
   xfer_ctl_t *xfer = XFER_CTL_BASE(epnum, dir);
   xfer->buffer = buffer;
+  // xfer->ff = NULL; // TODO support dcd_edpt_xfer_fifo API
   xfer->total_len = total_bytes;
   xfer->queued_len = 0;
   xfer->short_packet = false;
@@ -406,6 +409,56 @@ bool dcd_edpt_xfer(uint8_t rhport, uint8_t ep_addr, uint8_t *buffer, uint16_t to
   return true;
 }
 
+#if 0 // TODO support dcd_edpt_xfer_fifo API
+bool dcd_edpt_xfer_fifo(uint8_t rhport, uint8_t ep_addr, tu_fifo_t *ff, uint16_t total_bytes)
+{
+  (void)rhport;
+
+  // USB buffers always work in bytes so to avoid unnecessary divisions we demand item_size = 1
+  TU_ASSERT(ff->item_size == 1);
+
+  uint8_t const epnum = tu_edpt_number(ep_addr);
+  uint8_t const dir = tu_edpt_dir(ep_addr);
+
+  xfer_ctl_t *xfer = XFER_CTL_BASE(epnum, dir);
+  xfer->buffer = NULL;
+  xfer->ff = ff;
+  xfer->total_len = total_bytes;
+  xfer->queued_len = 0;
+  xfer->short_packet = false;
+
+  uint16_t num_packets = (total_bytes / xfer->max_size);
+  uint8_t short_packet_size = total_bytes % xfer->max_size;
+
+  // Zero-size packet is special case.
+  if (short_packet_size > 0 || (total_bytes == 0)) {
+    num_packets++;
+  }
+
+  ESP_LOGV(TAG, "Transfer <-> EP%i, %s, pkgs: %i, bytes: %i",
+           epnum, ((dir == TUSB_DIR_IN) ? "USB0.HOST (in)" : "HOST->DEV (out)"),
+           num_packets, total_bytes);
+
+  // IN and OUT endpoint xfers are interrupt-driven, we just schedule them
+  // here.
+  if (dir == TUSB_DIR_IN) {
+    // A full IN transfer (multiple packets, possibly) triggers XFRC.
+    USB0.in_ep_reg[epnum].dieptsiz = (num_packets << USB_D_PKTCNT0_S) | total_bytes;
+    USB0.in_ep_reg[epnum].diepctl |= USB_D_EPENA1_M | USB_D_CNAK1_M; // Enable | CNAK
+
+    // Enable fifo empty interrupt only if there are something to put in the fifo.
+    if (total_bytes != 0) {
+      USB0.dtknqr4_fifoemptymsk |= (1 << epnum);
+    }
+  } else {
+    // Each complete packet for OUT xfers triggers XFRC.
+    USB0.out_ep_reg[epnum].doeptsiz |= USB_PKTCNT0_M | ((xfer->max_size & USB_XFERSIZE0_V) << USB_XFERSIZE0_S);
+    USB0.out_ep_reg[epnum].doepctl |= USB_EPENA0_M | USB_CNAK0_M;
+  }
+  return true;
+}
+#endif
+
 void dcd_edpt_stall(uint8_t rhport, uint8_t ep_addr)
 {
   (void)rhport;
@@ -514,35 +567,46 @@ static void receive_packet(xfer_ctl_t *xfer, /* usb_out_endpoint_t * out_ep, */
     to_recv_size = (xfer_size > xfer->max_size) ? xfer->max_size : xfer_size;
   }
 
-  uint8_t to_recv_rem = to_recv_size % 4;
-  uint16_t to_recv_size_aligned = to_recv_size - to_recv_rem;
-
-  // Do not assume xfer buffer is aligned.
-  uint8_t *base = (xfer->buffer + xfer->queued_len);
-
-  // This for loop always runs at least once- skip if less than 4 bytes
-  // to collect.
-  if (to_recv_size >= 4) {
-    for (uint16_t i = 0; i < to_recv_size_aligned; i += 4) {
-      uint32_t tmp = (*rx_fifo);
-      base[i] = tmp & 0x000000FF;
-      base[i + 1] = (tmp & 0x0000FF00) >> 8;
-      base[i + 2] = (tmp & 0x00FF0000) >> 16;
-      base[i + 3] = (tmp & 0xFF000000) >> 24;
-    }
+  // Common buffer read
+#if 0 // TODO support dcd_edpt_xfer_fifo API
+  if (xfer->ff)
+  {
+    // Ring buffer
+    tu_fifo_write_n_const_addr_full_words(xfer->ff, (const void *) rx_fifo, to_recv_size);
   }
+  else
+#endif
+  {
+    uint8_t to_recv_rem = to_recv_size % 4;
+    uint16_t to_recv_size_aligned = to_recv_size - to_recv_rem;
+
+    // Do not assume xfer buffer is aligned.
+    uint8_t *base = (xfer->buffer + xfer->queued_len);
+
+    // This for loop always runs at least once- skip if less than 4 bytes
+    // to collect.
+    if (to_recv_size >= 4) {
+      for (uint16_t i = 0; i < to_recv_size_aligned; i += 4) {
+        uint32_t tmp = (*rx_fifo);
+        base[i] = tmp & 0x000000FF;
+        base[i + 1] = (tmp & 0x0000FF00) >> 8;
+        base[i + 2] = (tmp & 0x00FF0000) >> 16;
+        base[i + 3] = (tmp & 0xFF000000) >> 24;
+      }
+    }
 
-  // Do not read invalid bytes from RX FIFO.
-  if (to_recv_rem != 0) {
-    uint32_t tmp = (*rx_fifo);
-    uint8_t *last_32b_bound = base + to_recv_size_aligned;
+    // Do not read invalid bytes from RX FIFO.
+    if (to_recv_rem != 0) {
+      uint32_t tmp = (*rx_fifo);
+      uint8_t *last_32b_bound = base + to_recv_size_aligned;
 
-    last_32b_bound[0] = tmp & 0x000000FF;
-    if (to_recv_rem > 1) {
-      last_32b_bound[1] = (tmp & 0x0000FF00) >> 8;
-    }
-    if (to_recv_rem > 2) {
-      last_32b_bound[2] = (tmp & 0x00FF0000) >> 16;
+      last_32b_bound[0] = tmp & 0x000000FF;
+      if (to_recv_rem > 1) {
+        last_32b_bound[1] = (tmp & 0x0000FF00) >> 8;
+      }
+      if (to_recv_rem > 2) {
+        last_32b_bound[2] = (tmp & 0x00FF0000) >> 16;
+      }
     }
   }
 
@@ -562,37 +626,47 @@ static void transmit_packet(xfer_ctl_t *xfer, volatile usb_in_endpoint_t *in_ep,
 
   xfer->queued_len = xfer->total_len - remaining;
   uint16_t to_xfer_size = (remaining > xfer->max_size) ? xfer->max_size : remaining;
-  uint8_t to_xfer_rem = to_xfer_size % 4;
-  uint16_t to_xfer_size_aligned = to_xfer_size - to_xfer_rem;
-
-  // Buffer might not be aligned to 32b, so we need to force alignment
-  // by copying to a temp var.
-  uint8_t *base = (xfer->buffer + xfer->queued_len);
-
-  // This for loop always runs at least once- skip if less than 4 bytes
-  // to send off.
-  if (to_xfer_size >= 4) {
-    for (uint16_t i = 0; i < to_xfer_size_aligned; i += 4) {
-      uint32_t tmp = base[i] | (base[i + 1] << 8) |
-          (base[i + 2] << 16) | (base[i + 3] << 24);
-      (*tx_fifo) = tmp;
-    }
+
+#if 0 // TODO support dcd_edpt_xfer_fifo API
+  if (xfer->ff)
+  {
+    tu_fifo_read_n_const_addr_full_words(xfer->ff, (void *) tx_fifo, to_xfer_size);
   }
+  else
+#endif
+  {
+    uint8_t to_xfer_rem = to_xfer_size % 4;
+    uint16_t to_xfer_size_aligned = to_xfer_size - to_xfer_rem;
+
+    // Buffer might not be aligned to 32b, so we need to force alignment
+    // by copying to a temp var.
+    uint8_t *base = (xfer->buffer + xfer->queued_len);
+
+    // This for loop always runs at least once- skip if less than 4 bytes
+    // to send off.
+    if (to_xfer_size >= 4) {
+      for (uint16_t i = 0; i < to_xfer_size_aligned; i += 4) {
+        uint32_t tmp = base[i] | (base[i + 1] << 8) |
+            (base[i + 2] << 16) | (base[i + 3] << 24);
+        (*tx_fifo) = tmp;
+      }
+    }
 
-  // Do not read beyond end of buffer if not divisible by 4.
-  if (to_xfer_rem != 0) {
-    uint32_t tmp = 0;
-    uint8_t *last_32b_bound = base + to_xfer_size_aligned;
+    // Do not read beyond end of buffer if not divisible by 4.
+    if (to_xfer_rem != 0) {
+      uint32_t tmp = 0;
+      uint8_t *last_32b_bound = base + to_xfer_size_aligned;
 
-    tmp |= last_32b_bound[0];
-    if (to_xfer_rem > 1) {
-      tmp |= (last_32b_bound[1] << 8);
-    }
-    if (to_xfer_rem > 2) {
-      tmp |= (last_32b_bound[2] << 16);
-    }
+      tmp |= last_32b_bound[0];
+      if (to_xfer_rem > 1) {
+        tmp |= (last_32b_bound[1] << 8);
+      }
+      if (to_xfer_rem > 2) {
+        tmp |= (last_32b_bound[2] << 16);
+      }
 
-    (*tx_fifo) = tmp;
+      (*tx_fifo) = tmp;
+    }
   }
 }
 