39 | 39 | #define DWC2_DEBUG 2
40 | 40 |
41 | 41 | #include "device/dcd.h"
| 42 | +#include "device/usbd_pvt.h"
42 | 43 | #include "dwc2_common.h"
43 | 44 |
44 | 45 | //--------------------------------------------------------------------+

@@ -52,6 +53,7 @@ typedef struct {
52 | 53 | uint8_t interval;
53 | 54 | } xfer_ctl_t;
54 | 55 |
| 56 | +// This variable is modified from ISR context, so it must be protected by critical section
55 | 57 | static xfer_ctl_t xfer_status[DWC2_EP_MAX][2];
56 | 58 | #define XFER_CTL_BASE(_ep, _dir) (&xfer_status[_ep][_dir])
57 | 59 |
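
The new comment above xfer_status is the core of this change: the array is shared between thread-level dcd_* calls and the interrupt handler, so every racy access must sit inside the driver's critical section. Below is a minimal sketch of that pattern, assuming usbd_spin_lock()/usbd_spin_unlock() from the newly included device/usbd_pvt.h take a single "called from ISR" flag (false from task context, true from the interrupt handler, matching the call sites later in this diff); the helper name example_update_xfer is illustrative only, not part of the change.

    #include "device/usbd_pvt.h"

    // Illustrative only: how a thread-context writer would touch xfer_status safely.
    static void example_update_xfer(uint8_t epnum, uint8_t dir, uint16_t len) {
      usbd_spin_lock(false);                         // false: not called from an ISR (assumed meaning)
      xfer_ctl_t* xfer = XFER_CTL_BASE(epnum, dir);  // state shared with dcd_int_handler
      xfer->total_len = len;                         // update while the ISR is locked out
      usbd_spin_unlock(false);
    }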
@@ -343,6 +345,9 @@ static void edpt_disable(uint8_t rhport, uint8_t ep_addr, bool stall) {
343 | 345 | }
344 | 346 | }
345 | 347 |
| 348 | +// Since this function returns void, it is not possible to return a boolean success message
| 349 | +// We must make sure that this function is not called when the EP is disabled
| 350 | +// Must be called from critical section
346 | 351 | static void edpt_schedule_packets(uint8_t rhport, const uint8_t epnum, const uint8_t dir) {
347 | 352 | dwc2_regs_t* dwc2 = DWC2_REG(rhport);
348 | 353 | xfer_ctl_t* const xfer = XFER_CTL_BASE(epnum, dir);
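
These comments state two preconditions the compiler cannot enforce: the endpoint must still be open, and the caller must already hold the critical section. The dcd_edpt_xfer and dcd_edpt_xfer_fifo hunks below inline exactly that check; here is a hypothetical wrapper (not part of the diff) that spells the precondition out, assuming max_size == 0 marks a closed endpoint as the later hunks do.

    // Hypothetical helper, shown only to make the precondition explicit.
    // Caller must already hold the critical section (usbd_spin_lock).
    static bool edpt_schedule_packets_checked(uint8_t rhport, uint8_t epnum, uint8_t dir) {
      xfer_ctl_t* xfer = XFER_CTL_BASE(epnum, dir);
      if (xfer->max_size == 0) {
        return false;                               // endpoint closed: do not touch its registers
      }
      edpt_schedule_packets(rhport, epnum, dir);    // safe: EP verified open, lock already held
      return true;
    }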
@@ -553,6 +558,8 @@ void dcd_edpt_close_all(uint8_t rhport) {
553 | 558 | dwc2_regs_t* dwc2 = DWC2_REG(rhport);
554 | 559 | uint8_t const ep_count = _dwc2_controller[rhport].ep_count;
555 | 560 |
| 561 | +usbd_spin_lock(false);
| 562 | +
556 | 563 | _dcd_data.allocated_epin_count = 0;
557 | 564 |
558 | 565 | // Disable non-control interrupt
@@ -574,8 +581,9 @@ void dcd_edpt_close_all(uint8_t rhport) {
574 | 581 |
575 | 582 | dfifo_flush_tx(dwc2, 0x10); // all tx fifo
576 | 583 | dfifo_flush_rx(dwc2);
577 | | -
578 | 584 | dfifo_device_init(rhport); // re-init dfifo
| 585 | +
| 586 | +usbd_spin_unlock(false);
579 | 587 | }
580 | 588 |
581 | 589 | bool dcd_edpt_iso_alloc(uint8_t rhport, uint8_t ep_addr, uint16_t largest_packet_size) {
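
dcd_edpt_close_all now holds the lock across the whole teardown, from clearing allocated_epin_count through the FIFO flushes to dfifo_device_init, so the interrupt handler can never observe a half-torn-down FIFO layout. A rough sketch of that bracket discipline, with the teardown steps elided and the helper name purely illustrative; keeping a single unlock at the end rather than early returns is what keeps the pair balanced.

    // Sketch of the lock bracket used above (teardown steps elided).
    void example_close_all(uint8_t rhport) {
      usbd_spin_lock(false);
      // ... disable endpoints, flush TX/RX FIFOs, re-init the DFIFO layout ...
      usbd_spin_unlock(false);   // one exit point, so the bracket always closes
    }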
@@ -593,21 +601,31 @@ bool dcd_edpt_iso_activate(uint8_t rhport, tusb_desc_endpoint_t const * p_endpo
593 | 601 | bool dcd_edpt_xfer(uint8_t rhport, uint8_t ep_addr, uint8_t* buffer, uint16_t total_bytes) {
594 | 602 | uint8_t const epnum = tu_edpt_number(ep_addr);
595 | 603 | uint8_t const dir = tu_edpt_dir(ep_addr);
596 | | -
597 | 604 | xfer_ctl_t* xfer = XFER_CTL_BASE(epnum, dir);
598 | | -xfer->buffer = buffer;
599 | | -xfer->ff = NULL;
600 | | -xfer->total_len = total_bytes;
| 605 | +bool ret;
601 | 606 |
602 | | -// EP0 can only handle one packet
603 | | -if (epnum == 0) {
604 | | -_dcd_data.ep0_pending[dir] = total_bytes;
| 607 | +usbd_spin_lock(false);
| 608 | +
| 609 | +if (xfer->max_size == 0) {
| 610 | +ret = false; // Endpoint is closed
| 611 | +} else {
| 612 | +xfer->buffer = buffer;
| 613 | +xfer->ff = NULL;
| 614 | +xfer->total_len = total_bytes;
| 615 | +
| 616 | +// EP0 can only handle one packet
| 617 | +if (epnum == 0) {
| 618 | +_dcd_data.ep0_pending[dir] = total_bytes;
| 619 | +}
| 620 | +
| 621 | +// Schedule packets to be sent within interrupt
| 622 | +edpt_schedule_packets(rhport, epnum, dir);
| 623 | +ret = true;
605 | 624 | }
606 | 625 |
607 | | -// Schedule packets to be sent within interrupt
608 | | -edpt_schedule_packets(rhport, epnum, dir);
| 626 | +usbd_spin_unlock(false);
609 | 627 |
610 | | -return true;
| 628 | +return ret;
611 | 629 | }
612 | 630 |
613 | 631 | // The number of bytes has to be given explicitly to allow more flexible control of how many
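
With this hunk dcd_edpt_xfer can now legitimately fail: if the endpoint was closed (max_size == 0), for example by dcd_edpt_close_all racing with a bus reset, it returns false instead of programming a stale endpoint. A usage sketch from a hypothetical caller's point of view (TinyUSB's core normally reaches this through usbd_edpt_xfer; the endpoint address 0x81 and the helper name are illustrative only):

    // Illustrative caller: queue an IN transfer and cope with a concurrently closed endpoint.
    static bool example_queue_in(uint8_t rhport, uint8_t* buf, uint16_t len) {
      if (!dcd_edpt_xfer(rhport, 0x81 /* EP1 IN, illustrative */, buf, len)) {
        // Endpoint was closed under us (e.g. bus reset); drop the data or re-queue after re-open.
        return false;
      }
      return true;   // transfer scheduled; completion is reported later via dcd_event_xfer_complete
    }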
@@ -620,17 +638,27 @@ bool dcd_edpt_xfer_fifo(uint8_t rhport, uint8_t ep_addr, tu_fifo_t* ff, uint16_t
620 | 638 |
621 | 639 | uint8_t const epnum = tu_edpt_number(ep_addr);
622 | 640 | uint8_t const dir = tu_edpt_dir(ep_addr);
623 | | -
624 | 641 | xfer_ctl_t* xfer = XFER_CTL_BASE(epnum, dir);
625 | | -xfer->buffer = NULL;
626 | | -xfer->ff = ff;
627 | | -xfer->total_len = total_bytes;
| 642 | +bool ret;
628 | 643 |
629 | | -// Schedule packets to be sent within interrupt
630 | | -// TODO xfer fifo may only available for slave mode
631 | | -edpt_schedule_packets(rhport, epnum, dir);
| 644 | +usbd_spin_lock(false);
632 | 645 |
633 | | -return true;
| 646 | +if (xfer->max_size == 0) {
| 647 | +ret = false; // Endpoint is closed
| 648 | +} else {
| 649 | +xfer->buffer = NULL;
| 650 | +xfer->ff = ff;
| 651 | +xfer->total_len = total_bytes;
| 652 | +
| 653 | +// Schedule packets to be sent within interrupt
| 654 | +// TODO xfer fifo may only available for slave mode
| 655 | +edpt_schedule_packets(rhport, epnum, dir);
| 656 | +ret = true;
| 657 | +}
| 658 | +
| 659 | +usbd_spin_unlock(false);
| 660 | +
| 661 | +return ret;
634 | 662 | }
635 | 663 |
636 | 664 | void dcd_edpt_stall(uint8_t rhport, uint8_t ep_addr) {
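
dcd_edpt_xfer and dcd_edpt_xfer_fifo now share the same skeleton: take the lock, bail out if max_size == 0, fill in the xfer_ctl_t, schedule, unlock, and report the result. A hypothetical consolidation (not part of the diff) that both entry points could call, with exactly one of buffer/ff non-NULL, is sketched below; it just merges the two hunks above.

    // Hypothetical shared helper combining the two transfer entry points above.
    static bool edpt_xfer_start(uint8_t rhport, uint8_t epnum, uint8_t dir,
                                uint8_t* buffer, tu_fifo_t* ff, uint16_t total_bytes) {
      xfer_ctl_t* xfer = XFER_CTL_BASE(epnum, dir);
      bool ret;

      usbd_spin_lock(false);
      if (xfer->max_size == 0) {
        ret = false;                                  // endpoint is closed
      } else {
        xfer->buffer = buffer;                        // linear-buffer path (NULL when using ff)
        xfer->ff = ff;                                // FIFO path (NULL when using buffer)
        xfer->total_len = total_bytes;
        if (epnum == 0) {
          _dcd_data.ep0_pending[dir] = total_bytes;   // EP0 can only handle one packet
        }                                             // (EP0 only ever uses the linear-buffer path)
        edpt_schedule_packets(rhport, epnum, dir);    // schedule while still inside the critical section
        ret = true;
      }
      usbd_spin_unlock(false);

      return ret;
    }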
@@ -657,6 +685,7 @@ void dcd_edpt_clear_stall(uint8_t rhport, uint8_t ep_addr) {
657 | 685 | //--------------------------------------------------------------------
658 | 686 |
659 | 687 | // 7.4.1 Initialization on USB Reset
| 688 | +// Must be called from critical section
660 | 689 | static void handle_bus_reset(uint8_t rhport) {
661 | 690 | dwc2_regs_t *dwc2 = DWC2_REG(rhport);
662 | 691 | const uint8_t ep_count = dwc2_ep_count(dwc2);
@@ -1009,7 +1038,6 @@ static void handle_ep_irq(uint8_t rhport, uint8_t dir) {
1009 | 1038 | */
1010 | 1039 | void dcd_int_handler(uint8_t rhport) {
1011 | 1040 | dwc2_regs_t* dwc2 = DWC2_REG(rhport);
1012 | | -
1013 | 1041 | const uint32_t gintmask = dwc2->gintmsk;
1014 | 1042 | const uint32_t gintsts = dwc2->gintsts & gintmask;
1015 | 1043 |
@@ -1019,7 +1047,10 @@ void dcd_int_handler(uint8_t rhport) {
1019 | 1047 | #if TU_CHECK_MCU(OPT_MCU_ESP32S2, OPT_MCU_ESP32S3)
1020 | 1048 | _allocated_fifos = 1;
1021 | 1049 | #endif
| 1050 | +
| 1051 | +usbd_spin_lock(true);
1022 | 1052 | handle_bus_reset(rhport);
| 1053 | +usbd_spin_unlock(true);
1023 | 1054 | }
1024 | 1055 |
1025 | 1056 | if (gintsts & GINTSTS_ENUMDNE) {