1 // SPDX-License-Identifier: GPL-2.0-only
3 * Espressif Systems Wireless LAN device driver
5 * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
9 #include <linux/mutex.h>
10 #include <linux/mmc/sdio.h>
11 #include <linux/mmc/sdio_func.h>
12 #include <linux/mmc/sdio_ids.h>
13 #include <linux/mmc/card.h>
14 #include <linux/mmc/host.h>
16 #include "esp_sdio_api.h"
18 #include "esp_bt_api.h"
19 #include <linux/kthread.h>
20 #include "esp_stats.h"
21 #include "include/esp_kernel_port.h"
/* Number of SDIO write retries before a TX packet is dropped */
23 #define MAX_WRITE_RETRIES 2
/* Pause the network TX queue once this many SKBs are pending in tx_q */
24 #define TX_MAX_PENDING_COUNT 200
/* Resume the network TX queue when pending count falls below this */
25 #define TX_RESUME_THRESHOLD (TX_MAX_PENDING_COUNT/5)
/* Log a CMD53 read/write failure with the failing source line.
 * NOTE(review): macro body only partially visible in this view —
 * presumably closed with "} while (0)"; confirm in full source. */
27 #define CHECK_SDIO_RW_ERROR(ret) do { \
29 esp_err("CMD53 read/write error at %d\n", __LINE__); \
/* Single global SDIO transport context (driver supports one slave device) */
32 struct esp_sdio_context sdio_context;
/* Count of SKBs queued for TX but not yet sent; drives pause/resume */
33 static atomic_t tx_pending;
/* Per-priority-queue item counters, mirrors tx_q occupancy */
34 static atomic_t queue_items[MAX_PRIORITY_QUEUES];
36 #ifdef CONFIG_ENABLE_MONITOR_PROCESS
/* Optional watchdog thread that detects stalled RX (see monitor_process) */
37 struct task_struct *monitor_thread;
/* Thread that drains tx_q and writes packets to the slave */
39 struct task_struct *tx_thread;
/* Set while host is suspended; volatile as it is polled across contexts */
41 volatile u8 host_sleep;
/* Forward declarations for routines used before their definitions below */
43 static int init_context(struct esp_sdio_context *context);
44 static struct sk_buff *read_packet(struct esp_adapter *adapter);
45 static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb);
46 /*int deinit_context(struct esp_adapter *adapter);*/
/* SDIO vendor/device IDs this driver binds to (ESP32-family slaves) */
48 static const struct sdio_device_id esp_devices[] = {
49 { SDIO_DEVICE(ESP_VENDOR_ID, ESP_DEVICE_ID_1) },
50 { SDIO_DEVICE(ESP_VENDOR_ID, ESP_DEVICE_ID_2) },
/* Dispatch a slave interrupt status word: on "new RX packet" notify the
 * adapter layer so the packet gets read out of the slave. */
54 static void esp_process_interrupt(struct esp_sdio_context *context, u32 int_status)
60 if (int_status & ESP_SLAVE_RX_NEW_PACKET_INT) {
61 esp_process_new_packet_intr(context->adapter);
/* SDIO IRQ handler: read the slave interrupt status register, process it,
 * then write the same value back to the clear register to ack it.
 * int_status is heap-allocated (GFP_ATOMIC: runs in IRQ context) —
 * presumably for DMA-safe buffering of the CMD53 transfer.
 * NOTE(review): the matching kfree(int_status) is not visible in this
 * sampled view — confirm it exists on all paths in the full source. */
65 static void esp_handle_isr(struct sdio_func *func)
67 struct esp_sdio_context *context = NULL;
78 context = sdio_get_drvdata(func);
84 int_status = kmalloc(sizeof(u32), GFP_ATOMIC);
90 /* Read interrupt status register */
91 ret = esp_read_reg(context, ESP_SLAVE_INT_ST_REG,
92 (u8 *) int_status, sizeof(*int_status), ACQUIRE_LOCK);
93 CHECK_SDIO_RW_ERROR(ret);
95 esp_process_interrupt(context, *int_status);
97 /* Clear interrupt status */
98 ret = esp_write_reg(context, ESP_SLAVE_INT_CLR_REG,
99 (u8 *) int_status, sizeof(*int_status), ACQUIRE_LOCK);
100 CHECK_SDIO_RW_ERROR(ret);
/* Raise an interrupt on the slave by writing a bit pattern (e.g.
 * BIT(ESP_OPEN_DATA_PATH)) to scratch register 7. The byte is
 * heap-allocated, presumably for DMA-safe transfer buffering. */
105 int generate_slave_intr(struct esp_sdio_context *context, u8 data)
113 val = kmalloc(sizeof(u8), GFP_KERNEL);
121 ret = esp_write_reg(context, ESP_SLAVE_SCRATCH_REG_7, val,
122 sizeof(*val), ACQUIRE_LOCK);
/* Tear down the SDIO function: release its IRQ, disable the function,
 * and clear driver private data. Claims/releases the host around the
 * operations as required by the MMC core. */
129 static void deinit_sdio_func(struct sdio_func *func)
131 sdio_claim_host(func);
133 sdio_release_irq(func);
134 /* Disable sdio function */
135 sdio_disable_func(func);
136 sdio_release_host(func);
137 sdio_set_drvdata(func, NULL);
/* Query how many TX buffers the slave currently has free.
 * Reads the token register, extracts the buffer count from bits 16+,
 * and converts it to "buffers available since our last accounting"
 * using modular arithmetic against context->tx_buffer_count. */
140 static int esp_slave_get_tx_buffer_num(struct esp_sdio_context *context, u32 *tx_num, u8 is_lock_needed)
145 len = kmalloc(sizeof(u32), GFP_KERNEL);
151 ret = esp_read_reg(context, ESP_SLAVE_TOKEN_RDATA, (u8 *) len, sizeof(*len), is_lock_needed);
158 *len = (*len >> 16) & ESP_TX_BUFFER_MASK;
159 *len = (*len + ESP_TX_BUFFER_MAX - context->tx_buffer_count) % ESP_TX_BUFFER_MAX;
/* Read the slave's cumulative RX byte counter and convert it into the
 * number of bytes pending for us to read, accounting for counter
 * roll-over at ESP_RX_BYTE_MAX. Rejects lengths above the single-packet
 * maximum ESP_RX_BUFFER_SIZE. */
167 static int esp_get_len_from_slave(struct esp_sdio_context *context, u32 *rx_size, u8 is_lock_needed)
173 len = kmalloc(sizeof(u32), GFP_KERNEL);
179 ret = esp_read_reg(context, ESP_SLAVE_PACKET_LEN_REG,
180 (u8 *) len, sizeof(*len), is_lock_needed);
187 *len &= ESP_SLAVE_LEN_MASK;
189 if (*len >= context->rx_byte_count)
190 *len = (*len + ESP_RX_BYTE_MAX - context->rx_byte_count) % ESP_RX_BYTE_MAX;
192 /* Handle a case of roll over */
193 temp = ESP_RX_BYTE_MAX - context->rx_byte_count;
196 if (*len > ESP_RX_BUFFER_SIZE) {
197 esp_info("Len from slave[%d] exceeds max [%d]\n",
198 *len, ESP_RX_BUFFER_SIZE);
/* Drain any packets still pending in the slave's RX path (used during
 * teardown) by repeatedly calling read_packet and discarding the data. */
209 static void flush_sdio(struct esp_sdio_context *context)
213 if (!context || !context->adapter)
217 skb = read_packet(context->adapter);
224 esp_info("Flushed %d bytes\n", skb->len);
/* SDIO .remove callback: stop worker threads, purge pending TX queues,
 * tell the slave to close the data path, tear down the network card and
 * BT device, then deinit the SDIO function and zero the context. */
231 static void esp_remove(struct sdio_func *func)
233 struct esp_sdio_context *context;
234 uint8_t prio_q_idx = 0;
236 context = sdio_get_drvdata(func);
238 #ifdef CONFIG_ENABLE_MONITOR_PROCESS
240 kthread_stop(monitor_thread);
/* Mark context as no longer READY so workers stop touching hardware */
243 context->state = ESP_CONTEXT_INIT;
244 for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++)
245 skb_queue_purge(&(sdio_context.tx_q[prio_q_idx]));
249 kthread_stop(tx_thread);
/* Ask the slave to stop sending data before we disappear */
252 generate_slave_intr(context, BIT(ESP_CLOSE_DATA_PATH));
255 context->state = ESP_CONTEXT_DISABLED;
257 if (context->adapter) {
258 esp_remove_card(context->adapter);
260 if (context->adapter->hcidev) {
261 esp_deinit_bt(context->adapter);
267 deinit_sdio_func(context->func);
268 context->func = NULL;
270 memset(context, 0, sizeof(struct esp_sdio_context));
/* Transport ops handed to the adapter layer; .write enqueues into tx_q */
274 static struct esp_if_ops if_ops = {
276 .write = write_packet,
/* Seed the host's RX byte counter and TX buffer counter from the slave's
 * registers so that subsequent delta calculations start from the slave's
 * current state (needed after probe and after resume). */
279 static int get_firmware_data(struct esp_sdio_context *context)
284 val = kmalloc(sizeof(u32), GFP_KERNEL);
290 /* Initialize rx_byte_count */
291 ret = esp_read_reg(context, ESP_SLAVE_PACKET_LEN_REG,
292 (u8 *) val, sizeof(*val), ACQUIRE_LOCK);
298 esp_info("Rx Pre ====== %d\n", context->rx_byte_count);
299 context->rx_byte_count = *val & ESP_SLAVE_LEN_MASK;
300 esp_info("Rx Pos ====== %d\n", context->rx_byte_count);
302 /* Initialize tx_buffer_count */
303 ret = esp_read_reg(context, ESP_SLAVE_TOKEN_RDATA, (u8 *) val,
304 sizeof(*val), ACQUIRE_LOCK);
311 *val = ((*val >> 16) & ESP_TX_BUFFER_MASK);
312 esp_info("Tx Pre ====== %d\n", context->tx_buffer_count);
/* Consumed-buffer count is how far the slave's token count exceeds
 * the slave's total buffer pool size */
314 if (*val >= ESP_MAX_BUF_CNT)
315 context->tx_buffer_count = (*val) - ESP_MAX_BUF_CNT;
317 context->tx_buffer_count = 0;
318 esp_info("Tx Pos ====== %d\n", context->tx_buffer_count);
/* Initialize the transport context: sync counters with the slave
 * (get_firmware_data), fetch the adapter, and set up the priority TX
 * queues and their item counters.
 * NOTE(review): after the "Failed to get adapter" error the visible code
 * goes on to dereference context->adapter (if_type assignment) — if
 * there is no early return in the elided lines, a NULL adapter would
 * oops here; confirm against the full source. */
324 static int init_context(struct esp_sdio_context *context)
327 uint8_t prio_q_idx = 0;
333 ret = get_firmware_data(context);
337 context->adapter = esp_get_adapter();
339 if (unlikely(!context->adapter))
340 esp_err("Failed to get adapter\n");
342 for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
343 skb_queue_head_init(&(sdio_context.tx_q[prio_q_idx]));
344 atomic_set(&queue_items[prio_q_idx], 0);
347 context->adapter->if_type = ESP_IF_TYPE_SDIO;
/* Read one packet from the slave over CMD53.
 * Queries the pending length, allocates an SKB of that size, then reads
 * in chunks of up to 4 SDIO blocks: full multiples of ESP_BLOCK_SIZE as
 * block transfers, the tail as a byte transfer rounded up to 4 bytes.
 * Updates rx_byte_count (mod ESP_RX_BYTE_MAX) as data is consumed.
 * Returns the SKB, or NULL on error / nothing pending.
 * Host is claimed for the whole length-query + read sequence so the two
 * stay consistent. */
352 static struct sk_buff *read_packet(struct esp_adapter *adapter)
354 u32 len_from_slave, data_left, len_to_read, size, num_blocks;
358 struct esp_sdio_context *context;
360 if (!adapter || !adapter->if_context) {
361 esp_err("INVALID args\n");
365 context = adapter->if_context;
367 if (!context || (context->state != ESP_CONTEXT_READY) || !context->func) {
368 esp_err("Invalid context/state\n");
372 sdio_claim_host(context->func);
374 data_left = len_to_read = len_from_slave = num_blocks = 0;
377 ret = esp_get_len_from_slave(context, &len_from_slave, LOCK_ALREADY_ACQUIRED);
379 if (ret || !len_from_slave) {
380 sdio_release_host(context->func);
/* "large packet" threshold: 4 SDIO blocks */
384 size = ESP_BLOCK_SIZE * 4;
386 if (len_from_slave > size) {
387 esp_info("Rx large packet: %d\n", len_from_slave);
390 skb = esp_alloc_skb(len_from_slave);
393 esp_err("SKB alloc failed\n");
394 sdio_release_host(context->func);
398 skb_put(skb, len_from_slave);
401 data_left = len_from_slave;
404 num_blocks = data_left/ESP_BLOCK_SIZE;
407 if (!context->rx_byte_count) {
408 start_time = ktime_get_ns();
413 len_to_read = num_blocks * ESP_BLOCK_SIZE;
414 ret = esp_read_block(context,
415 ESP_SLAVE_CMD53_END_ADDR - len_to_read,
416 pos, len_to_read, LOCK_ALREADY_ACQUIRED);
418 len_to_read = data_left;
419 /* 4 byte aligned length */
420 ret = esp_read_block(context,
421 ESP_SLAVE_CMD53_END_ADDR - len_to_read,
422 pos, (len_to_read + 3) & (~3), LOCK_ALREADY_ACQUIRED);
426 esp_err("Failed to read data - %d [%u - %d]\n", ret, num_blocks, len_to_read);
429 sdio_release_host(context->func);
433 data_left -= len_to_read;
435 context->rx_byte_count += len_to_read;
436 context->rx_byte_count = context->rx_byte_count % ESP_RX_BYTE_MAX;
438 } while (data_left > 0);
440 sdio_release_host(context->func);
/* Adapter-layer TX entry point (if_ops.write).
 * Validates the SKB, drops packets longer than the max transport size,
 * pauses the netdev queue once tx_pending reaches TX_MAX_PENDING_COUNT,
 * then enqueues the SKB into the priority queue chosen by the payload
 * header's interface type (internal/HCI get higher priority; the
 * assignments themselves are in lines elided from this view). Actual
 * transmission happens asynchronously in tx_process. */
445 static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb)
447 u32 max_pkt_size = ESP_RX_BUFFER_SIZE - sizeof(struct esp_payload_header);
448 struct esp_payload_header *payload_header = (struct esp_payload_header *) skb->data;
449 struct esp_skb_cb *cb = NULL;
450 uint8_t prio = PRIO_Q_LOW;
452 if (!adapter || !adapter->if_context || !skb || !skb->data || !skb->len) {
453 esp_err("Invalid args\n");
462 if (skb->len > max_pkt_size) {
463 esp_err("Drop pkt of len[%u] > max SDIO transport len[%u]\n",
464 skb->len, max_pkt_size);
/* Backpressure: pause the source netdev when too many SKBs pending */
470 cb = (struct esp_skb_cb *)skb->cb;
471 if (cb && cb->priv && (atomic_read(&tx_pending) >= TX_MAX_PENDING_COUNT)) {
472 esp_tx_pause(cb->priv);
475 /* esp_err("TX Pause busy");*/
479 /* Enqueue SKB in tx_q */
480 atomic_inc(&tx_pending);
482 /* Notify to process queue */
483 if (payload_header->if_type == ESP_INTERNAL_IF)
485 else if (payload_header->if_type == ESP_HCI_IF)
490 atomic_inc(&queue_items[prio]);
491 skb_queue_tail(&(sdio_context.tx_q[prio]), skb);
/* Check (and reserve) slave TX buffers for an upcoming write.
 * buf_available is a static cache of the last known free-buffer count;
 * the slave register is only re-read when the cache is insufficient,
 * with up to MAX_WRITE_RETRIES polls separated by a short sleep.
 * On success the needed buffers are deducted from the cache.
 * Returns BUFFER_AVAILABLE (1) or BUFFER_UNAVAILABLE (0).
 * NOTE(review): relies on being called only from the single tx_process
 * thread — the static cache is not protected by a lock. */
496 static int is_sdio_write_buffer_available(u32 buf_needed)
498 #define BUFFER_AVAILABLE 1
499 #define BUFFER_UNAVAILABLE 0
502 static u32 buf_available;
503 struct esp_sdio_context *context = &sdio_context;
504 u8 retry = MAX_WRITE_RETRIES;
506 /*If buffer needed are less than buffer available
507 then only read for available buffer number from slave*/
508 if (buf_available < buf_needed) {
510 ret = esp_slave_get_tx_buffer_num(context, &buf_available, ACQUIRE_LOCK);
512 if (buf_available < buf_needed) {
514 /* Release SDIO and retry after delay*/
516 usleep_range(10, 50);
524 if (buf_available >= buf_needed)
525 buf_available -= buf_needed;
528 /* No buffer available at slave */
529 return BUFFER_UNAVAILABLE;
532 return BUFFER_AVAILABLE;
/* TX worker thread (kthread).
 * Loop: wait until the context is READY, dequeue the next SKB in strict
 * priority order (HIGH, then MID, then LOW), decrement tx_pending and
 * resume the source netdev queue if load dropped below the threshold,
 * reserve slave buffers, then write the packet via CMD53 (length padded
 * up to a 4-byte boundary) and account the consumed slave buffers.
 * The SKB is freed on every exit path visible here (no-buffer drop,
 * write failure, success). */
535 static int tx_process(void *data)
541 u32 data_left, len_to_send, pad;
542 struct sk_buff *tx_skb = NULL;
543 struct esp_adapter *adapter = (struct esp_adapter *) data;
544 struct esp_sdio_context *context = NULL;
545 struct esp_skb_cb *cb = NULL;
548 context = adapter->if_context;
550 while (!kthread_should_stop()) {
552 if (context->state != ESP_CONTEXT_READY) {
554 esp_err("not ready");
559 /* TODO: Use wait_event_interruptible_timeout */
/* Strict priority dequeue: HIGH first, then MID, then LOW */
564 if (atomic_read(&queue_items[PRIO_Q_HIGH]) > 0) {
565 tx_skb = skb_dequeue(&(context->tx_q[PRIO_Q_HIGH]));
569 atomic_dec(&queue_items[PRIO_Q_HIGH]);
570 } else if (atomic_read(&queue_items[PRIO_Q_MID]) > 0) {
571 tx_skb = skb_dequeue(&(context->tx_q[PRIO_Q_MID]));
575 atomic_dec(&queue_items[PRIO_Q_MID]);
576 } else if (atomic_read(&queue_items[PRIO_Q_LOW]) > 0) {
577 tx_skb = skb_dequeue(&(context->tx_q[PRIO_Q_LOW]));
581 atomic_dec(&queue_items[PRIO_Q_LOW]);
584 esp_err("not ready 2 [%d %d]\n",
585 atomic_read(&queue_items[PRIO_Q_OTHERS]),
586 atomic_read(&queue_items[PRIO_Q_SERIAL]));
592 if (atomic_read(&tx_pending))
593 atomic_dec(&tx_pending);
595 retry = MAX_WRITE_RETRIES;
597 /* resume network tx queue if bearable load */
598 cb = (struct esp_skb_cb *)tx_skb->cb;
599 if (cb && cb->priv && atomic_read(&tx_pending) < TX_RESUME_THRESHOLD) {
600 esp_tx_resume(cb->priv);
602 esp_raw_tp_queue_resume();
/* Slave buffers are ESP_RX_BUFFER_SIZE each; round up */
606 buf_needed = (tx_skb->len + ESP_RX_BUFFER_SIZE - 1) / ESP_RX_BUFFER_SIZE;
608 /*If SDIO slave buffer is available to write then only write data
609 else wait till buffer is available*/
610 ret = is_sdio_write_buffer_available(buf_needed);
612 dev_kfree_skb(tx_skb);
617 data_left = len_to_send = 0;
619 data_left = tx_skb->len;
620 pad = ESP_BLOCK_SIZE - (data_left % ESP_BLOCK_SIZE);
625 block_cnt = data_left / ESP_BLOCK_SIZE;
626 len_to_send = data_left;
627 ret = esp_write_block(context, ESP_SLAVE_CMD53_END_ADDR - len_to_send,
628 pos, (len_to_send + 3) & (~3), ACQUIRE_LOCK);
631 esp_err("Failed to send data: %d %d %d\n", ret, len_to_send, data_left);
635 data_left -= len_to_send;
640 /* drop the packet */
641 dev_kfree_skb(tx_skb);
/* Account slave buffers consumed by this packet (wraps at max) */
645 context->tx_buffer_count += buf_needed;
646 context->tx_buffer_count = context->tx_buffer_count % ESP_TX_BUFFER_MAX;
648 dev_kfree_skb(tx_skb);
/* Enable the SDIO function, install the IRQ handler, and attach the
 * global sdio_context as driver private data. On IRQ claim failure the
 * function is disabled again. Returns the initialized context
 * (state = ESP_CONTEXT_INIT), or NULL on failure in elided paths. */
656 static struct esp_sdio_context *init_sdio_func(struct sdio_func *func)
658 struct esp_sdio_context *context = NULL;
664 context = &sdio_context;
666 context->func = func;
668 sdio_claim_host(func);
670 /* Enable Function */
671 ret = sdio_enable_func(func);
677 ret = sdio_claim_irq(func, esp_handle_isr);
679 sdio_disable_func(func);
683 /* Set private data */
684 sdio_set_drvdata(func, context);
686 context->state = ESP_CONTEXT_INIT;
688 sdio_release_host(func);
693 #ifdef CONFIG_ENABLE_MONITOR_PROCESS
/* Debug watchdog thread: periodically samples the slave's length, token
 * and interrupt-status registers. If the slave reports more RX bytes
 * than we've consumed and our counter hasn't moved since the previous
 * iteration (stalled RX), it logs the state and force-drains a packet
 * via read_packet. */
694 static int monitor_process(void *data)
696 u32 val, intr, len_reg, rdata, old_len = 0;
697 struct esp_sdio_context *context = (struct esp_sdio_context *) data;
700 while (!kthread_should_stop()) {
703 val = intr = len_reg = rdata = 0;
705 esp_read_reg(context, ESP_SLAVE_PACKET_LEN_REG,
706 (u8 *) &val, sizeof(val), ACQUIRE_LOCK);
708 len_reg = val & ESP_SLAVE_LEN_MASK;
711 esp_read_reg(context, ESP_SLAVE_TOKEN_RDATA, (u8 *) &val,
712 sizeof(val), ACQUIRE_LOCK);
714 rdata = ((val >> 16) & ESP_TX_BUFFER_MASK);
716 esp_read_reg(context, ESP_SLAVE_INT_ST_REG,
717 (u8 *) &intr, sizeof(intr), ACQUIRE_LOCK);
/* Stall detection: slave has pending bytes but our counter is stuck */
720 if (len_reg > context->rx_byte_count) {
721 if (old_len && (context->rx_byte_count == old_len)) {
722 esp_dbg("Monitor thread ----> [%d - %d] [%d - %d] %d\n",
723 len_reg, context->rx_byte_count,
724 rdata, context->tx_buffer_count, intr);
726 skb = read_packet(context->adapter);
732 esp_dbg("Flushed %d bytes\n", skb->len);
734 /* drop the packet */
740 old_len = context->rx_byte_count;
/* SDIO .probe callback: only function 1 is accepted. Initializes the
 * SDIO function, optionally raises the bus clock to the module-requested
 * rate, syncs counters with the slave (init_context), starts the TX
 * worker thread, and signals the slave to open the data path. */
748 static int esp_probe(struct sdio_func *func,
749 const struct sdio_device_id *id)
751 struct esp_sdio_context *context = NULL;
754 if (func->num != 1) {
758 esp_info("ESP network device detected\n");
760 context = init_sdio_func(func);
766 if (sdio_context.sdio_clk_mhz) {
767 struct mmc_host *host = func->card->host;
768 u32 hz = sdio_context.sdio_clk_mhz * NUMBER_1M;
769 /* Expansion of mmc_set_clock that isn't exported */
770 if (hz < host->f_min)
772 if (hz > host->f_max)
774 host->ios.clock = hz;
775 host->ops->set_ios(host, &host->ios);
778 context->state = ESP_CONTEXT_READY;
779 atomic_set(&tx_pending, 0);
780 ret = init_context(context);
782 deinit_sdio_func(func);
786 tx_thread = kthread_run(tx_process, context->adapter, "esp32_TX");
789 esp_err("Failed to create esp32_sdio TX thread\n");
791 context->adapter->dev = &func->dev;
/* Tell the slave it can start sending data */
792 generate_slave_intr(context, BIT(ESP_OPEN_DATA_PATH));
795 #ifdef CONFIG_ENABLE_MONITOR_PROCESS
796 monitor_thread = kthread_run(monitor_process, context, "Monitor process");
799 esp_err("Failed to create monitor thread\n");
/* PM suspend callback: notify the slave to enter power save, keep the
 * SDIO card powered across suspend, and arm the out-of-band wake IRQ. */
805 static int esp_suspend(struct device *dev)
807 struct sdio_func *func = NULL;
808 struct esp_sdio_context *context = NULL;
811 esp_info("Failed to inform ESP that host is suspending\n");
815 func = dev_to_sdio_func(dev);
817 esp_info("----> Host Suspend\n");
820 context = sdio_get_drvdata(func);
823 esp_info("Failed to inform ESP that host is suspending\n");
829 generate_slave_intr(context, BIT(ESP_POWER_SAVE_ON));
/* Keep card powered so the slave state survives suspend */
832 sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
834 /* Enable OOB IRQ and host wake up */
835 enable_irq(SDIO_OOB_IRQ);
836 enable_irq_wake(SDIO_OOB_IRQ);
/* PM resume callback: disarm the out-of-band wake IRQ, re-sync counters
 * with the slave (get_firmware_data), and tell it to leave power save. */
841 static int esp_resume(struct device *dev)
843 struct sdio_func *func = NULL;
844 struct esp_sdio_context *context = NULL;
847 esp_info("Failed to inform ESP that host is awake\n");
851 func = dev_to_sdio_func(dev);
853 esp_info("-----> Host Awake\n");
855 /* Host woke up.. Disable OOB IRQ */
856 disable_irq_wake(SDIO_OOB_IRQ);
857 disable_irq(SDIO_OOB_IRQ);
861 context = sdio_get_drvdata(func);
864 esp_info("Failed to inform ESP that host is awake\n");
868 /* generate_slave_intr(context, BIT(ESP_RESET));*/
869 get_firmware_data(context);
871 generate_slave_intr(context, BIT(ESP_POWER_SAVE_OFF));
/* Power-management callbacks registered with the SDIO driver core */
876 static const struct dev_pm_ops esp_pm_ops = {
877 .suspend = esp_suspend,
878 .resume = esp_resume,
/* Devicetree match table for "espressif,esp_sdio" nodes */
881 static const struct of_device_id esp_sdio_of_match[] = {
882 { .compatible = "espressif,esp_sdio", },
885 MODULE_DEVICE_TABLE(of, esp_sdio_of_match);
887 /* SDIO driver structure to be registered with kernel */
888 static struct sdio_driver esp_sdio_driver = {
890 .id_table = esp_devices,
892 .remove = esp_remove,
895 .owner = THIS_MODULE,
897 .of_match_table = esp_sdio_of_match,
/* Wire the adapter and the global SDIO context together, record the
 * requested bus clock (MHz, applied later in esp_probe), and register
 * the SDIO driver with the kernel. Returns sdio_register_driver()'s
 * result. */
901 int esp_init_interface_layer(struct esp_adapter *adapter, u32 speed)
906 adapter->if_context = &sdio_context;
907 adapter->if_ops = &if_ops;
908 sdio_context.adapter = adapter;
909 sdio_context.sdio_clk_mhz = speed;
911 return sdio_register_driver(&esp_sdio_driver);
/* Parse the TLV-encoded bootup event sent by the slave firmware.
 * Each tag is [type][len][value...]: capability bits, chip ID, raw
 * throughput test flags and firmware metadata are extracted; unknown
 * tags are skipped by length. Finally registers the network interface
 * via esp_add_card, closing the data path on failure.
 * Fixes vs. original: "iterface" -> "interface" in the error message,
 * and a missing trailing '\n' on the "Unsupported tag" warning (every
 * other log call in this file terminates with '\n'). */
914 void process_event_esp_bootup(struct esp_adapter *adapter, u8 *evt_buf, u8 len)
916 u8 len_left = len, tag_len;
918 struct esp_sdio_context *context = &sdio_context;
929 tag_len = *(pos + 1);
931 esp_info("EVENT: %d\n", *pos);
933 if (*pos == ESP_BOOTUP_CAPABILITY) {
935 adapter->capabilities = *(pos + 2);
936 process_capabilities(adapter);
937 print_capabilities(*(pos + 2));
939 } else if (*pos == ESP_BOOTUP_FIRMWARE_CHIP_ID) {
941 esp_info("ESP chipset detected [%s]\n",
942 *(pos+2) == ESP_FIRMWARE_CHIP_ESP32 ? "esp32" :
943 *(pos+2) == ESP_FIRMWARE_CHIP_ESP32S2 ? "esp32-s2" :
944 *(pos+2) == ESP_FIRMWARE_CHIP_ESP32S3 ? "esp32-s3" :
945 *(pos+2) == ESP_FIRMWARE_CHIP_ESP32C2 ? "esp32-c2" :
946 *(pos+2) == ESP_FIRMWARE_CHIP_ESP32C3 ? "esp32-c3" :
947 *(pos+2) == ESP_FIRMWARE_CHIP_ESP32C6 ? "esp32-c6" :
950 if ((*(pos+2) != ESP_FIRMWARE_CHIP_ESP32) && (*(pos+2) != ESP_FIRMWARE_CHIP_ESP32C6))
951 esp_err("SDIO is only supported with ESP32/ESP32C6\n");
953 } else if (*pos == ESP_BOOTUP_TEST_RAW_TP) {
954 process_test_capabilities(*(pos + 2));
956 } else if (*pos == ESP_BOOTUP_FW_DATA) {
958 if (tag_len != sizeof(struct fw_data))
959 esp_info("Length not matching to firmware data size\n");
961 if (process_fw_data((struct fw_data *)(pos + 2)))
963 generate_slave_intr(context, BIT(ESP_CLOSE_DATA_PATH));
968 esp_warn("Unsupported tag in event\n");
/* Advance past this TLV: 2 header bytes + tag_len value bytes */
972 len_left -= (tag_len+2);
975 if (esp_add_card(adapter)) {
976 esp_err("network interface init failed\n");
977 generate_slave_intr(context, BIT(ESP_CLOSE_DATA_PATH));
/* Unregister the SDIO driver; the core invokes esp_remove for any
 * bound device as part of unregistration. */
981 void esp_deinit_interface_layer(void)
983 sdio_unregister_driver(&esp_sdio_driver);