/*
 * Espressif Systems Wireless LAN device driver
 *
 * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */
9 #include <linux/mutex.h>
10 #include <linux/mmc/sdio.h>
11 #include <linux/mmc/sdio_func.h>
12 #include <linux/mmc/sdio_ids.h>
13 #include <linux/mmc/card.h>
14 #include <linux/mmc/host.h>
16 #include "esp_sdio_api.h"
18 #include "esp_bt_api.h"
19 #include <linux/kthread.h>
20 #include "esp_stats.h"
21 #include "include/esp_kernel_port.h"
/* Maximum attempts for a slave-buffer-availability query before giving up */
23 #define MAX_WRITE_RETRIES 2
/* Max SKBs allowed in the TX queues before the netdev queue is paused */
24 #define TX_MAX_PENDING_COUNT 200
/* Resume netdev TX once pending drops below 1/5 of the max */
25 #define TX_RESUME_THRESHOLD (TX_MAX_PENDING_COUNT/5)
/* Log helper for CMD53 read/write failures
 * NOTE(review): the `if (ret)` guard and `} while (0)` tail of this macro
 * appear to be on lines elided from this view — confirm. */
27 #define CHECK_SDIO_RW_ERROR(ret) do { \
29 esp_err("CMD53 read/write error at %d\n", __LINE__); \
/* Single global SDIO transport context shared across this driver */
32 struct esp_sdio_context sdio_context;
/* Count of SKBs accepted for TX but not yet consumed by tx_process() */
33 static atomic_t tx_pending;
/* Per-priority item counters mirroring the tx_q lengths */
34 static atomic_t queue_items[MAX_PRIORITY_QUEUES];
36 #ifdef CONFIG_ENABLE_MONITOR_PROCESS
/* Optional watchdog thread that unsticks RX (see monitor_process) */
37 struct task_struct *monitor_thread;
/* Worker thread that drains tx_q and writes to the slave (tx_process) */
39 struct task_struct *tx_thread;
/* Host-sleep flag — presumably set around suspend/resume; setter not
 * visible in this view (TODO confirm) */
41 volatile u8 host_sleep = 0;
/* Forward declarations */
43 static int init_context(struct esp_sdio_context *context);
44 static struct sk_buff * read_packet(struct esp_adapter *adapter);
45 static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb);
46 /*int deinit_context(struct esp_adapter *adapter);*/
/* SDIO vendor/device IDs this driver binds to */
48 static const struct sdio_device_id esp_devices[] = {
49 { SDIO_DEVICE(ESP_VENDOR_ID, ESP_DEVICE_ID_1) },
50 { SDIO_DEVICE(ESP_VENDOR_ID, ESP_DEVICE_ID_2) },
54 static void esp_process_interrupt(struct esp_sdio_context *context, u32 int_status)
60 if (int_status & ESP_SLAVE_RX_NEW_PACKET_INT) {
61 esp_process_new_packet_intr(context->adapter);
/* SDIO IRQ handler: read the slave's interrupt status register,
 * dispatch the bits via esp_process_interrupt(), then write the same
 * value back to the clear register.
 * NOTE(review): NULL checks on context/int_status and the kfree of
 * int_status appear to be on lines elided from this view — confirm
 * there is no leak on any path. */
65 static void esp_handle_isr(struct sdio_func *func)
67 struct esp_sdio_context *context = NULL;
78 context = sdio_get_drvdata(func);
/* Heap buffer for the 32-bit status; GFP_ATOMIC as we may be in IRQ context */
84 int_status = kmalloc(sizeof(u32), GFP_ATOMIC);
90 /* Read interrupt status register */
91 ret = esp_read_reg(context, ESP_SLAVE_INT_ST_REG,
92 (u8 *) int_status, sizeof(* int_status), ACQUIRE_LOCK);
93 CHECK_SDIO_RW_ERROR(ret);
/* Act on the interrupt bits (e.g. new RX packet pending) */
95 esp_process_interrupt(context, *int_status);
97 /* Clear interrupt status */
98 ret = esp_write_reg(context, ESP_SLAVE_INT_CLR_REG,
99 (u8 *) int_status, sizeof(* int_status), ACQUIRE_LOCK);
100 CHECK_SDIO_RW_ERROR(ret);
/* Raise an interrupt on the ESP slave by writing 'data' to scratch
 * register 7 (used e.g. for OPEN/CLOSE_DATA_PATH, POWER_SAVE_ON/OFF).
 * NOTE(review): the NULL check, the `*val = data` assignment, error
 * handling, kfree(val) and the return value are on lines elided from
 * this view — confirm no leak and that 'data' actually reaches *val. */
105 int generate_slave_intr(struct esp_sdio_context *context, u8 data)
113 val = kmalloc(sizeof(u8), GFP_KERNEL);
121 ret = esp_write_reg(context, ESP_SLAVE_SCRATCH_REG_7, val,
122 sizeof(*val), ACQUIRE_LOCK);
129 static void deinit_sdio_func(struct sdio_func *func)
131 sdio_claim_host(func);
133 sdio_release_irq(func);
134 /* Disable sdio function */
135 sdio_disable_func(func);
136 sdio_release_host(func);
137 sdio_set_drvdata(func, NULL);
/* Query how many TX buffers the slave currently has free for the host.
 * Reads ESP_SLAVE_TOKEN_RDATA, takes the cumulative buffer counter from
 * bits 16+, and converts it into "newly freed since our tx_buffer_count"
 * modulo ESP_TX_BUFFER_MAX.
 * NOTE(review): the NULL/ret checks, assignment into *tx_num, kfree(len)
 * and return are on lines elided from this view — confirm. */
140 static int esp_slave_get_tx_buffer_num(struct esp_sdio_context *context, u32 *tx_num, u8 is_lock_needed)
/* Heap buffer for the 32-bit register read */
145 len = kmalloc(sizeof(u32), GFP_KERNEL);
151 ret = esp_read_reg(context, ESP_SLAVE_TOKEN_RDATA, (u8*) len, sizeof(*len), is_lock_needed);
/* Upper 16 bits hold the slave's cumulative TX buffer count */
158 *len = (*len >> 16) & ESP_TX_BUFFER_MASK;
/* Difference vs. what the host has already consumed, with wrap handling */
159 *len = (*len + ESP_TX_BUFFER_MAX - context->tx_buffer_count) % ESP_TX_BUFFER_MAX;
/* Read how many RX bytes the slave has pending for the host.
 * ESP_SLAVE_PACKET_LEN_REG holds a cumulative byte counter; the pending
 * amount is its difference from context->rx_byte_count, with roll-over
 * handling and a sanity cap at ESP_RX_BUFFER_SIZE.
 * NOTE(review): kmalloc NULL check, the roll-over branch using 'temp',
 * the store into *rx_size, kfree(len) and the return are on lines
 * elided from this view — confirm. */
167 static int esp_get_len_from_slave(struct esp_sdio_context *context, u32 *rx_size, u8 is_lock_needed)
173 len = kmalloc(sizeof(u32), GFP_KERNEL);
179 ret = esp_read_reg(context, ESP_SLAVE_PACKET_LEN_REG,
180 (u8 *) len, sizeof(*len), is_lock_needed);
187 *len &= ESP_SLAVE_LEN_MASK;
/* Pending = cumulative counter minus bytes already consumed (mod wrap) */
189 if (*len >= context->rx_byte_count)
190 *len = (*len + ESP_RX_BYTE_MAX - context->rx_byte_count) % ESP_RX_BYTE_MAX;
192 /* Handle a case of roll over */
193 temp = ESP_RX_BYTE_MAX - context->rx_byte_count;
/* Sanity cap: anything beyond one RX buffer indicates a bogus read */
196 if (*len > ESP_RX_BUFFER_SIZE) {
197 esp_info("Len from slave[%d] exceeds max [%d]\n",
198 *len, ESP_RX_BUFFER_SIZE);
/* Drain any RX data still pending on the slave, discarding the packets,
 * so the slave-side byte counters end in a consistent state.
 * NOTE(review): the loop structure and SKB free around read_packet()
 * are on lines elided from this view — confirm packets are freed. */
209 static void flush_sdio(struct esp_sdio_context *context)
213 if (!context || !context->adapter)
217 skb = read_packet(context->adapter);
224 esp_info("Flushed %d bytes\n", skb->len);
/* Driver .remove callback: tear the transport down when the card goes.
 * Order: stop the optional monitor thread, drop the context back to
 * INIT, purge all TX queues, stop the TX worker, tell the slave to
 * close the data path, detach network/BT devices, release the SDIO
 * function and finally wipe the (global) context.
 * NOTE(review): several guard checks (e.g. NULL context/thread) appear
 * to be on lines elided from this view — confirm. */
231 static void esp_remove(struct sdio_func *func)
233 struct esp_sdio_context *context;
234 uint8_t prio_q_idx = 0;
235 context = sdio_get_drvdata(func);
237 #ifdef CONFIG_ENABLE_MONITOR_PROCESS
239 kthread_stop(monitor_thread);
/* Back to INIT so readers/writers stop treating the link as ready */
242 context->state = ESP_CONTEXT_INIT;
243 for (prio_q_idx=0; prio_q_idx<MAX_PRIORITY_QUEUES; prio_q_idx++)
244 skb_queue_purge(&(sdio_context.tx_q[prio_q_idx]));
248 kthread_stop(tx_thread);
/* Ask the slave firmware to stop exchanging data */
251 generate_slave_intr(context, BIT(ESP_CLOSE_DATA_PATH));
254 context->state = ESP_CONTEXT_DISABLED;
256 if (context->adapter) {
257 esp_remove_card(context->adapter);
259 if (context->adapter->hcidev) {
260 esp_deinit_bt(context->adapter);
266 deinit_sdio_func(context->func);
267 context->func = NULL;
/* Scrub the global context so a re-probe starts clean */
269 memset(context, 0, sizeof(struct esp_sdio_context));
/* Transport ops handed to the core layer.
 * NOTE(review): the .read handler initializer is on a line elided from
 * this view — presumably read_packet; confirm. */
273 static struct esp_if_ops if_ops = {
275 .write = write_packet,
/* Sync the host's counters with the slave's cumulative registers:
 * rx_byte_count from ESP_SLAVE_PACKET_LEN_REG and tx_buffer_count from
 * ESP_SLAVE_TOKEN_RDATA. Called from init_context() and esp_resume().
 * NOTE(review): kmalloc NULL check, per-read error returns, kfree(val)
 * and the final return are on lines elided from this view — confirm. */
278 static int get_firmware_data(struct esp_sdio_context *context)
283 val = kmalloc(sizeof(u32), GFP_KERNEL);
289 /* Initialize rx_byte_count */
290 ret = esp_read_reg(context, ESP_SLAVE_PACKET_LEN_REG,
291 (u8 *) val, sizeof(* val), ACQUIRE_LOCK);
297 esp_info("Rx Pre ====== %d\n", context->rx_byte_count);
298 context->rx_byte_count = *val & ESP_SLAVE_LEN_MASK;
299 esp_info("Rx Pos ====== %d\n", context->rx_byte_count);
301 /* Initialize tx_buffer_count */
302 ret = esp_read_reg(context, ESP_SLAVE_TOKEN_RDATA, (u8 *) val,
303 sizeof(* val), ACQUIRE_LOCK);
/* Upper 16 bits carry the slave's cumulative TX buffer counter */
310 *val = ((*val >> 16) & ESP_TX_BUFFER_MASK);
311 esp_info("Tx Pre ====== %d\n", context->tx_buffer_count);
/* Rebase the counter relative to ESP_MAX_BUF_CNT */
313 if (*val >= ESP_MAX_BUF_CNT)
314 context->tx_buffer_count = (*val) - ESP_MAX_BUF_CNT;
316 context->tx_buffer_count = 0;
317 esp_info("Tx Pos ====== %d\n", context->tx_buffer_count);
/* One-time transport init after probe: sync byte/buffer counters with
 * the slave firmware, bind the core adapter and initialize the priority
 * TX queues (and their item counters).
 * NOTE(review): after the !adapter error log, adapter is still
 * dereferenced below — this appears to rely on esp_get_adapter() never
 * returning NULL; confirm, or the error path is on elided lines. */
323 static int init_context(struct esp_sdio_context *context)
326 uint8_t prio_q_idx = 0;
332 ret = get_firmware_data(context);
336 context->adapter = esp_get_adapter();
338 if (unlikely(!context->adapter))
339 esp_err("Failed to get adapter\n");
341 for (prio_q_idx=0; prio_q_idx<MAX_PRIORITY_QUEUES; prio_q_idx++) {
342 skb_queue_head_init(&(sdio_context.tx_q[prio_q_idx]));
343 atomic_set(&queue_items[prio_q_idx], 0);
346 context->adapter->if_type = ESP_IF_TYPE_SDIO;
/* Read one pending packet from the slave over CMD53 and return it as an
 * SKB. The host is claimed for the whole sequence: query the pending
 * length, allocate an SKB of that size, then pull the data in chunks of
 * up to 4 SDIO blocks from the CMD53 end-address window. Returns NULL
 * on invalid state, zero length, alloc failure or read failure.
 * NOTE(review): the initialization of 'pos' (presumably skb->data), the
 * skb free on the read-error path and end-time accounting appear to be
 * on lines elided from this view — confirm. */
351 static struct sk_buff * read_packet(struct esp_adapter *adapter)
353 u32 len_from_slave, data_left, len_to_read, size, num_blocks;
357 struct esp_sdio_context *context;
359 if (!adapter || !adapter->if_context) {
360 esp_err("INVALID args\n");
364 context = adapter->if_context;
366 if(!context || (context->state != ESP_CONTEXT_READY) || !context->func) {
367 esp_err("Invalid context/state\n");
/* Keep the host claimed across length query + data transfer */
371 sdio_claim_host(context->func);
373 data_left = len_to_read = len_from_slave = num_blocks = 0;
376 ret = esp_get_len_from_slave(context, &len_from_slave, LOCK_ALREADY_ACQUIRED);
378 if (ret || !len_from_slave) {
379 sdio_release_host(context->func);
/* Per-iteration chunk cap: four SDIO blocks */
383 size = ESP_BLOCK_SIZE * 4;
385 if (len_from_slave > size) {
386 esp_info("Rx large packet: %d\n", len_from_slave);
389 skb = esp_alloc_skb(len_from_slave);
392 esp_err("SKB alloc failed\n");
393 sdio_release_host(context->func);
397 skb_put(skb, len_from_slave);
400 data_left = len_from_slave;
403 num_blocks = data_left/ESP_BLOCK_SIZE;
406 if (!context->rx_byte_count) {
407 start_time = ktime_get_ns();
/* Whole blocks first... */
412 len_to_read = num_blocks * ESP_BLOCK_SIZE;
413 ret = esp_read_block(context,
414 ESP_SLAVE_CMD53_END_ADDR - len_to_read,
415 pos, len_to_read, LOCK_ALREADY_ACQUIRED);
/* ...then the sub-block tail, rounded up to 4-byte alignment */
417 len_to_read = data_left;
418 /* 4 byte aligned length */
419 ret = esp_read_block(context,
420 ESP_SLAVE_CMD53_END_ADDR - len_to_read,
421 pos, (len_to_read + 3) & (~3), LOCK_ALREADY_ACQUIRED);
425 esp_err("Failed to read data - %d [%u - %d]\n", ret, num_blocks, len_to_read);
428 sdio_release_host(context->func);
432 data_left -= len_to_read;
/* Track cumulative bytes consumed, modulo the slave's counter wrap */
434 context->rx_byte_count += len_to_read;
435 context->rx_byte_count = context->rx_byte_count % ESP_RX_BYTE_MAX;
437 } while (data_left > 0);
439 sdio_release_host(context->func);
/* Transport .write op: validate the SKB and enqueue it on a priority TX
 * queue. The actual SDIO write happens asynchronously in tx_process().
 * Applies backpressure via esp_tx_pause() once tx_pending reaches
 * TX_MAX_PENDING_COUNT, and drops frames larger than one slave RX
 * buffer. */
444 static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb)
446 u32 max_pkt_size = ESP_RX_BUFFER_SIZE - sizeof(struct esp_payload_header);
447 struct esp_payload_header *payload_header = (struct esp_payload_header *) skb->data;
448 struct esp_skb_cb * cb = NULL;
449 uint8_t prio = PRIO_Q_LOW;
451 if (!adapter || !adapter->if_context || !skb || !skb->data || !skb->len) {
452 esp_err("Invalid args\n");
/* Oversized frame cannot fit one slave RX buffer: drop it */
461 if (skb->len > max_pkt_size) {
462 esp_err("Drop pkt of len[%u] > max SDIO transport len[%u]\n",
463 skb->len, max_pkt_size);
/* Too much already queued: pause the netdev queue for this priv */
469 cb = (struct esp_skb_cb *)skb->cb;
470 if (cb && cb->priv && (atomic_read(&tx_pending) >= TX_MAX_PENDING_COUNT)) {
471 esp_tx_pause(cb->priv);
474 /* esp_err("TX Pause busy");*/
478 /* Enqueue SKB in tx_q */
479 atomic_inc(&tx_pending);
481 /* Notify to process queue */
/* Internal control and HCI traffic get elevated priority; the prio
 * assignments for these branches are on elided lines — presumably
 * PRIO_Q_HIGH / PRIO_Q_MID, confirm */
482 if (payload_header->if_type == ESP_INTERNAL_IF)
484 else if (payload_header->if_type == ESP_HCI_IF)
489 atomic_inc(&queue_items[prio]);
490 skb_queue_tail(&(sdio_context.tx_q[prio]), skb);
/* Check (and reserve) slave RX buffers for a pending host write.
 * Keeps the last-known free count in the static 'buf_available' cache
 * and only re-queries the slave when the cache cannot cover buf_needed;
 * on success the needed buffers are deducted from the cache.
 * Returns BUFFER_AVAILABLE or BUFFER_UNAVAILABLE.
 * NOTE(review): the retry/delay loop around 'retry' is partially on
 * lines elided from this view — confirm. Not thread-safe by itself
 * (static cache); appears to be called only from tx_process(). */
495 static int is_sdio_write_buffer_available(u32 buf_needed)
497 #define BUFFER_AVAILABLE 1
498 #define BUFFER_UNAVAILABLE 0
501 static u32 buf_available = 0;
502 struct esp_sdio_context *context = &sdio_context;
503 u8 retry = MAX_WRITE_RETRIES;
505 /*If buffer needed are less than buffer available
506 then only read for available buffer number from slave*/
507 if (buf_available < buf_needed) {
509 ret = esp_slave_get_tx_buffer_num(context, &buf_available, ACQUIRE_LOCK);
511 if (buf_available < buf_needed) {
513 /* Release SDIO and retry after delay*/
/* Reserve the buffers for the caller */
523 if (buf_available >= buf_needed)
524 buf_available -= buf_needed;
527 /* No buffer available at slave */
528 return BUFFER_UNAVAILABLE;
531 return BUFFER_AVAILABLE;
/* TX worker thread: drains the priority queues and writes packets to
 * the slave over CMD53. Handles netdev-queue resume, slave-buffer
 * accounting and drop-on-no-buffer.
 * NOTE(review): sleep/continue paths, the 'pos' setup (presumably
 * tx_skb->data), the write-retry loop, and several closing braces are
 * on lines elided from this view — confirm before modifying. */
534 static int tx_process(void *data)
540 u32 data_left, len_to_send, pad;
541 struct sk_buff *tx_skb = NULL;
542 struct esp_adapter *adapter = (struct esp_adapter *) data;
543 struct esp_sdio_context *context = NULL;
544 struct esp_skb_cb * cb = NULL;
547 context = adapter->if_context;
549 while (!kthread_should_stop()) {
551 if (context->state != ESP_CONTEXT_READY) {
553 esp_err("not ready");
558 /* TODO: Use wait_event_interruptible_timeout */
/* Dequeue in strict priority order: HIGH, then MID, then LOW */
563 if (atomic_read(&queue_items[PRIO_Q_HIGH]) > 0) {
564 tx_skb = skb_dequeue(&(context->tx_q[PRIO_Q_HIGH]));
568 atomic_dec(&queue_items[PRIO_Q_HIGH]);
569 } else if (atomic_read(&queue_items[PRIO_Q_MID]) > 0) {
570 tx_skb = skb_dequeue(&(context->tx_q[PRIO_Q_MID]));
574 atomic_dec(&queue_items[PRIO_Q_MID]);
575 } else if (atomic_read(&queue_items[PRIO_Q_LOW]) > 0) {
576 tx_skb = skb_dequeue(&(context->tx_q[PRIO_Q_LOW]));
580 atomic_dec(&queue_items[PRIO_Q_LOW]);
583 esp_err("not ready 2 [%d %d]\n",
584 atomic_read(&queue_items[PRIO_Q_OTHERS]),
585 atomic_read(&queue_items[PRIO_Q_SERIAL]));
/* One SKB consumed from the global pending count */
591 if (atomic_read(&tx_pending))
592 atomic_dec(&tx_pending);
594 retry = MAX_WRITE_RETRIES;
596 /* resume network tx queue if bearable load */
597 cb = (struct esp_skb_cb *)tx_skb->cb;
598 if (cb && cb->priv && atomic_read(&tx_pending) < TX_RESUME_THRESHOLD) {
599 esp_tx_resume(cb->priv);
601 esp_raw_tp_queue_resume();
/* Slave RX buffers this packet will consume (ceil divide) */
605 buf_needed = (tx_skb->len + ESP_RX_BUFFER_SIZE - 1) / ESP_RX_BUFFER_SIZE;
607 /*If SDIO slave buffer is available to write then only write data
608 else wait till buffer is available*/
609 ret = is_sdio_write_buffer_available(buf_needed);
/* No slave buffers: drop the packet */
611 dev_kfree_skb(tx_skb);
616 data_left = len_to_send = 0;
618 data_left = tx_skb->len;
/* Padding needed to round the transfer up to a whole SDIO block */
619 pad = ESP_BLOCK_SIZE - (data_left % ESP_BLOCK_SIZE);
624 block_cnt = data_left / ESP_BLOCK_SIZE;
625 len_to_send = data_left;
/* 4-byte-aligned CMD53 write into the slave's address window */
626 ret = esp_write_block(context, ESP_SLAVE_CMD53_END_ADDR - len_to_send,
627 pos, (len_to_send + 3) & (~3), ACQUIRE_LOCK);
630 esp_err("Failed to send data: %d %d %d\n", ret, len_to_send, data_left);
634 data_left -= len_to_send;
639 /* drop the packet */
640 dev_kfree_skb(tx_skb);
/* Account for the slave buffers consumed, modulo counter wrap */
644 context->tx_buffer_count += buf_needed;
645 context->tx_buffer_count = context->tx_buffer_count % ESP_TX_BUFFER_MAX;
647 dev_kfree_skb(tx_skb);
/* Enable the SDIO function, register the IRQ handler and attach the
 * driver context as drvdata — all under sdio_claim_host(). On IRQ-claim
 * failure the function is disabled again. Returns the global context.
 * NOTE(review): NULL-func check and the error-path returns (releasing
 * the host before returning NULL) are on lines elided from this view. */
655 static struct esp_sdio_context * init_sdio_func(struct sdio_func *func)
657 struct esp_sdio_context *context = NULL;
663 context = &sdio_context;
665 context->func = func;
667 sdio_claim_host(func);
669 /* Enable Function */
670 ret = sdio_enable_func(func);
676 ret = sdio_claim_irq(func, esp_handle_isr);
/* Could not get the IRQ: roll back the function enable */
678 sdio_disable_func(func);
682 /* Set private data */
683 sdio_set_drvdata(func, context);
685 context->state = ESP_CONTEXT_INIT;
687 sdio_release_host(func);
692 #ifdef CONFIG_ENABLE_MONITOR_PROCESS
/* Watchdog thread (CONFIG_ENABLE_MONITOR_PROCESS only): periodically
 * samples the slave's packet-length, token and interrupt-status
 * registers. If the slave reports more RX data but rx_byte_count has
 * not advanced since the previous pass, force-read a packet and drop it
 * to unwedge the RX path.
 * NOTE(review): the sleep between passes and the SKB free after the
 * flush are on lines elided from this view — confirm. */
693 static int monitor_process(void *data)
695 u32 val, intr, len_reg, rdata, old_len = 0;
696 struct esp_sdio_context *context = (struct esp_sdio_context *) data;
699 while (!kthread_should_stop()) {
702 val = intr = len_reg = rdata = 0;
/* Cumulative RX byte counter reported by the slave */
704 esp_read_reg(context, ESP_SLAVE_PACKET_LEN_REG,
705 (u8 *) &val, sizeof(val), ACQUIRE_LOCK);
707 len_reg = val & ESP_SLAVE_LEN_MASK;
/* Free TX buffer counter (upper 16 bits of the token register) */
710 esp_read_reg(context, ESP_SLAVE_TOKEN_RDATA, (u8 *) &val,
711 sizeof(val), ACQUIRE_LOCK);
713 rdata = ((val >> 16) & ESP_TX_BUFFER_MASK);
715 esp_read_reg(context, ESP_SLAVE_INT_ST_REG,
716 (u8 *) &intr, sizeof(intr), ACQUIRE_LOCK);
/* RX looks stuck: slave has more data but our counter did not move */
719 if (len_reg > context->rx_byte_count) {
720 if (old_len && (context->rx_byte_count == old_len)) {
721 esp_dbg("Monitor thread ----> [%d - %d] [%d - %d] %d\n",
722 len_reg, context->rx_byte_count,
723 rdata, context->tx_buffer_count, intr);
725 skb = read_packet(context->adapter);
731 esp_dbg("Flushed %d bytes\n", skb->len);
733 /* drop the packet */
/* Remember this pass's counter for the stuck check next time */
739 old_len = context->rx_byte_count;
/* Driver .probe: bind to SDIO function 1 of a detected ESP device.
 * Enables the function, marks the context READY, syncs counters via
 * init_context(), spawns the TX worker (and the optional monitor
 * thread), then opens the data path on the slave.
 * NOTE(review): several error-path returns and the final `return ret;`
 * are on lines elided from this view — confirm cleanup on failure. */
747 static int esp_probe(struct sdio_func *func,
748 const struct sdio_device_id *id)
750 struct esp_sdio_context *context = NULL;
/* Only SDIO function number 1 carries the ESP transport */
753 if (func->num != 1) {
757 esp_info("ESP network device detected\n");
759 context = init_sdio_func(func);
765 context->state = ESP_CONTEXT_READY;
766 atomic_set(&tx_pending, 0);
767 ret = init_context(context);
/* Context init failed: undo the function enable */
769 deinit_sdio_func(func);
773 tx_thread = kthread_run(tx_process, context->adapter, "esp32_TX");
776 esp_err("Failed to create esp32_sdio TX thread\n");
778 context->adapter->dev = &func->dev;
/* Tell the slave the host is ready to exchange data */
779 generate_slave_intr(context, BIT(ESP_OPEN_DATA_PATH));
782 #ifdef CONFIG_ENABLE_MONITOR_PROCESS
783 monitor_thread = kthread_run(monitor_process, context, "Monitor process");
786 esp_err("Failed to create monitor thread\n");
/* PM suspend hook: notify the slave that the host is going to sleep,
 * keep card power across suspend (MMC_PM_KEEP_POWER) and arm the
 * out-of-band wake IRQ.
 * NOTE(review): host_sleep is presumably set on an elided line, and the
 * two "Failed to inform" logs belong to elided NULL-check branches for
 * dev/context — confirm. */
792 static int esp_suspend(struct device *dev)
794 struct sdio_func *func = NULL;
795 struct esp_sdio_context *context = NULL;
798 esp_info("Failed to inform ESP that host is suspending\n");
802 func = dev_to_sdio_func(dev);
804 esp_info("----> Host Suspend\n");
807 context = sdio_get_drvdata(func);
810 esp_info("Failed to inform ESP that host is suspending\n");
/* Ask the slave to enter power-save while the host sleeps */
816 generate_slave_intr(context, BIT(ESP_POWER_SAVE_ON));
/* Keep the card powered so the slave survives host suspend */
819 sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
821 /* Enable OOB IRQ and host wake up */
822 enable_irq(SDIO_OOB_IRQ);
823 enable_irq_wake(SDIO_OOB_IRQ);
/* PM resume hook: disarm the OOB wake IRQ, re-sync the byte/buffer
 * counters with the slave firmware and take it out of power-save.
 * NOTE(review): the "Failed to inform" logs belong to elided NULL-check
 * branches for dev/context; host_sleep is presumably cleared on an
 * elided line — confirm. */
828 static int esp_resume(struct device *dev)
830 struct sdio_func *func = NULL;
831 struct esp_sdio_context *context = NULL;
834 esp_info("Failed to inform ESP that host is awake\n");
838 func = dev_to_sdio_func(dev);
840 esp_info("-----> Host Awake\n");
842 /* Host woke up.. Disable OOB IRQ */
843 disable_irq_wake(SDIO_OOB_IRQ);
844 disable_irq(SDIO_OOB_IRQ);
848 context = sdio_get_drvdata(func);
851 esp_info("Failed to inform ESP that host is awake\n");
855 /* generate_slave_intr(context, BIT(ESP_RESET));*/
/* Counters may have moved while suspended: re-read from the slave */
856 get_firmware_data(context);
858 generate_slave_intr(context, BIT(ESP_POWER_SAVE_OFF));
/* Power-management callbacks; wired into the SDIO driver below */
863 static const struct dev_pm_ops esp_pm_ops = {
864 .suspend = esp_suspend,
865 .resume = esp_resume,
868 /* SDIO driver structure to be registered with kernel */
869 static struct sdio_driver esp_sdio_driver = {
/* NOTE(review): .name, .probe and the pm-ops hookup appear to be on
 * lines elided from this view — confirm against the full file */
871 .id_table = esp_devices,
873 .remove = esp_remove,
875 .owner = THIS_MODULE,
881 int esp_init_interface_layer(struct esp_adapter *adapter)
886 adapter->if_context = &sdio_context;
887 adapter->if_ops = &if_ops;
888 sdio_context.adapter = adapter;
890 return sdio_register_driver(&esp_sdio_driver);
/* Parse the TLV-encoded bootup event sent by the ESP firmware.
 * Each entry is [tag, len, value...]; tags handled: capability bitmap,
 * chip id (SDIO requires classic ESP32), raw-throughput test mode and
 * firmware data. After parsing, the network interface is registered via
 * esp_add_card(); on failure the data path is closed again.
 * NOTE(review): the loop header and the advance of 'pos' are on lines
 * elided from this view; also note the "iterface" typo in the failure
 * log below (runtime string — left untouched here). */
893 void process_event_esp_bootup(struct esp_adapter *adapter, u8 *evt_buf, u8 len)
895 u8 len_left = len, tag_len;
897 struct esp_sdio_context *context = &sdio_context;
908 tag_len = *(pos + 1);
910 esp_info("EVENT: %d\n", *pos);
912 if (*pos == ESP_BOOTUP_CAPABILITY) {
914 adapter->capabilities = *(pos + 2);
915 process_capabilities(adapter);
916 print_capabilities(*(pos + 2));
918 } else if (*pos == ESP_BOOTUP_FIRMWARE_CHIP_ID){
920 esp_info("ESP chipset detected [%s]\n",
921 *(pos+2)==ESP_FIRMWARE_CHIP_ESP32 ? "esp32":
922 *(pos+2)==ESP_FIRMWARE_CHIP_ESP32S2 ? "esp32-s2" :
923 *(pos+2)==ESP_FIRMWARE_CHIP_ESP32C3 ? "esp32-c3" :
924 *(pos+2)==ESP_FIRMWARE_CHIP_ESP32S3 ? "esp32-s3" :
/* This SDIO transport only supports classic ESP32 silicon */
927 if (*(pos+2)!=ESP_FIRMWARE_CHIP_ESP32)
928 esp_err("SDIO is only supported with ESP32\n");
930 } else if (*pos == ESP_BOOTUP_TEST_RAW_TP) {
931 process_test_capabilities(*(pos + 2));
933 } else if (*pos == ESP_BOOTUP_FW_DATA) {
935 if (tag_len != sizeof(struct fw_data))
936 esp_info("Length not matching to firmware data size\n");
/* Bad firmware data: shut the data path back down */
938 if (process_fw_data((struct fw_data*)(pos + 2)))
940 generate_slave_intr(context, BIT(ESP_CLOSE_DATA_PATH));
945 esp_warn("Unsupported tag in event");
/* Consume tag byte + length byte + value */
949 len_left -= (tag_len+2);
952 if (esp_add_card(adapter)) {
953 esp_err("network iterface init failed\n");
954 generate_slave_intr(context, BIT(ESP_CLOSE_DATA_PATH));
958 void esp_deinit_interface_layer(void)
960 sdio_unregister_driver(&esp_sdio_driver);