// SPDX-License-Identifier: GPL-2.0-only
/*
 * Espressif Systems Wireless LAN device driver
 *
 * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
 */

#include <linux/mutex.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "esp_sdio_api.h"
#include "esp_bt_api.h"
#include <linux/kthread.h>
#include "esp_stats.h"
#include "esp_utils.h"
#include "include/esp_kernel_port.h"

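/*
 * Host-side TX flow control: the network TX queue is paused once
 * TX_MAX_PENDING_COUNT packets are pending towards the slave, and
 * resumed when the backlog drains below TX_RESUME_THRESHOLD (1/5th).
 */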
#define MAX_WRITE_RETRIES       2
#define TX_MAX_PENDING_COUNT    200
#define TX_RESUME_THRESHOLD     (TX_MAX_PENDING_COUNT/5)

#define CHECK_SDIO_RW_ERROR(ret) do {                                   \
    if (ret)                                                            \
        esp_err("CMD53 read/write error at %d\n", __LINE__);            \
} while (0)

struct esp_sdio_context sdio_context;
static atomic_t tx_pending;
static atomic_t queue_items[MAX_PRIORITY_QUEUES];

#ifdef CONFIG_ENABLE_MONITOR_PROCESS
struct task_struct *monitor_thread;
#endif
struct task_struct *tx_thread;

volatile u8 host_sleep;

static int init_context(struct esp_sdio_context *context);
static struct sk_buff *read_packet(struct esp_adapter *adapter);
static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb);
/*int deinit_context(struct esp_adapter *adapter);*/

static const struct sdio_device_id esp_devices[] = {
    { SDIO_DEVICE(ESP_VENDOR_ID, ESP_DEVICE_ID_1) },
    { SDIO_DEVICE(ESP_VENDOR_ID, ESP_DEVICE_ID_2) },
    {},
};

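/* Dispatch slave-to-host interrupt causes; currently only the
 * "new RX packet" cause is handled. */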
static void esp_process_interrupt(struct esp_sdio_context *context, u32 int_status)
{
    if (!context)
        return;

    if (int_status & ESP_SLAVE_RX_NEW_PACKET_INT) {
        esp_process_new_packet_intr(context->adapter);
    }
}

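/* SDIO IRQ handler: reads the slave's interrupt status register over
 * CMD53, dispatches the causes, then writes the same mask back to the
 * clear register. The status word is heap-allocated rather than kept
 * on the stack, presumably so the buffer stays DMA-safe for MMC hosts
 * that DMA directly from it. */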
static void esp_handle_isr(struct sdio_func *func)
{
    struct esp_sdio_context *context = NULL;
    u32 *int_status;
    int ret;

    if (!func)
        return;

    context = sdio_get_drvdata(func);
    if (!context || !context->adapter)
        return;

    int_status = kmalloc(sizeof(u32), GFP_ATOMIC);
    if (!int_status)
        return;

    /* Read interrupt status register */
    ret = esp_read_reg(context, ESP_SLAVE_INT_ST_REG,
            (u8 *) int_status, sizeof(*int_status), ACQUIRE_LOCK);
    CHECK_SDIO_RW_ERROR(ret);

    esp_process_interrupt(context, *int_status);

    /* Clear interrupt status */
    ret = esp_write_reg(context, ESP_SLAVE_INT_CLR_REG,
            (u8 *) int_status, sizeof(*int_status), ACQUIRE_LOCK);
    CHECK_SDIO_RW_ERROR(ret);

    kfree(int_status);
}

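/* Ring the slave's doorbell: writing a bit into scratch register 7
 * raises the corresponding event on the ESP side (e.g. open/close
 * data path, power-save on/off). */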
int generate_slave_intr(struct esp_sdio_context *context, u8 data)
{
    u8 *val;
    int ret = 0;

    if (!context)
        return -EINVAL;

    val = kmalloc(sizeof(u8), GFP_KERNEL);
    if (!val)
        return -ENOMEM;

    *val = data;

    ret = esp_write_reg(context, ESP_SLAVE_SCRATCH_REG_7, val,
            sizeof(*val), ACQUIRE_LOCK);

    kfree(val);

    return ret;
}

static void deinit_sdio_func(struct sdio_func *func)
{
    sdio_claim_host(func);
    /* Release IRQ */
    sdio_release_irq(func);
    /* Disable sdio function */
    sdio_disable_func(func);
    sdio_release_host(func);
    sdio_set_drvdata(func, NULL);
}

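/* Read how many TX buffers the slave currently has free. The token
 * count lives in the upper half of ESP_SLAVE_TOKEN_RDATA and advances
 * modulo ESP_TX_BUFFER_MAX, so the free-buffer count is the modular
 * difference from the host-side tx_buffer_count. */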
static int esp_slave_get_tx_buffer_num(struct esp_sdio_context *context, u32 *tx_num, u8 is_lock_needed)
{
    u32 *len;
    int ret = 0;

    len = kmalloc(sizeof(u32), GFP_KERNEL);
    if (!len)
        return -ENOMEM;

    ret = esp_read_reg(context, ESP_SLAVE_TOKEN_RDATA, (u8 *) len, sizeof(*len), is_lock_needed);
    if (ret) {
        kfree(len);
        return ret;
    }

    *len = (*len >> 16) & ESP_TX_BUFFER_MASK;
    *len = (*len + ESP_TX_BUFFER_MAX - context->tx_buffer_count) % ESP_TX_BUFFER_MAX;

    *tx_num = *len;

    kfree(len);
    return ret;
}

int esp_deinit_module(struct esp_adapter *adapter)
{
    /* No cleanup is needed here for second and subsequent bootups over
     * SDIO: removing the SDIO card triggers a complete deinit, and card
     * insertion/detection triggers probing, which performs initialization.
     */
    return 0;
}

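/* Compute how many unread RX bytes the slave holds. The length
 * register is a free-running counter modulo ESP_RX_BYTE_MAX;
 * subtracting the host-side rx_byte_count (with rollover handling)
 * yields the pending payload size. */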
static int esp_get_len_from_slave(struct esp_sdio_context *context, u32 *rx_size, u8 is_lock_needed)
{
    u32 *len;
    u32 temp;
    int ret = 0;

    len = kmalloc(sizeof(u32), GFP_KERNEL);
    if (!len)
        return -ENOMEM;

    ret = esp_read_reg(context, ESP_SLAVE_PACKET_LEN_REG,
            (u8 *) len, sizeof(*len), is_lock_needed);
    if (ret) {
        kfree(len);
        return ret;
    }

    *len &= ESP_SLAVE_LEN_MASK;

    if (*len >= context->rx_byte_count) {
        *len = (*len + ESP_RX_BYTE_MAX - context->rx_byte_count) % ESP_RX_BYTE_MAX;
    } else {
        /* Handle a case of roll over */
        temp = ESP_RX_BYTE_MAX - context->rx_byte_count;
        *len = temp + *len;

        if (*len > ESP_RX_BUFFER_SIZE) {
            esp_info("Len from slave[%d] exceeds max [%d]\n",
                    *len, ESP_RX_BUFFER_SIZE);
        }
    }

    *rx_size = *len;

    kfree(len);
    return 0;
}

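/* Drain and drop any packets still pending on the slave, so a stale
 * RX backlog does not survive across a restart of the data path. */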
static void flush_sdio(struct esp_sdio_context *context)
{
    struct sk_buff *skb;

    if (!context || !context->adapter)
        return;

    while (1) {
        skb = read_packet(context->adapter);

        if (!skb)
            break;

        if (skb->len)
            esp_info("Flushed %d bytes\n", skb->len);

        dev_kfree_skb(skb);
    }
}

static void esp_remove(struct sdio_func *func)
{
    struct esp_sdio_context *context;
    uint8_t prio_q_idx = 0;

    context = sdio_get_drvdata(func);

#ifdef CONFIG_ENABLE_MONITOR_PROCESS
    if (monitor_thread)
        kthread_stop(monitor_thread);
#endif
    if (context) {
        context->state = ESP_CONTEXT_INIT;
        for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++)
            skb_queue_purge(&(sdio_context.tx_q[prio_q_idx]));
    }

    if (tx_thread)
        kthread_stop(tx_thread);

    if (context) {
        generate_slave_intr(context, BIT(ESP_CLOSE_DATA_PATH));

        context->state = ESP_CONTEXT_DISABLED;

        if (context->adapter) {
            esp_remove_card(context->adapter);

            if (context->adapter->hcidev) {
                esp_deinit_bt(context->adapter);
            }
        }

        if (context->func) {
            deinit_sdio_func(context->func);
            context->func = NULL;
        }

        memset(context, 0, sizeof(struct esp_sdio_context));
    }

    esp_debug("ESP SDIO cleanup completed\n");
}

static struct esp_if_ops if_ops = {
    .read = read_packet,
    .write = write_packet,
};

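/* Snapshot the slave's current RX byte counter and TX buffer token so
 * that host-side accounting starts in sync with the firmware. */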
static int get_firmware_data(struct esp_sdio_context *context)
{
    u32 *val;
    int ret = 0;

    val = kmalloc(sizeof(u32), GFP_KERNEL);
    if (!val)
        return -ENOMEM;

    /* Initialize rx_byte_count */
    ret = esp_read_reg(context, ESP_SLAVE_PACKET_LEN_REG,
            (u8 *) val, sizeof(*val), ACQUIRE_LOCK);
    if (ret) {
        kfree(val);
        return ret;
    }

    esp_info("Rx Pre ====== %d\n", context->rx_byte_count);
    context->rx_byte_count = *val & ESP_SLAVE_LEN_MASK;
    esp_info("Rx Post ====== %d\n", context->rx_byte_count);

    /* Initialize tx_buffer_count */
    ret = esp_read_reg(context, ESP_SLAVE_TOKEN_RDATA, (u8 *) val,
            sizeof(*val), ACQUIRE_LOCK);
    if (ret) {
        kfree(val);
        return ret;
    }

    *val = ((*val >> 16) & ESP_TX_BUFFER_MASK);
    esp_info("Tx Pre ====== %d\n", context->tx_buffer_count);

    if (*val >= ESP_MAX_BUF_CNT)
        context->tx_buffer_count = (*val) - ESP_MAX_BUF_CNT;
    else
        context->tx_buffer_count = 0;
    esp_info("Tx Post ====== %d\n", context->tx_buffer_count);

    kfree(val);
    return ret;
}

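/* One-time context setup at probe: sync counters with the firmware,
 * attach the adapter, and initialize the priority TX queues. */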
static int init_context(struct esp_sdio_context *context)
{
    int ret = 0;
    uint8_t prio_q_idx = 0;

    if (!context)
        return -EINVAL;

    ret = get_firmware_data(context);
    if (ret)
        return ret;

    context->adapter = esp_get_adapter();

    if (unlikely(!context->adapter))
        esp_err("Failed to get adapter\n");

    for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
        skb_queue_head_init(&(sdio_context.tx_q[prio_q_idx]));
        atomic_set(&queue_items[prio_q_idx], 0);
    }

    context->adapter->if_type = ESP_IF_TYPE_SDIO;

    return ret;
}

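/* RX path. The pending length is read from the slave, an skb of that
 * size is allocated, and the payload is pulled over CMD53. Reads are
 * issued against ESP_SLAVE_CMD53_END_ADDR minus the transfer size:
 * full-block transfers first, then a final byte-mode transfer padded
 * up to 4-byte alignment for the remainder. */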
static struct sk_buff *read_packet(struct esp_adapter *adapter)
{
    u32 len_from_slave, data_left, len_to_read, size, num_blocks;
    int ret = 0;
    struct sk_buff *skb = NULL;
    u8 *pos = NULL;
    u64 start_time = 0; /* raw-throughput instrumentation; only set here */
    struct esp_sdio_context *context;

    if (!adapter || !adapter->if_context) {
        esp_err("INVALID args\n");
        return NULL;
    }

    context = adapter->if_context;

    if (!context || (context->state != ESP_CONTEXT_READY) || !context->func) {
        esp_err("Invalid context/state\n");
        return NULL;
    }

    sdio_claim_host(context->func);

    data_left = len_to_read = len_from_slave = num_blocks = 0;

    ret = esp_get_len_from_slave(context, &len_from_slave, LOCK_ALREADY_ACQUIRED);

    if (ret || !len_from_slave) {
        sdio_release_host(context->func);
        return NULL;
    }

    size = ESP_BLOCK_SIZE * 4;

    if (len_from_slave > size) {
        esp_info("Rx large packet: %d\n", len_from_slave);
    }

    skb = esp_alloc_skb(len_from_slave);

    if (!skb) {
        esp_err("SKB alloc failed\n");
        sdio_release_host(context->func);
        return NULL;
    }

    skb_put(skb, len_from_slave);
    pos = skb->data;

    data_left = len_from_slave;

    do {
        num_blocks = data_left/ESP_BLOCK_SIZE;

        if (!context->rx_byte_count) {
            start_time = ktime_get_ns();
        }

        if (num_blocks) {
            len_to_read = num_blocks * ESP_BLOCK_SIZE;
            ret = esp_read_block(context,
                    ESP_SLAVE_CMD53_END_ADDR - len_to_read,
                    pos, len_to_read, LOCK_ALREADY_ACQUIRED);
        } else {
            len_to_read = data_left;
            /* 4 byte aligned length */
            ret = esp_read_block(context,
                    ESP_SLAVE_CMD53_END_ADDR - len_to_read,
                    pos, (len_to_read + 3) & (~3), LOCK_ALREADY_ACQUIRED);
        }

        if (ret) {
            esp_err("Failed to read data - %d [%u - %d]\n", ret, num_blocks, len_to_read);
            dev_kfree_skb(skb);
            sdio_release_host(context->func);
            return NULL;
        }

        data_left -= len_to_read;
        pos += len_to_read;
        context->rx_byte_count += len_to_read;
        context->rx_byte_count = context->rx_byte_count % ESP_RX_BYTE_MAX;

    } while (data_left > 0);

    sdio_release_host(context->func);

    return skb;
}

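/* TX entry point called by the upper layer. Packets are not written
 * to the bus here; they are classified by interface type and queued on
 * one of the priority TX queues, which tx_process() drains. When the
 * backlog reaches TX_MAX_PENDING_COUNT the caller's queue is paused. */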
static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb)
{
    u32 max_pkt_size = ESP_RX_BUFFER_SIZE - sizeof(struct esp_payload_header);
    struct esp_payload_header *payload_header = NULL;
    struct esp_skb_cb *cb = NULL;
    uint8_t prio = PRIO_Q_LOW;

    if (!adapter || !adapter->if_context || !skb || !skb->data || !skb->len) {
        esp_err("Invalid args\n");
        if (skb)
            dev_kfree_skb(skb);
        return -EINVAL;
    }

    payload_header = (struct esp_payload_header *) skb->data;

    if (skb->len > max_pkt_size) {
        esp_err("Drop pkt of len[%u] > max SDIO transport len[%u]\n",
                skb->len, max_pkt_size);
        dev_kfree_skb(skb);
        return -EPERM;
    }

    cb = (struct esp_skb_cb *)skb->cb;
    if (cb && cb->priv && (atomic_read(&tx_pending) >= TX_MAX_PENDING_COUNT)) {
        esp_tx_pause(cb->priv);
        dev_kfree_skb(skb);
        /* esp_err("TX Pause busy");*/
        return -EBUSY;
    }

    /* Enqueue SKB in tx_q */
    atomic_inc(&tx_pending);

    /* Notify to process queue */
    if (payload_header->if_type == ESP_INTERNAL_IF)
        prio = PRIO_Q_HIGH;
    else if (payload_header->if_type == ESP_HCI_IF)
        prio = PRIO_Q_MID;
    else
        prio = PRIO_Q_LOW;

    atomic_inc(&queue_items[prio]);
    skb_queue_tail(&(sdio_context.tx_q[prio]), skb);

    return 0;
}

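/* Check (and reserve) slave TX buffers before a write. A static cached
 * count avoids re-reading the token register for every packet; it is
 * refreshed from the slave only when the cache cannot cover the
 * request, with a short retry loop before giving up. */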
static int is_sdio_write_buffer_available(u32 buf_needed)
{
#define BUFFER_AVAILABLE        1
#define BUFFER_UNAVAILABLE      0

    int ret = 0;
    static u32 buf_available;
    struct esp_sdio_context *context = &sdio_context;
    u8 retry = MAX_WRITE_RETRIES;

    /* Read the available buffer count from the slave only when the
     * cached availability cannot cover the buffers needed. */
    if (buf_available < buf_needed) {
        while (retry) {
            ret = esp_slave_get_tx_buffer_num(context, &buf_available, ACQUIRE_LOCK);

            if (ret || (buf_available < buf_needed)) {
                /* Release SDIO and retry after delay */
                retry--;
                usleep_range(10, 50);
                continue;
            }

            break;
        }
    }

    if (buf_available >= buf_needed) {
        buf_available -= buf_needed;
    } else {
        /* No buffer available at slave */
        return BUFFER_UNAVAILABLE;
    }

    return BUFFER_AVAILABLE;
}

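/* TX worker thread: drains the priority queues in strict order (HIGH,
 * then MID, then LOW), reserves slave buffers, pads the length up to a
 * full SDIO block, and pushes the payload over CMD53. It also resumes
 * the network queue once the backlog falls below the resume threshold. */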
static int tx_process(void *data)
{
    int ret = 0;
    u32 block_cnt = 0;
    u32 buf_needed = 0;
    u8 *pos = NULL;
    u8 retry;
    u32 data_left, len_to_send, pad;
    struct sk_buff *tx_skb = NULL;
    struct esp_adapter *adapter = (struct esp_adapter *) data;
    struct esp_sdio_context *context = NULL;
    struct esp_skb_cb *cb = NULL;

    context = adapter->if_context;

    while (!kthread_should_stop()) {

        if (context->state != ESP_CONTEXT_READY) {
            msleep(10);
            esp_err("not ready");
            continue;
        }

        /* TODO: Use wait_event_interruptible_timeout */

        if (atomic_read(&queue_items[PRIO_Q_HIGH]) > 0) {
            tx_skb = skb_dequeue(&(context->tx_q[PRIO_Q_HIGH]));
            if (!tx_skb)
                continue;
            atomic_dec(&queue_items[PRIO_Q_HIGH]);
        } else if (atomic_read(&queue_items[PRIO_Q_MID]) > 0) {
            tx_skb = skb_dequeue(&(context->tx_q[PRIO_Q_MID]));
            if (!tx_skb)
                continue;
            atomic_dec(&queue_items[PRIO_Q_MID]);
        } else if (atomic_read(&queue_items[PRIO_Q_LOW]) > 0) {
            tx_skb = skb_dequeue(&(context->tx_q[PRIO_Q_LOW]));
            if (!tx_skb)
                continue;
            atomic_dec(&queue_items[PRIO_Q_LOW]);
        } else {
            esp_err("not ready 2 [%d %d %d]\n",
                    atomic_read(&queue_items[PRIO_Q_HIGH]),
                    atomic_read(&queue_items[PRIO_Q_MID]),
                    atomic_read(&queue_items[PRIO_Q_LOW]));
            msleep(1);
            continue;
        }

        if (atomic_read(&tx_pending))
            atomic_dec(&tx_pending);

        retry = MAX_WRITE_RETRIES;

        /* resume network tx queue if bearable load */
        cb = (struct esp_skb_cb *)tx_skb->cb;
        if (cb && cb->priv && atomic_read(&tx_pending) < TX_RESUME_THRESHOLD) {
            esp_tx_resume(cb->priv);
            esp_raw_tp_queue_resume();
        }

        buf_needed = (tx_skb->len + ESP_RX_BUFFER_SIZE - 1) / ESP_RX_BUFFER_SIZE;

        /* Write data only when SDIO slave buffers are available (the
         * check retries internally); otherwise drop the packet. */
        ret = is_sdio_write_buffer_available(buf_needed);
        if (!ret) {
            dev_kfree_skb(tx_skb);
            continue;
        }

        pos = tx_skb->data;
        data_left = len_to_send = 0;

        data_left = tx_skb->len;
        pad = ESP_BLOCK_SIZE - (data_left % ESP_BLOCK_SIZE);
        data_left += pad;

        do {
            block_cnt = data_left / ESP_BLOCK_SIZE;
            len_to_send = data_left;
            ret = esp_write_block(context, ESP_SLAVE_CMD53_END_ADDR - len_to_send,
                    pos, (len_to_send + 3) & (~3), ACQUIRE_LOCK);

            if (ret) {
                esp_err("Failed to send data: %d %d %d\n", ret, len_to_send, data_left);
                break;
            }

            data_left -= len_to_send;
            pos += len_to_send;
        } while (data_left);

        if (ret) {
            /* drop the packet */
            dev_kfree_skb(tx_skb);
            continue;
        }

        context->tx_buffer_count += buf_needed;
        context->tx_buffer_count = context->tx_buffer_count % ESP_TX_BUFFER_MAX;

        dev_kfree_skb(tx_skb);
        tx_skb = NULL;
    }

    return 0;
}

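/* Bring up the SDIO function: enable it, hook the IRQ handler, and
 * stash the context as driver private data, with the host claimed
 * around the setup steps. Called from probe. */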
static struct esp_sdio_context *init_sdio_func(struct sdio_func *func, int *sdio_ret)
{
    struct esp_sdio_context *context = NULL;
    int ret = 0;

    if (!func)
        return NULL;

    context = &sdio_context;

    context->func = func;

    sdio_claim_host(func);

    /* Enable Function */
    ret = sdio_enable_func(func);
    if (ret) {
        esp_err("sdio_enable_func ret: %d\n", ret);
        if (sdio_ret)
            *sdio_ret = ret;
        sdio_release_host(func);
        return NULL;
    }

    /* Register IRQ */
    ret = sdio_claim_irq(func, esp_handle_isr);
    if (ret) {
        esp_err("sdio_claim_irq ret: %d\n", ret);
        sdio_disable_func(func);
        if (sdio_ret)
            *sdio_ret = ret;
        sdio_release_host(func);
        return NULL;
    }

    /* Set private data */
    sdio_set_drvdata(func, context);

    context->state = ESP_CONTEXT_INIT;

    sdio_release_host(func);

    return context;
}

#ifdef CONFIG_ENABLE_MONITOR_PROCESS
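/* Optional watchdog thread: periodically samples the slave's length,
 * token and interrupt-status registers; if the RX counter stalls while
 * the slave reports more data, it force-reads (and drops) a packet to
 * unstick the pipe. */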
static int monitor_process(void *data)
{
    u32 val, intr, len_reg, rdata, old_len = 0;
    struct esp_sdio_context *context = (struct esp_sdio_context *) data;
    struct sk_buff *skb;

    while (!kthread_should_stop()) {
        msleep(5000); /* poll interval: assumed value, original not shown */

        val = intr = len_reg = rdata = 0;

        esp_read_reg(context, ESP_SLAVE_PACKET_LEN_REG,
                (u8 *) &val, sizeof(val), ACQUIRE_LOCK);

        len_reg = val & ESP_SLAVE_LEN_MASK;

        val = 0;
        esp_read_reg(context, ESP_SLAVE_TOKEN_RDATA, (u8 *) &val,
                sizeof(val), ACQUIRE_LOCK);

        rdata = ((val >> 16) & ESP_TX_BUFFER_MASK);

        esp_read_reg(context, ESP_SLAVE_INT_ST_REG,
                (u8 *) &intr, sizeof(intr), ACQUIRE_LOCK);

        if (len_reg > context->rx_byte_count) {
            if (old_len && (context->rx_byte_count == old_len)) {
                esp_dbg("Monitor thread ----> [%d - %d] [%d - %d] %d\n",
                        len_reg, context->rx_byte_count,
                        rdata, context->tx_buffer_count, intr);

                skb = read_packet(context->adapter);

                if (skb && skb->len)
                    esp_dbg("Flushed %d bytes\n", skb->len);

                /* drop the packet */
                if (skb)
                    dev_kfree_skb(skb);
            }
        }

        old_len = context->rx_byte_count;
    }

    return 0;
}
#endif

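/* Probe: runs when a device matching esp_devices is inserted. Only
 * SDIO function 1 is used. Optionally overrides the bus clock, syncs
 * counters, starts the TX thread, and opens the data path on the
 * slave. */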
static int esp_probe(struct sdio_func *func,
        const struct sdio_device_id *id)
{
    struct esp_sdio_context *context = NULL;
    int ret = 0;

    if (func->num != 1) {
        return -EINVAL;
    }

    esp_info("ESP network device detected\n");

    context = init_sdio_func(func, &ret);
    if (!context)
        return ret ? ret : -EINVAL;

    if (sdio_context.sdio_clk_mhz) {
        struct mmc_host *host = func->card->host;
        u32 hz = sdio_context.sdio_clk_mhz * NUMBER_1M;

        /* Expansion of mmc_set_clock() that isn't exported */
        if (hz < host->f_min)
            hz = host->f_min;
        if (hz > host->f_max)
            hz = host->f_max;
        host->ios.clock = hz;
        host->ops->set_ios(host, &host->ios);
    }

    context->state = ESP_CONTEXT_READY;
    atomic_set(&tx_pending, 0);
    ret = init_context(context);
    if (ret) {
        deinit_sdio_func(func);
        return ret;
    }

    tx_thread = kthread_run(tx_process, context->adapter, "esp_TX");
    if (IS_ERR(tx_thread)) {
        esp_err("Failed to create esp_sdio TX thread\n");
        tx_thread = NULL;
    }

    context->adapter->dev = &func->dev;
    generate_slave_intr(context, BIT(ESP_OPEN_DATA_PATH));

#ifdef CONFIG_ENABLE_MONITOR_PROCESS
    monitor_thread = kthread_run(monitor_process, context, "Monitor process");
    if (IS_ERR(monitor_thread)) {
        esp_err("Failed to create monitor thread\n");
        monitor_thread = NULL;
    }
#endif

    esp_debug("ESP SDIO probe completed\n");

    return ret;
}

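/* System PM hooks: on suspend the slave is told to enter power save
 * via the scratch-register doorbell, and the card is kept powered
 * (MMC_PM_KEEP_POWER) so no re-probe is needed on resume. SDIO_OOB_IRQ
 * is assumed to be a board-specific out-of-band wake interrupt. */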
static int esp_suspend(struct device *dev)
{
    struct sdio_func *func = NULL;
    struct esp_sdio_context *context = NULL;

    if (!dev) {
        esp_info("Failed to inform ESP that host is suspending\n");
        return -EINVAL;
    }

    func = dev_to_sdio_func(dev);

    esp_info("----> Host Suspend\n");

    context = sdio_get_drvdata(func);
    if (!context) {
        esp_info("Failed to inform ESP that host is suspending\n");
        return -EINVAL;
    }

    host_sleep = 1;

    generate_slave_intr(context, BIT(ESP_POWER_SAVE_ON));

    sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);

    /* Enable OOB IRQ and host wake-up */
    enable_irq(SDIO_OOB_IRQ);
    enable_irq_wake(SDIO_OOB_IRQ);

    return 0;
}

static int esp_resume(struct device *dev)
{
    struct sdio_func *func = NULL;
    struct esp_sdio_context *context = NULL;

    if (!dev) {
        esp_info("Failed to inform ESP that host is awake\n");
        return -EINVAL;
    }

    func = dev_to_sdio_func(dev);

    esp_info("-----> Host Awake\n");

    /* Host woke up: disable the OOB wake IRQ */
    disable_irq_wake(SDIO_OOB_IRQ);
    disable_irq(SDIO_OOB_IRQ);

    context = sdio_get_drvdata(func);
    if (!context) {
        esp_info("Failed to inform ESP that host is awake\n");
        return -EINVAL;
    }

    /* generate_slave_intr(context, BIT(ESP_RESET));*/
    get_firmware_data(context);

    generate_slave_intr(context, BIT(ESP_POWER_SAVE_OFF));

    host_sleep = 0;

    return 0;
}

static const struct dev_pm_ops esp_pm_ops = {
    .suspend = esp_suspend,
    .resume = esp_resume,
};

static const struct of_device_id esp_sdio_of_match[] = {
    { .compatible = "espressif,esp_sdio", },
    { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, esp_sdio_of_match);

/* SDIO driver structure to be registered with kernel */
static struct sdio_driver esp_sdio_driver = {
    .name = KBUILD_MODNAME,
    .id_table = esp_devices,
    .probe = esp_probe,
    .remove = esp_remove,
    .drv = {
        .name = KBUILD_MODNAME,
        .owner = THIS_MODULE,
        .pm = &esp_pm_ops,
        .of_match_table = esp_sdio_of_match,
    },
};

int esp_init_interface_layer(struct esp_adapter *adapter, u32 speed)
{
    if (!adapter)
        return -EINVAL;

    adapter->if_context = &sdio_context;
    adapter->if_ops = &if_ops;
    sdio_context.adapter = adapter;
    sdio_context.sdio_clk_mhz = speed;

    return sdio_register_driver(&esp_sdio_driver);
}

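/* Over SDIO, only ESP32 and ESP32-C6 firmware chipsets are accepted;
 * other chipsets are rejected early so a wrong-transport setup is
 * caught at boot rather than failing later. */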
int esp_validate_chipset(struct esp_adapter *adapter, u8 chipset)
{
    int ret = 0;

    switch (chipset) {
    case ESP_FIRMWARE_CHIP_ESP32:
    case ESP_FIRMWARE_CHIP_ESP32C6:
        adapter->chipset = chipset;
        esp_info("Chipset=%s ID=%02x detected over SDIO\n", esp_chipname_from_id(chipset), chipset);
        break;
    case ESP_FIRMWARE_CHIP_ESP32S2:
    case ESP_FIRMWARE_CHIP_ESP32S3:
    case ESP_FIRMWARE_CHIP_ESP32C2:
    case ESP_FIRMWARE_CHIP_ESP32C3:
        esp_err("Chipset=%s ID=%02x not supported for SDIO\n", esp_chipname_from_id(chipset), chipset);
        adapter->chipset = ESP_FIRMWARE_CHIP_UNRECOGNIZED;
        break;
    default:
        esp_err("Unrecognized Chipset ID=%02x\n", chipset);
        adapter->chipset = ESP_FIRMWARE_CHIP_UNRECOGNIZED;
        break;
    }

    return ret;
}

int esp_adjust_spi_clock(struct esp_adapter *adapter, u8 spi_clk_mhz)
{
    /* SPI bus specific call, silently discard */
    return 0;
}

void esp_deinit_interface_layer(void)
{
    sdio_unregister_driver(&esp_sdio_driver);
}