1 // SPDX-License-Identifier: GPL-2.0-only
3 * Espressif Systems Wireless LAN device driver
5 * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
9 #include <linux/mutex.h>
10 #include <linux/mmc/sdio.h>
11 #include <linux/mmc/sdio_func.h>
12 #include <linux/mmc/sdio_ids.h>
13 #include <linux/mmc/card.h>
14 #include <linux/mmc/host.h>
15 #include <linux/module.h>
17 #include "esp_sdio_api.h"
19 #include "esp_bt_api.h"
20 #include <linux/kthread.h>
21 #include "esp_stats.h"
22 #include "esp_utils.h"
23 #include "include/esp_kernel_port.h"
/* Set when the driver is loaded in raw-throughput benchmark mode
 * (defined in another translation unit). */
25 extern u32 raw_tp_mode;
/* Retry budget when polling the slave for free TX buffers. */
26 #define MAX_WRITE_RETRIES 2
/* Flow control thresholds: pause the network TX queue once this many
 * skbs are pending; resume when pending drops below 1/5 of the limit. */
27 #define TX_MAX_PENDING_COUNT 200
28 #define TX_RESUME_THRESHOLD (TX_MAX_PENDING_COUNT/5)
/* Log a CMD53 (multi-byte SDIO transfer) failure with its call site.
 * NOTE(review): only part of this macro is visible in this listing; the
 * guarding condition and the closing "} while (0)" are not shown. */
30 #define CHECK_SDIO_RW_ERROR(ret) do { \
32 esp_err("CMD53 read/write error at %d\n", __LINE__); \
/* Single global transport context shared across this file. */
35 struct esp_sdio_context sdio_context;
/* Number of skbs queued for TX but not yet sent (flow control). */
36 static atomic_t tx_pending;
/* Per-priority counters mirrored with the tx_q skb queue lengths. */
37 static atomic_t queue_items[MAX_PRIORITY_QUEUES];
39 #ifdef CONFIG_ENABLE_MONITOR_PROCESS
/* Optional watchdog thread that flushes stuck RX data (see monitor_process). */
40 struct task_struct *monitor_thread;
/* Kthread draining the priority TX queues (see tx_process). */
42 struct task_struct *tx_thread;
/* Non-zero while the host is suspended; read from other threads. */
44 volatile u8 host_sleep;
46 static int init_context(struct esp_sdio_context *context);
47 static struct sk_buff *read_packet(struct esp_adapter *adapter);
48 static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb);
49 /*int deinit_context(struct esp_adapter *adapter);*/
/* SDIO vendor/device IDs this driver binds to (ESP32 and ESP32-C6).
 * NOTE(review): the terminating sentinel entry is not visible here. */
51 static const struct sdio_device_id esp_devices[] = {
52 { SDIO_DEVICE(ESP_VENDOR_ID_1, ESP_DEVICE_ID_ESP32_1) },
53 { SDIO_DEVICE(ESP_VENDOR_ID_1, ESP_DEVICE_ID_ESP32_2) },
54 { SDIO_DEVICE(ESP_VENDOR_ID_2, ESP_DEVICE_ID_ESP32C6_1) },
55 { SDIO_DEVICE(ESP_VENDOR_ID_2, ESP_DEVICE_ID_ESP32C6_2) },
/* Dispatch slave interrupt status bits. Only the "new RX packet" bit is
 * handled in the lines visible here; it hands off to the adapter's RX path. */
59 static void esp_process_interrupt(struct esp_sdio_context *context, u32 int_status)
65 if (int_status & ESP_SLAVE_RX_NEW_PACKET_INT) {
66 esp_process_new_packet_intr(context->adapter);
/* SDIO IRQ handler (registered via sdio_claim_irq): read the slave's
 * interrupt status register, process the bits, then write the same value
 * back to the clear register to acknowledge. */
70 static void esp_handle_isr(struct sdio_func *func)
72 struct esp_sdio_context *context = NULL;
83 context = sdio_get_drvdata(func);
/* Status goes through a heap buffer; GFP_ATOMIC because this can run in
 * interrupt context. NOTE(review): the matching kfree() is not visible
 * in this listing — confirm it exists in the full source. */
89 int_status = kmalloc(sizeof(u32), GFP_ATOMIC);
95 /* Read interrupt status register */
96 ret = esp_read_reg(context, ESP_SLAVE_INT_ST_REG,
97 (u8 *) int_status, sizeof(*int_status), ACQUIRE_LOCK);
98 CHECK_SDIO_RW_ERROR(ret);
100 esp_process_interrupt(context, *int_status);
102 /* Clear interrupt status */
103 ret = esp_write_reg(context, ESP_SLAVE_INT_CLR_REG,
104 (u8 *) int_status, sizeof(*int_status), ACQUIRE_LOCK);
105 CHECK_SDIO_RW_ERROR(ret);
/* Signal the slave by writing 'data' into scratch register 7, which the
 * firmware polls/interprets (e.g. open/close data path, power-save bits).
 * NOTE(review): the allocation null-check, the store of 'data' into *val
 * and the kfree() are not visible in this listing. */
110 int generate_slave_intr(struct esp_sdio_context *context, u8 data)
118 val = kmalloc(sizeof(u8), GFP_KERNEL);
126 ret = esp_write_reg(context, ESP_SLAVE_SCRATCH_REG_7, val,
127 sizeof(*val), ACQUIRE_LOCK);
134 static void deinit_sdio_func(struct sdio_func *func)
136 sdio_claim_host(func);
138 sdio_release_irq(func);
139 /* Disable sdio function */
140 sdio_disable_func(func);
141 sdio_release_host(func);
142 sdio_set_drvdata(func, NULL);
/* Query how many TX buffers the slave currently has free.
 * The count lives in bits [16..] of ESP_SLAVE_TOKEN_RDATA; the modular
 * subtraction against our running tx_buffer_count converts the slave's
 * cumulative token value into an available-buffer delta. */
145 static int esp_slave_get_tx_buffer_num(struct esp_sdio_context *context, u32 *tx_num, u8 is_lock_needed)
150 len = kmalloc(sizeof(u32), GFP_KERNEL);
156 ret = esp_read_reg(context, ESP_SLAVE_TOKEN_RDATA, (u8 *) len, sizeof(*len), is_lock_needed);
163 *len = (*len >> 16) & ESP_TX_BUFFER_MASK;
164 *len = (*len + ESP_TX_BUFFER_MAX - context->tx_buffer_count) % ESP_TX_BUFFER_MAX;
/* Intentionally a no-op for SDIO: full teardown happens in esp_remove()
 * on card removal, and re-insertion re-probes and re-initializes. */
172 int esp_deinit_module(struct esp_adapter *adapter)
174 /* Second & onward bootup cleanup is not required for SDIO:
175 * As Removal of SDIO triggers complete Deinit and SDIO insertion/
176 * detection, triggers probing which does initialization.
/* Read the slave's cumulative RX byte counter and convert it into the
 * number of bytes currently pending for the host, handling counter
 * roll-over at ESP_RX_BYTE_MAX. Result is sanity-bounded against
 * ESP_RX_BUFFER_SIZE. */
181 static int esp_get_len_from_slave(struct esp_sdio_context *context, u32 *rx_size, u8 is_lock_needed)
187 len = kmalloc(sizeof(u32), GFP_KERNEL);
193 ret = esp_read_reg(context, ESP_SLAVE_PACKET_LEN_REG,
194 (u8 *) len, sizeof(*len), is_lock_needed);
201 *len &= ESP_SLAVE_LEN_MASK;
203 if (*len >= context->rx_byte_count)
204 *len = (*len + ESP_RX_BYTE_MAX - context->rx_byte_count) % ESP_RX_BYTE_MAX;
206 /* Handle a case of roll over */
207 temp = ESP_RX_BYTE_MAX - context->rx_byte_count;
210 if (*len > ESP_RX_BUFFER_SIZE) {
211 esp_info("Len from slave[%d] exceeds max [%d]\n",
212 *len, ESP_RX_BUFFER_SIZE);
/* Drain any RX data still pending on the slave (used at teardown so the
 * firmware's buffers don't stay clogged). Packets read here are logged
 * and discarded rather than delivered up the stack. */
223 static void flush_sdio(struct esp_sdio_context *context)
227 if (!context || !context->adapter)
231 skb = read_packet(context->adapter);
238 esp_info("Flushed %d bytes\n", skb->len);
/* SDIO removal callback: stop worker threads, purge all priority TX
 * queues, tell the slave to close the data path, unwind the adapter/BT
 * registration, release the SDIO function, and zero the context. */
245 static void esp_remove(struct sdio_func *func)
247 struct esp_sdio_context *context;
248 uint8_t prio_q_idx = 0;
250 context = sdio_get_drvdata(func);
252 #ifdef CONFIG_ENABLE_MONITOR_PROCESS
254 kthread_stop(monitor_thread);
/* Back to INIT so the TX/RX paths stop treating the context as READY. */
257 context->state = ESP_CONTEXT_INIT;
258 for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++)
259 skb_queue_purge(&(sdio_context.tx_q[prio_q_idx]));
263 kthread_stop(tx_thread);
/* Tell the firmware the host-side data path is going away. */
266 generate_slave_intr(context, BIT(ESP_CLOSE_DATA_PATH));
269 context->state = ESP_CONTEXT_DISABLED;
271 if (context->adapter) {
272 esp_remove_card(context->adapter);
274 if (context->adapter->hcidev) {
275 esp_deinit_bt(context->adapter);
281 deinit_sdio_func(context->func);
282 context->func = NULL;
284 memset(context, 0, sizeof(struct esp_sdio_context));
286 esp_dbg("ESP SDIO cleanup completed\n");
/* Transport ops handed to the core layer via esp_init_interface_layer().
 * NOTE(review): a .read member is presumably set on a line not visible
 * in this listing — confirm against the full source. */
289 static struct esp_if_ops if_ops = {
291 .write = write_packet,
/* Seed the host-side rx_byte_count / tx_buffer_count counters from the
 * slave's current register values so subsequent modular deltas start in
 * sync with the firmware. Called at probe and on resume. */
294 static int get_firmware_data(struct esp_sdio_context *context)
299 val = kmalloc(sizeof(u32), GFP_KERNEL);
305 /* Initialize rx_byte_count */
306 ret = esp_read_reg(context, ESP_SLAVE_PACKET_LEN_REG,
307 (u8 *) val, sizeof(*val), ACQUIRE_LOCK);
313 esp_info("Rx Pre ====== %d\n", context->rx_byte_count);
314 context->rx_byte_count = *val & ESP_SLAVE_LEN_MASK;
315 esp_info("Rx Pos ====== %d\n", context->rx_byte_count);
317 /* Initialize tx_buffer_count */
318 ret = esp_read_reg(context, ESP_SLAVE_TOKEN_RDATA, (u8 *) val,
319 sizeof(*val), ACQUIRE_LOCK);
/* Token register carries the cumulative buffer count in bits [16..]. */
326 *val = ((*val >> 16) & ESP_TX_BUFFER_MASK);
327 esp_info("Tx Pre ====== %d\n", context->tx_buffer_count);
329 if (*val >= ESP_MAX_BUF_CNT)
330 context->tx_buffer_count = (*val) - ESP_MAX_BUF_CNT;
332 context->tx_buffer_count = 0;
333 esp_info("Tx Pos ====== %d\n", context->tx_buffer_count);
/* One-time context setup after the SDIO function is up: sync counters
 * with the firmware, attach the shared adapter, and initialize the
 * per-priority TX queues and their item counters. */
339 static int init_context(struct esp_sdio_context *context)
342 uint8_t prio_q_idx = 0;
348 ret = get_firmware_data(context);
352 context->adapter = esp_get_adapter();
354 if (unlikely(!context->adapter))
355 esp_err("Failed to get adapter\n");
357 for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
358 skb_queue_head_init(&(sdio_context.tx_q[prio_q_idx]));
359 atomic_set(&queue_items[prio_q_idx], 0);
362 context->adapter->if_type = ESP_IF_TYPE_SDIO;
/* Pull one pending packet from the slave over CMD53.
 * Queries the pending length, allocates an skb of exactly that size, then
 * reads in chunks: whole ESP_BLOCK_SIZE blocks first, then a 4-byte
 * aligned tail. Returns the skb, or NULL on invalid state/args, no data,
 * allocation failure, or read error. Host is claimed for the whole
 * sequence so length query and data reads are atomic w.r.t. other bus
 * users. */
367 static struct sk_buff *read_packet(struct esp_adapter *adapter)
369 u32 len_from_slave, data_left, len_to_read, size, num_blocks;
373 struct esp_sdio_context *context;
375 if (!adapter || !adapter->if_context) {
376 esp_err("INVALID args\n");
380 context = adapter->if_context;
382 if (!context || (context->state != ESP_CONTEXT_READY) || !context->func) {
383 esp_err("Invalid context/state\n");
387 sdio_claim_host(context->func);
389 data_left = len_to_read = len_from_slave = num_blocks = 0;
392 ret = esp_get_len_from_slave(context, &len_from_slave, LOCK_ALREADY_ACQUIRED);
394 if (ret || !len_from_slave) {
395 sdio_release_host(context->func);
/* Anything above 4 blocks is only noteworthy, not an error. */
399 size = ESP_BLOCK_SIZE * 4;
401 if (len_from_slave > size) {
402 esp_info("Rx large packet: %d\n", len_from_slave);
405 skb = esp_alloc_skb(len_from_slave);
408 esp_err("SKB alloc failed\n");
409 sdio_release_host(context->func);
413 skb_put(skb, len_from_slave);
416 data_left = len_from_slave;
419 num_blocks = data_left/ESP_BLOCK_SIZE;
422 if (!context->rx_byte_count) {
423 start_time = ktime_get_ns();
/* Full blocks: read num_blocks * ESP_BLOCK_SIZE in one transfer. */
428 len_to_read = num_blocks * ESP_BLOCK_SIZE;
429 ret = esp_read_block(context,
430 ESP_SLAVE_CMD53_END_ADDR - len_to_read,
431 pos, len_to_read, LOCK_ALREADY_ACQUIRED);
/* Tail: read the remainder rounded up to a 4-byte boundary. */
433 len_to_read = data_left;
434 /* 4 byte aligned length */
435 ret = esp_read_block(context,
436 ESP_SLAVE_CMD53_END_ADDR - len_to_read,
437 pos, (len_to_read + 3) & (~3), LOCK_ALREADY_ACQUIRED);
441 esp_err("Failed to read data - %d [%u - %d]\n", ret, num_blocks, len_to_read);
444 sdio_release_host(context->func);
448 data_left -= len_to_read;
/* Track the slave's cumulative counter, modulo its wrap point. */
450 context->rx_byte_count += len_to_read;
451 context->rx_byte_count = context->rx_byte_count % ESP_RX_BYTE_MAX;
453 } while (data_left > 0);
455 sdio_release_host(context->func);
/* Transport .write op: validate and enqueue an skb on the priority TX
 * queue matching its payload-header interface type (internal/HCI get
 * higher priority than the default PRIO_Q_LOW); tx_process() drains the
 * queues. Applies back-pressure by pausing the netdev queue once
 * tx_pending reaches TX_MAX_PENDING_COUNT. */
460 static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb)
462 u32 max_pkt_size = ESP_RX_BUFFER_SIZE - sizeof(struct esp_payload_header);
463 struct esp_payload_header *payload_header = (struct esp_payload_header *) skb->data;
464 struct esp_skb_cb *cb = NULL;
465 uint8_t prio = PRIO_Q_LOW;
467 if (!adapter || !adapter->if_context || !skb || !skb->data || !skb->len) {
468 esp_err("Invalid args\n");
/* Oversized frames cannot fit a single slave buffer; drop them. */
477 if (skb->len > max_pkt_size) {
478 esp_err("Drop pkt of len[%u] > max SDIO transport len[%u]\n",
479 skb->len, max_pkt_size);
485 cb = (struct esp_skb_cb *)skb->cb;
486 if (cb && cb->priv && (atomic_read(&tx_pending) >= TX_MAX_PENDING_COUNT)) {
487 esp_tx_pause(cb->priv);
490 /* esp_err("TX Pause busy");*/
494 /* Enqueue SKB in tx_q */
495 atomic_inc(&tx_pending);
497 /* Notify to process queue */
498 if (payload_header->if_type == ESP_INTERNAL_IF)
500 else if (payload_header->if_type == ESP_HCI_IF)
/* Counter bumped before the queue insert so tx_process sees a
 * consistent (or conservative) item count. */
505 atomic_inc(&queue_items[prio]);
506 skb_queue_tail(&(sdio_context.tx_q[prio]), skb);
/* Check (and reserve from) the slave's free TX buffer pool.
 * Caches the last-known availability in a static; only re-reads the
 * token register from the slave when the cache cannot satisfy the
 * request, retrying up to MAX_WRITE_RETRIES with a short sleep.
 * Returns BUFFER_AVAILABLE and decrements the cached count on success,
 * BUFFER_UNAVAILABLE otherwise. NOTE(review): the static cache makes
 * this single-caller only — it is invoked solely from the tx_process
 * thread in the visible code. */
511 static int is_sdio_write_buffer_available(u32 buf_needed)
513 #define BUFFER_AVAILABLE 1
514 #define BUFFER_UNAVAILABLE 0
517 static u32 buf_available;
518 struct esp_sdio_context *context = &sdio_context;
519 u8 retry = MAX_WRITE_RETRIES;
521 /*If buffer needed are less than buffer available
522 then only read for available buffer number from slave*/
523 if (buf_available < buf_needed) {
525 ret = esp_slave_get_tx_buffer_num(context, &buf_available, ACQUIRE_LOCK);
527 if (buf_available < buf_needed) {
529 /* Release SDIO and retry after delay*/
531 usleep_range(10, 50);
539 if (buf_available >= buf_needed)
540 buf_available -= buf_needed;
543 /* No buffer available at slave */
544 return BUFFER_UNAVAILABLE;
547 return BUFFER_AVAILABLE;
/* TX worker kthread: drain the priority queues (HIGH, then MID, then
 * LOW), apply flow control, wait for slave buffer credits, and push each
 * skb to the slave over CMD53 in 4-byte-aligned writes. Runs until
 * kthread_stop() from esp_remove(). */
550 static int tx_process(void *data)
556 u32 data_left, len_to_send, pad;
557 struct sk_buff *tx_skb = NULL;
558 struct esp_adapter *adapter = (struct esp_adapter *) data;
559 struct esp_sdio_context *context = NULL;
560 struct esp_skb_cb *cb = NULL;
563 context = adapter->if_context;
565 while (!kthread_should_stop()) {
567 if (context->state != ESP_CONTEXT_READY) {
569 esp_err("not ready");
574 /* TODO: Use wait_event_interruptible_timeout */
/* Strict priority dequeue: HIGH starves MID, MID starves LOW. */
579 if (atomic_read(&queue_items[PRIO_Q_HIGH]) > 0) {
580 tx_skb = skb_dequeue(&(context->tx_q[PRIO_Q_HIGH]));
584 atomic_dec(&queue_items[PRIO_Q_HIGH]);
585 } else if (atomic_read(&queue_items[PRIO_Q_MID]) > 0) {
586 tx_skb = skb_dequeue(&(context->tx_q[PRIO_Q_MID]));
590 atomic_dec(&queue_items[PRIO_Q_MID]);
591 } else if (atomic_read(&queue_items[PRIO_Q_LOW]) > 0) {
592 tx_skb = skb_dequeue(&(context->tx_q[PRIO_Q_LOW]));
596 atomic_dec(&queue_items[PRIO_Q_LOW]);
598 /* esp_verbose("not ready high=%d mid=%d low=%d\n",
599 atomic_read(&queue_items[PRIO_Q_HIGH]),
600 atomic_read(&queue_items[PRIO_Q_MID]),
601 atomic_read(&queue_items[PRIO_Q_LOW])); */
606 if (atomic_read(&tx_pending))
607 atomic_dec(&tx_pending);
609 retry = MAX_WRITE_RETRIES;
611 /* resume network tx queue if bearable load */
612 cb = (struct esp_skb_cb *)tx_skb->cb;
613 if (cb && cb->priv && atomic_read(&tx_pending) < TX_RESUME_THRESHOLD) {
614 esp_tx_resume(cb->priv);
616 if (raw_tp_mode != 0) {
617 esp_raw_tp_queue_resume();
/* Number of fixed-size slave buffers this skb will consume. */
622 buf_needed = (tx_skb->len + ESP_RX_BUFFER_SIZE - 1) / ESP_RX_BUFFER_SIZE;
624 /*If SDIO slave buffer is available to write then only write data
625 else wait till buffer is available*/
626 ret = is_sdio_write_buffer_available(buf_needed);
628 dev_kfree_skb(tx_skb);
633 data_left = len_to_send = 0;
635 data_left = tx_skb->len;
636 pad = ESP_BLOCK_SIZE - (data_left % ESP_BLOCK_SIZE);
641 block_cnt = data_left / ESP_BLOCK_SIZE;
642 len_to_send = data_left;
/* CMD53 write at the end-address window, length rounded up to 4. */
643 ret = esp_write_block(context, ESP_SLAVE_CMD53_END_ADDR - len_to_send,
644 pos, (len_to_send + 3) & (~3), ACQUIRE_LOCK);
647 esp_err("Failed to send data: %d %d %d\n", ret, len_to_send, data_left);
651 data_left -= len_to_send;
656 /* drop the packet */
657 dev_kfree_skb(tx_skb);
/* Account the consumed slave buffers, modulo the token wrap point. */
661 context->tx_buffer_count += buf_needed;
662 context->tx_buffer_count = context->tx_buffer_count % ESP_TX_BUFFER_MAX;
664 dev_kfree_skb(tx_skb);
/* Bring up the SDIO function at probe time: enable the function, claim
 * the IRQ with esp_handle_isr as handler, attach the global context as
 * driver data, and mark the context INIT. Returns the context, with any
 * bus error reported through *sdio_ret. On IRQ-claim failure the
 * function is disabled again before bailing out. */
672 static struct esp_sdio_context *init_sdio_func(struct sdio_func *func, int *sdio_ret)
674 struct esp_sdio_context *context = NULL;
680 context = &sdio_context;
682 context->func = func;
684 sdio_claim_host(func);
686 /* Enable Function */
687 ret = sdio_enable_func(func);
689 esp_err("sdio_enable_func ret: %d\n", ret);
692 sdio_release_host(func);
698 ret = sdio_claim_irq(func, esp_handle_isr);
700 esp_err("sdio_claim_irq ret: %d\n", ret);
701 sdio_disable_func(func);
705 sdio_release_host(func);
710 /* Set private data */
711 sdio_set_drvdata(func, context);
713 context->state = ESP_CONTEXT_INIT;
715 sdio_release_host(func);
720 #ifdef CONFIG_ENABLE_MONITOR_PROCESS
/* Optional watchdog kthread: periodically samples the slave's length,
 * token and interrupt-status registers; if pending RX bytes exist but
 * rx_byte_count has not advanced since the previous iteration (a stuck
 * RX path), it force-reads and discards a packet to unblock the slave. */
721 static int monitor_process(void *data)
723 u32 val, intr, len_reg, rdata, old_len = 0;
724 struct esp_sdio_context *context = (struct esp_sdio_context *) data;
727 while (!kthread_should_stop()) {
730 val = intr = len_reg = rdata = 0;
732 esp_read_reg(context, ESP_SLAVE_PACKET_LEN_REG,
733 (u8 *) &val, sizeof(val), ACQUIRE_LOCK);
735 len_reg = val & ESP_SLAVE_LEN_MASK;
738 esp_read_reg(context, ESP_SLAVE_TOKEN_RDATA, (u8 *) &val,
739 sizeof(val), ACQUIRE_LOCK);
741 rdata = ((val >> 16) & ESP_TX_BUFFER_MASK);
743 esp_read_reg(context, ESP_SLAVE_INT_ST_REG,
744 (u8 *) &intr, sizeof(intr), ACQUIRE_LOCK);
/* Stall detection: data pending and no progress since last sample. */
747 if (len_reg > context->rx_byte_count) {
748 if (old_len && (context->rx_byte_count == old_len)) {
749 esp_dbg("Monitor thread ----> [%d - %d] [%d - %d] %d\n",
750 len_reg, context->rx_byte_count,
751 rdata, context->tx_buffer_count, intr);
753 skb = read_packet(context->adapter);
759 esp_dbg("Flushed %d bytes\n", skb->len);
761 /* drop the packet */
767 old_len = context->rx_byte_count;
/* SDIO probe: accept only function 1, bring up the function and IRQ,
 * optionally override the bus clock from the user-supplied speed, init
 * the context, start the TX (and optional monitor) kthread, and tell the
 * slave to open the data path. */
775 static int esp_probe(struct sdio_func *func,
776 const struct sdio_device_id *id)
778 struct esp_sdio_context *context = NULL;
781 if (func->num != 1) {
785 esp_info("ESP network device detected\n");
/* NOTE(review): stray double semicolon below — harmless, but should be
 * cleaned up in the full source. */
787 context = init_sdio_func(func, &ret);;
796 if (sdio_context.sdio_clk_mhz) {
797 struct mmc_host *host = func->card->host;
798 u32 hz = sdio_context.sdio_clk_mhz * NUMBER_1M;
799 /* Expansion of mmc_set_clock that isnt exported */
/* Clamp the requested frequency to the host controller's range. */
800 if (hz < host->f_min)
802 if (hz > host->f_max)
804 host->ios.clock = hz;
805 host->ops->set_ios(host, &host->ios);
808 context->state = ESP_CONTEXT_READY;
809 atomic_set(&tx_pending, 0);
810 ret = init_context(context);
812 deinit_sdio_func(func);
816 tx_thread = kthread_run(tx_process, context->adapter, "esp_TX");
819 esp_err("Failed to create esp_sdio TX thread\n");
821 context->adapter->dev = &func->dev;
/* Data path open: slave may start posting RX packets from here on. */
822 generate_slave_intr(context, BIT(ESP_OPEN_DATA_PATH));
825 #ifdef CONFIG_ENABLE_MONITOR_PROCESS
826 monitor_thread = kthread_run(monitor_process, context, "Monitor process");
829 esp_err("Failed to create monitor thread\n");
832 esp_dbg("ESP SDIO probe completed\n");
/* PM suspend callback: tell the slave to enter power save, ask the MMC
 * core to keep the card powered across suspend, and arm the out-of-band
 * wake IRQ so the slave can wake the host. */
837 static int esp_suspend(struct device *dev)
839 struct sdio_func *func = NULL;
840 struct esp_sdio_context *context = NULL;
843 esp_info("Failed to inform ESP that host is suspending\n");
847 func = dev_to_sdio_func(dev);
849 esp_info("----> Host Suspend\n");
852 context = sdio_get_drvdata(func);
855 esp_info("Failed to inform ESP that host is suspending\n");
861 generate_slave_intr(context, BIT(ESP_POWER_SAVE_ON));
/* Keep card power so the slave survives suspend without re-enumeration. */
864 sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
866 /* Enable OOB IRQ and host wake up */
867 enable_irq(SDIO_OOB_IRQ);
868 enable_irq_wake(SDIO_OOB_IRQ);
/* PM resume callback: disarm the out-of-band wake IRQ, re-sync the
 * RX/TX counters with the slave (its registers may have moved while we
 * slept), and take the slave out of power save. */
873 static int esp_resume(struct device *dev)
875 struct sdio_func *func = NULL;
876 struct esp_sdio_context *context = NULL;
879 esp_info("Failed to inform ESP that host is awake\n");
883 func = dev_to_sdio_func(dev);
885 esp_info("-----> Host Awake\n");
887 /* Host woke up.. Disable OOB IRQ */
888 disable_irq_wake(SDIO_OOB_IRQ);
889 disable_irq(SDIO_OOB_IRQ);
893 context = sdio_get_drvdata(func);
896 esp_info("Failed to inform ESP that host is awake\n");
900 /* generate_slave_intr(context, BIT(ESP_RESET));*/
901 get_firmware_data(context);
903 generate_slave_intr(context, BIT(ESP_POWER_SAVE_OFF));
/* Host power-management callbacks (card stays powered in suspend). */
908 static const struct dev_pm_ops esp_pm_ops = {
909 .suspend = esp_suspend,
910 .resume = esp_resume,
/* Device-tree match table for platform integration. */
913 static const struct of_device_id esp_sdio_of_match[] = {
914 { .compatible = "espressif,esp_sdio", },
917 MODULE_DEVICE_TABLE(of, esp_sdio_of_match);
919 /* SDIO driver structure to be registered with kernel */
920 static struct sdio_driver esp_sdio_driver = {
921 .name = KBUILD_MODNAME,
922 .id_table = esp_devices,
924 .remove = esp_remove,
/* NOTE(review): .name appears twice in this listing; the second set of
 * members likely belongs to a nested .drv initializer guarded by a
 * kernel-version #if not visible here — confirm against the full file. */
926 .name = KBUILD_MODNAME,
927 .owner = THIS_MODULE,
929 .of_match_table = esp_sdio_of_match,
/* Entry point from the core layer: wire the shared sdio_context and the
 * transport ops into the adapter, record the requested bus clock (MHz,
 * applied later in esp_probe), and register the SDIO driver. Returns the
 * sdio_register_driver() result. */
933 int esp_init_interface_layer(struct esp_adapter *adapter, const struct esp_if_params *params)
938 adapter->if_context = &sdio_context;
939 adapter->if_ops = &if_ops;
940 sdio_context.adapter = adapter;
941 sdio_context.sdio_clk_mhz = params->speed;
943 return sdio_register_driver(&esp_sdio_driver);
/* Accept or reject the firmware-reported chipset for SDIO transport:
 * ESP32 and ESP32-C6 are supported; S2/S3/C2/C3 exist but have no SDIO
 * slave support; anything else is unknown. Unsupported/unknown cases set
 * adapter->chipset to ESP_FIRMWARE_CHIP_UNRECOGNIZED. */
946 int esp_validate_chipset(struct esp_adapter *adapter, u8 chipset)
951 case ESP_FIRMWARE_CHIP_ESP32:
952 case ESP_FIRMWARE_CHIP_ESP32C6:
953 adapter->chipset = chipset;
954 esp_info("Chipset=%s ID=%02x detected over SDIO\n", esp_chipname_from_id(chipset), chipset);
956 case ESP_FIRMWARE_CHIP_ESP32S2:
957 case ESP_FIRMWARE_CHIP_ESP32S3:
958 case ESP_FIRMWARE_CHIP_ESP32C2:
959 case ESP_FIRMWARE_CHIP_ESP32C3:
960 esp_err("Chipset=%s ID=%02x not supported for SDIO\n", esp_chipname_from_id(chipset), chipset);
961 adapter->chipset = ESP_FIRMWARE_CHIP_UNRECOGNIZED;
964 esp_err("Unrecognized Chipset ID=%02x\n", chipset);
965 adapter->chipset = ESP_FIRMWARE_CHIP_UNRECOGNIZED;
/* No-op on the SDIO transport: clock tuning here is SPI-specific, so the
 * request is silently accepted to keep the shared interface uniform. */
972 int esp_adjust_spi_clock(struct esp_adapter *adapter, u8 spi_clk_mhz)
974 /* SPI bus specific call, silently discard */
/* Module-teardown counterpart of esp_init_interface_layer(): unregister
 * the SDIO driver, which triggers esp_remove() for any bound card. */
978 void esp_deinit_interface_layer(void)
980 sdio_unregister_driver(&esp_sdio_driver);