/*
 * Espressif Systems Wireless LAN device driver
 *
 * Copyright (C) 2015-2021 Espressif Systems (Shanghai) PTE LTD
 *
 * This software file (the "File") is distributed by Espressif Systems (Shanghai)
 * PTE LTD under the terms of the GNU General Public License Version 2, June 1991
 * (the "License"). You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
 * this warranty disclaimer.
 */
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>

#include "esp_sdio_api.h"
#include "esp_bt_api.h"
#ifdef CONFIG_SUPPORT_ESP_SERIAL
#include "esp_serial.h"
#endif
/* Max attempts to obtain TX buffers from the slave before dropping a packet */
#define MAX_WRITE_RETRIES       2
/* Backpressure limits for the host-side TX queue */
#define TX_MAX_PENDING_COUNT    200
#define TX_RESUME_THRESHOLD     (TX_MAX_PENDING_COUNT/5)

/* Log a CMD53 read/write failure with the failing source line.
 * do-while(0) keeps the macro safe as a single statement. */
#define CHECK_SDIO_RW_ERROR(ret) do {			\
	if (ret)					\
		printk(KERN_ERR "%s: CMD53 read/write error at %d\n", __func__, __LINE__); \
} while (0);
45 struct esp_sdio_context sdio_context;
46 static atomic_t tx_pending;
47 static atomic_t queue_items[MAX_PRIORITY_QUEUES];
49 #ifdef CONFIG_ENABLE_MONITOR_PROCESS
50 struct task_struct *monitor_thread;
52 struct task_struct *tx_thread;
54 static int init_context(struct esp_sdio_context *context);
55 static struct sk_buff * read_packet(struct esp_adapter *adapter);
56 static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb);
57 /*int deinit_context(struct esp_adapter *adapter);*/
59 static const struct sdio_device_id esp_devices[] = {
60 { SDIO_DEVICE(ESP_VENDOR_ID, ESP_DEVICE_ID_1) },
61 { SDIO_DEVICE(ESP_VENDOR_ID, ESP_DEVICE_ID_2) },
65 static void print_capabilities(u32 cap)
67 printk(KERN_INFO "Features supported are:\n");
68 if (cap & ESP_WLAN_SDIO_SUPPORT)
69 printk(KERN_INFO "\t * WLAN\n");
70 if ((cap & ESP_BT_UART_SUPPORT) || (cap & ESP_BT_SDIO_SUPPORT)) {
71 printk(KERN_INFO "\t * BT/BLE\n");
72 if (cap & ESP_BT_UART_SUPPORT)
73 printk(KERN_INFO "\t - HCI over UART\n");
74 if (cap & ESP_BT_SDIO_SUPPORT)
75 printk(KERN_INFO "\t - HCI over SDIO\n");
77 if ((cap & ESP_BLE_ONLY_SUPPORT) && (cap & ESP_BR_EDR_ONLY_SUPPORT))
78 printk(KERN_INFO "\t - BT/BLE dual mode\n");
79 else if (cap & ESP_BLE_ONLY_SUPPORT)
80 printk(KERN_INFO "\t - BLE only\n");
81 else if (cap & ESP_BR_EDR_ONLY_SUPPORT)
82 printk(KERN_INFO "\t - BR EDR only\n");
86 static void esp_process_interrupt(struct esp_sdio_context *context, u32 int_status)
92 if (int_status & ESP_SLAVE_RX_NEW_PACKET_INT) {
93 esp_process_new_packet_intr(context->adapter);
97 static void esp_handle_isr(struct sdio_func *func)
99 struct esp_sdio_context *context = NULL;
107 context = sdio_get_drvdata(func);
113 int_status = kmalloc(sizeof(u32), GFP_ATOMIC);
119 /* Read interrupt status register */
120 ret = esp_read_reg(context, ESP_SLAVE_INT_ST_REG,
121 (u8 *) int_status, sizeof(* int_status), ACQUIRE_LOCK);
122 CHECK_SDIO_RW_ERROR(ret);
124 esp_process_interrupt(context, *int_status);
126 /* Clear interrupt status */
127 ret = esp_write_reg(context, ESP_SLAVE_INT_CLR_REG,
128 (u8 *) int_status, sizeof(* int_status), ACQUIRE_LOCK);
129 CHECK_SDIO_RW_ERROR(ret);
134 int generate_slave_intr(struct esp_sdio_context *context, u8 data)
142 val = kmalloc(sizeof(u8), GFP_KERNEL);
150 ret = esp_write_reg(context, ESP_SLAVE_SCRATCH_REG_7, val,
151 sizeof(*val), ACQUIRE_LOCK);
158 static void deinit_sdio_func(struct sdio_func *func)
160 sdio_claim_host(func);
162 sdio_release_irq(func);
163 /* Disable sdio function */
164 sdio_disable_func(func);
165 sdio_release_host(func);
166 sdio_set_drvdata(func, NULL);
169 static int esp_slave_get_tx_buffer_num(struct esp_sdio_context *context, u32 *tx_num, u8 is_lock_needed)
174 len = kmalloc(sizeof(u32), GFP_KERNEL);
180 ret = esp_read_reg(context, ESP_SLAVE_TOKEN_RDATA, (u8*) len, sizeof(*len), is_lock_needed);
187 *len = (*len >> 16) & ESP_TX_BUFFER_MASK;
188 *len = (*len + ESP_TX_BUFFER_MAX - context->tx_buffer_count) % ESP_TX_BUFFER_MAX;
196 static int esp_get_len_from_slave(struct esp_sdio_context *context, u32 *rx_size, u8 is_lock_needed)
202 len = kmalloc(sizeof(u32), GFP_KERNEL);
208 ret = esp_read_reg(context, ESP_SLAVE_PACKET_LEN_REG,
209 (u8 *) len, sizeof(*len), is_lock_needed);
216 *len &= ESP_SLAVE_LEN_MASK;
218 if (*len >= context->rx_byte_count)
219 *len = (*len + ESP_RX_BYTE_MAX - context->rx_byte_count) % ESP_RX_BYTE_MAX;
221 /* Handle a case of roll over */
222 temp = ESP_RX_BYTE_MAX - context->rx_byte_count;
225 if (*len > ESP_RX_BUFFER_SIZE) {
226 printk(KERN_INFO "%s: Len from slave[%d] exceeds max [%d]\n",
227 __func__, *len, ESP_RX_BUFFER_SIZE);
237 static void flush_sdio(struct esp_sdio_context *context)
241 if (!context || !context->adapter)
245 skb = read_packet(context->adapter);
252 printk (KERN_INFO "%s: Flushed %d bytes\n", __func__, skb->len);
257 static void esp_remove(struct sdio_func *func)
259 struct esp_sdio_context *context;
260 uint8_t prio_q_idx = 0;
261 context = sdio_get_drvdata(func);
263 printk(KERN_INFO "%s -> Remove card", __func__);
265 #ifdef CONFIG_SUPPORT_ESP_SERIAL
266 esp_serial_cleanup();
269 #ifdef CONFIG_ENABLE_MONITOR_PROCESS
271 kthread_stop(monitor_thread);
275 kthread_stop(tx_thread);
278 generate_slave_intr(context, BIT(ESP_CLOSE_DATA_PATH));
283 if (context->adapter) {
284 esp_remove_card(context->adapter);
286 if (context->adapter->hcidev) {
287 esp_deinit_bt(context->adapter);
291 for (prio_q_idx=0; prio_q_idx<MAX_PRIORITY_QUEUES; prio_q_idx++) {
292 skb_queue_purge(&(sdio_context.tx_q[prio_q_idx]));
295 memset(context, 0, sizeof(struct esp_sdio_context));
298 deinit_sdio_func(func);
300 printk (KERN_INFO "%s: Context deinit %d - %d\n", __func__, context->rx_byte_count,
301 context->tx_buffer_count);
305 static struct esp_if_ops if_ops = {
307 .write = write_packet,
310 static int init_context(struct esp_sdio_context *context)
314 uint8_t prio_q_idx = 0;
320 val = kmalloc(sizeof(u32), GFP_KERNEL);
326 /* Initialize rx_byte_count */
327 ret = esp_read_reg(context, ESP_SLAVE_PACKET_LEN_REG,
328 (u8 *) val, sizeof(* val), ACQUIRE_LOCK);
334 context->rx_byte_count = *val & ESP_SLAVE_LEN_MASK;
336 /* Initialize tx_buffer_count */
337 ret = esp_read_reg(context, ESP_SLAVE_TOKEN_RDATA, (u8 *) val,
338 sizeof(* val), ACQUIRE_LOCK);
345 *val = ((*val >> 16) & ESP_TX_BUFFER_MASK);
347 if (*val >= ESP_MAX_BUF_CNT)
348 context->tx_buffer_count = (*val) - ESP_MAX_BUF_CNT;
350 context->tx_buffer_count = 0;
352 context->adapter = esp_get_adapter();
354 if (unlikely(!context->adapter))
355 printk (KERN_ERR "%s: Failed to get adapter\n", __func__);
357 for (prio_q_idx=0; prio_q_idx<MAX_PRIORITY_QUEUES; prio_q_idx++) {
358 skb_queue_head_init(&(sdio_context.tx_q[prio_q_idx]));
359 atomic_set(&queue_items[prio_q_idx], 0);
362 context->adapter->if_type = ESP_IF_TYPE_SDIO;
368 static struct sk_buff * read_packet(struct esp_adapter *adapter)
370 u32 len_from_slave, data_left, len_to_read, size, num_blocks;
374 struct esp_sdio_context *context;
376 if (!adapter || !adapter->if_context) {
377 printk (KERN_ERR "%s: INVALID args\n", __func__);
381 context = adapter->if_context;
383 sdio_claim_host(context->func);
385 data_left = len_to_read = len_from_slave = num_blocks = 0;
388 ret = esp_get_len_from_slave(context, &len_from_slave, LOCK_ALREADY_ACQUIRED);
390 if (ret || !len_from_slave) {
391 sdio_release_host(context->func);
395 size = ESP_BLOCK_SIZE * 4;
397 if (len_from_slave > size) {
398 printk(KERN_INFO "Rx large packet: %d\n", len_from_slave);
401 skb = esp_alloc_skb(len_from_slave);
404 printk (KERN_ERR "%s: SKB alloc failed\n", __func__);
405 sdio_release_host(context->func);
409 skb_put(skb, len_from_slave);
412 data_left = len_from_slave;
415 num_blocks = data_left/ESP_BLOCK_SIZE;
418 if (!context->rx_byte_count) {
419 start_time = ktime_get_ns();
424 len_to_read = num_blocks * ESP_BLOCK_SIZE;
425 ret = esp_read_block(context,
426 ESP_SLAVE_CMD53_END_ADDR - len_to_read,
427 pos, len_to_read, LOCK_ALREADY_ACQUIRED);
429 len_to_read = data_left;
430 /* 4 byte aligned length */
431 ret = esp_read_block(context,
432 ESP_SLAVE_CMD53_END_ADDR - len_to_read,
433 pos, (len_to_read + 3) & (~3), LOCK_ALREADY_ACQUIRED);
437 printk (KERN_ERR "%s: Failed to read data - %d [%u - %d]\n", __func__, ret, num_blocks, len_to_read);
439 sdio_release_host(context->func);
443 data_left -= len_to_read;
445 context->rx_byte_count += len_to_read;
446 context->rx_byte_count = context->rx_byte_count % ESP_RX_BYTE_MAX;
448 } while (data_left > 0);
450 sdio_release_host(context->func);
455 static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb)
457 u32 max_pkt_size = ESP_RX_BUFFER_SIZE;
458 struct esp_payload_header *payload_header = (struct esp_payload_header *) skb->data;
460 if (!adapter || !adapter->if_context || !skb || !skb->data || !skb->len) {
461 printk(KERN_ERR "%s: Invalid args\n", __func__);
468 if (skb->len > max_pkt_size) {
469 printk(KERN_ERR "%s: Drop pkt of len[%u] > max SDIO transport len[%u]\n",
470 __func__, skb->len, max_pkt_size);
475 if (atomic_read(&tx_pending) >= TX_MAX_PENDING_COUNT) {
481 /* Enqueue SKB in tx_q */
482 atomic_inc(&tx_pending);
484 /* Notify to process queue */
485 if (payload_header->if_type == ESP_SERIAL_IF) {
486 atomic_inc(&queue_items[PRIO_Q_SERIAL]);
487 skb_queue_tail(&(sdio_context.tx_q[PRIO_Q_SERIAL]), skb);
488 } else if (payload_header->if_type == ESP_HCI_IF) {
489 atomic_inc(&queue_items[PRIO_Q_BT]);
490 skb_queue_tail(&(sdio_context.tx_q[PRIO_Q_BT]), skb);
492 atomic_inc(&queue_items[PRIO_Q_OTHERS]);
493 skb_queue_tail(&(sdio_context.tx_q[PRIO_Q_OTHERS]), skb);
499 static int tx_process(void *data)
503 u32 buf_needed = 0, buf_available = 0;
505 u32 data_left, len_to_send, pad;
506 struct sk_buff *tx_skb = NULL;
507 struct esp_adapter *adapter = (struct esp_adapter *) data;
508 struct esp_sdio_context *context = NULL;
511 context = adapter->if_context;
513 while (!kthread_should_stop()) {
515 if (context->state != ESP_CONTEXT_READY) {
520 if (atomic_read(&queue_items[PRIO_Q_SERIAL]) > 0) {
521 tx_skb = skb_dequeue(&(context->tx_q[PRIO_Q_SERIAL]));
525 atomic_dec(&queue_items[PRIO_Q_SERIAL]);
526 }else if (atomic_read(&queue_items[PRIO_Q_BT]) > 0) {
527 tx_skb = skb_dequeue(&(context->tx_q[PRIO_Q_BT]));
531 atomic_dec(&queue_items[PRIO_Q_BT]);
532 } else if (atomic_read(&queue_items[PRIO_Q_OTHERS]) > 0) {
533 tx_skb = skb_dequeue(&(context->tx_q[PRIO_Q_OTHERS]));
537 atomic_dec(&queue_items[PRIO_Q_OTHERS]);
543 if (atomic_read(&tx_pending))
544 atomic_dec(&tx_pending);
546 retry = MAX_WRITE_RETRIES;
548 /* resume network tx queue if bearable load */
549 if (atomic_read(&tx_pending) < TX_RESUME_THRESHOLD) {
553 buf_needed = (tx_skb->len + ESP_RX_BUFFER_SIZE - 1) / ESP_RX_BUFFER_SIZE;
556 sdio_claim_host(context->func);
558 ret = esp_slave_get_tx_buffer_num(context, &buf_available, LOCK_ALREADY_ACQUIRED);
560 if (buf_available < buf_needed) {
561 sdio_release_host(context->func);
563 /* Release SDIO and retry after delay*/
573 /* No buffer available at slave */
574 dev_kfree_skb(tx_skb);
579 data_left = len_to_send = 0;
581 data_left = tx_skb->len;
582 pad = ESP_BLOCK_SIZE - (data_left % ESP_BLOCK_SIZE);
587 block_cnt = data_left / ESP_BLOCK_SIZE;
588 len_to_send = data_left;
589 ret = esp_write_block(context, ESP_SLAVE_CMD53_END_ADDR - len_to_send,
590 pos, (len_to_send + 3) & (~3), LOCK_ALREADY_ACQUIRED);
593 printk (KERN_ERR "%s: Failed to send data: %d %d %d\n", __func__, ret, len_to_send, data_left);
594 sdio_release_host(context->func);
598 data_left -= len_to_send;
603 /* drop the packet */
604 dev_kfree_skb(tx_skb);
608 context->tx_buffer_count += buf_needed;
609 context->tx_buffer_count = context->tx_buffer_count % ESP_TX_BUFFER_MAX;
611 sdio_release_host(context->func);
612 dev_kfree_skb(tx_skb);
619 static struct esp_sdio_context * init_sdio_func(struct sdio_func *func)
621 struct esp_sdio_context *context = NULL;
627 context = &sdio_context;
629 context->func = func;
631 sdio_claim_host(func);
633 /* Enable Function */
634 ret = sdio_enable_func(func);
640 ret = sdio_claim_irq(func, esp_handle_isr);
642 sdio_disable_func(func);
646 /* Set private data */
647 sdio_set_drvdata(func, context);
649 context->state = ESP_CONTEXT_INIT;
651 sdio_release_host(func);
#ifdef CONFIG_ENABLE_MONITOR_PROCESS
/* Debug watchdog thread: periodically sample the slave's RX length, TX
 * token and interrupt-status registers; if the RX counter appears stuck
 * (slave has data but the host count has not advanced since last sample),
 * log the state and flush one packet to unwedge the pipe. */
static int monitor_process(void *data)
{
	u32 val, intr, len_reg, rdata, old_len = 0;
	struct esp_sdio_context *context = (struct esp_sdio_context *) data;
	struct sk_buff *skb;

	while (!kthread_should_stop()) {
		/* Poll interval — assumed a multi-second cadence; verify */
		msleep(5000);

		val = intr = len_reg = rdata = 0;

		esp_read_reg(context, ESP_SLAVE_PACKET_LEN_REG,
				(u8 *) &val, sizeof(val), ACQUIRE_LOCK);

		len_reg = val & ESP_SLAVE_LEN_MASK;

		val = 0;
		esp_read_reg(context, ESP_SLAVE_TOKEN_RDATA, (u8 *) &val,
				sizeof(val), ACQUIRE_LOCK);

		rdata = ((val >> 16) & ESP_TX_BUFFER_MASK);

		esp_read_reg(context, ESP_SLAVE_INT_ST_REG,
				(u8 *) &intr, sizeof(intr), ACQUIRE_LOCK);

		if (len_reg > context->rx_byte_count) {
			if (old_len && (context->rx_byte_count == old_len)) {
				printk (KERN_DEBUG "Monitor thread ----> [%d - %d] [%d - %d] %d\n",
						len_reg, context->rx_byte_count,
						rdata, context->tx_buffer_count, intr);

				skb = read_packet(context->adapter);

				if (!skb)
					continue;

				if (skb->len)
					printk (KERN_DEBUG "%s: Flushed %d bytes\n", __func__, skb->len);

				/* drop the packet */
				dev_kfree_skb(skb);
			}
		}

		old_len = context->rx_byte_count;
	}

	return 0;
}
#endif
710 static int esp_probe(struct sdio_func *func,
711 const struct sdio_device_id *id)
713 struct esp_sdio_context *context = NULL;
716 if (func->num != 1) {
720 printk(KERN_INFO "%s: ESP network device detected\n", __func__);
722 context = init_sdio_func(func);
728 atomic_set(&tx_pending, 0);
729 ret = init_context(context);
731 deinit_sdio_func(func);
735 tx_thread = kthread_run(tx_process, context->adapter, "esp32_TX");
738 printk (KERN_ERR "Failed to create esp32_sdio TX thread\n");
740 #ifdef CONFIG_SUPPORT_ESP_SERIAL
741 ret = esp_serial_init((void *) context->adapter);
744 printk(KERN_ERR "Error initialising serial interface\n");
749 ret = esp_add_card(context->adapter);
752 printk (KERN_ERR "Failed to add card\n");
753 deinit_sdio_func(func);
759 context->state = ESP_CONTEXT_READY;
761 #ifdef CONFIG_ENABLE_MONITOR_PROCESS
762 monitor_thread = kthread_run(monitor_process, context, "Monitor process");
765 printk (KERN_ERR "Failed to create monitor thread\n");
768 generate_slave_intr(context, BIT(ESP_OPEN_DATA_PATH));
772 /* SDIO driver structure to be registered with kernel */
773 static struct sdio_driver esp_sdio_driver = {
775 .id_table = esp_devices,
777 .remove = esp_remove,
780 int esp_init_interface_layer(struct esp_adapter *adapter)
785 adapter->if_context = &sdio_context;
786 adapter->if_ops = &if_ops;
787 sdio_context.adapter = adapter;
789 return sdio_register_driver(&esp_sdio_driver);
792 void process_init_event(u8 *evt_buf, u8 len)
794 u8 len_left = len, tag_len;
803 tag_len = *(pos + 1);
804 printk(KERN_INFO "EVENT: %d\n", *pos);
805 if (*pos == ESP_PRIV_CAPABILITY) {
806 process_capabilities(*(pos + 2));
807 print_capabilities(*(pos + 2));
809 printk (KERN_WARNING "Unsupported tag in event");
812 len_left -= (tag_len+2);
816 void esp_deinit_interface_layer(void)
818 sdio_unregister_driver(&esp_sdio_driver);