// SPDX-License-Identifier: GPL-2.0-only
/*
 * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
 */
7 #include <linux/spi/spi.h>
8 #include <linux/gpio.h>
9 #include <linux/mutex.h>
10 #include <linux/delay.h>
14 #include "esp_bt_api.h"
15 #include "esp_kernel_port.h"
16 #include "esp_stats.h"
17 #include "esp_utils.h"
18 #include "esp_cfg80211.h"
/* Initial SPI clock in MHz; firmware may later request a different rate
 * via esp_adjust_spi_clock(). */
#define SPI_INITIAL_CLK_MHZ 10
/* TX backpressure: pause the network queues once this many SKBs are
 * pending toward the ESP chip, resume once the backlog drains below
 * the threshold. */
#define TX_MAX_PENDING_COUNT 100
#define TX_RESUME_THRESHOLD (TX_MAX_PENDING_COUNT/5)
/* Forward declarations for the SPI transport implementation below. */
static struct sk_buff *read_packet(struct esp_adapter *adapter);
static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb);
static void spi_exit(void);
static int spi_init(void);
static void adjust_spi_clock(u8 spi_clk_mhz);
/* Transport state flags, written from control paths and read from the
 * data path.  NOTE(review): volatile is not a synchronization primitive;
 * confirm single-writer usage or consider atomics. */
volatile u8 data_path;
volatile u8 host_sleep;
static struct esp_spi_context spi_context;	/* single global SPI transport context */
static char hardware_type = ESP_FIRMWARE_CHIP_UNRECOGNIZED;	/* detected chip ID */
static atomic_t tx_pending;	/* SKBs queued for TX but not yet transferred */
/* Interface operations handed to the generic adapter layer. */
static struct esp_if_ops if_ops = {
	.write = write_packet,
/* Serializes SPI transactions performed by the worker. */
static DEFINE_MUTEX(spi_lock);
/* Mark the transport ready to carry traffic and reset TX backpressure
 * accounting for the new session. */
static void open_data_path(void)
	atomic_set(&tx_pending, 0);
	data_path = OPEN_DATAPATH;
/* Stop carrying data traffic (used around reset/teardown). */
static void close_data_path(void)
	data_path = CLOSE_DATAPATH;
/*
 * IRQ handler for the ESP "data ready" GPIO: the peripheral has a buffer
 * queued for us, so kick the SPI worker to run a transaction.  The
 * workqueue pointer is checked because the IRQ can fire during teardown.
 */
static irqreturn_t spi_data_ready_interrupt_handler(int irq, void *dev)
	/* ESP peripheral has queued buffer for transmission */
	if (spi_context.spi_workqueue)
		queue_work(spi_context.spi_workqueue, &spi_context.spi_work);
/*
 * IRQ handler for the handshake GPIO: the peripheral signals it can
 * accept the next SPI transaction, so schedule the worker.
 */
static irqreturn_t spi_interrupt_handler(int irq, void *dev)
	/* ESP peripheral is ready for next SPI transaction */
	if (spi_context.spi_workqueue)
		queue_work(spi_context.spi_workqueue, &spi_context.spi_work);
/*
 * read_packet() - hand the next received SKB to the upper layer.
 *
 * Drains the RX queues in strict priority order (HIGH, then MID, then
 * LOW).  Ownership of the returned SKB passes to the caller; NULL is
 * returned on invalid arguments or when nothing is queued.
 */
static struct sk_buff *read_packet(struct esp_adapter *adapter)
	struct esp_spi_context *context;
	struct sk_buff *skb = NULL;

	if (!adapter || !adapter->if_context) {
		esp_err("Invalid args\n");

	context = adapter->if_context;

	/* Dequeue only while the SPI device is still registered;
	 * lower-priority queues are consulted when higher ones are empty. */
	if (context->esp_spi_dev) {
		skb = skb_dequeue(&(context->rx_q[PRIO_Q_HIGH]));
		skb = skb_dequeue(&(context->rx_q[PRIO_Q_MID]));
		skb = skb_dequeue(&(context->rx_q[PRIO_Q_LOW]));
		esp_err("Invalid args\n");
/*
 * write_packet() - queue an outgoing SKB for SPI transfer.
 *
 * The SKB (with esp_payload_header already prepended) is placed on a TX
 * queue chosen by interface type: internal control traffic at HIGH
 * priority, HCI (Bluetooth) at MID, everything else at LOW.  When the
 * LOW-priority backlog reaches TX_MAX_PENDING_COUNT the caller's network
 * queue is paused to apply backpressure.  Finally the SPI worker is
 * scheduled to move the data.
 *
 * NOTE(review): payload_header is initialized from skb->data BEFORE the
 * NULL checks on skb/skb->data below run -- a NULL skb would fault at
 * the initializer, not at the validation.  Move the cast after the check.
 */
static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb)
	u32 max_pkt_size = SPI_BUF_SIZE - sizeof(struct esp_payload_header);
	struct esp_payload_header *payload_header = (struct esp_payload_header *) skb->data;
	struct esp_skb_cb *cb = NULL;

	if (!adapter || !adapter->if_context || !skb || !skb->data || !skb->len) {
		esp_err("Invalid args\n");

	/* A frame must fit in one SPI buffer after the transport header. */
	if (skb->len > max_pkt_size) {
		esp_err("Drop pkt of len[%u] > max spi transport len[%u]\n",
			skb->len, max_pkt_size);

	esp_info("%u datapath closed\n", __LINE__);

	/* Backpressure: pause the netdev queue when too many SKBs pend. */
	cb = (struct esp_skb_cb *)skb->cb;
	if (cb && cb->priv && (atomic_read(&tx_pending) >= TX_MAX_PENDING_COUNT)) {
		esp_tx_pause(cb->priv);
		/*esp_err("TX Pause busy");*/
		if (spi_context.spi_workqueue)
			queue_work(spi_context.spi_workqueue, &spi_context.spi_work);

	/* Enqueue SKB in tx_q */
	if (payload_header->if_type == ESP_INTERNAL_IF) {
		skb_queue_tail(&spi_context.tx_q[PRIO_Q_HIGH], skb);
	} else if (payload_header->if_type == ESP_HCI_IF) {
		skb_queue_tail(&spi_context.tx_q[PRIO_Q_MID], skb);
		skb_queue_tail(&spi_context.tx_q[PRIO_Q_LOW], skb);
		/* tx_pending tracks only data-path (LOW) traffic here. */
		atomic_inc(&tx_pending);

	/* Kick the worker so the queued frame goes out promptly. */
	if (spi_context.spi_workqueue)
		queue_work(spi_context.spi_workqueue, &spi_context.spi_work);
/*
 * esp_validate_chipset() - accept only chip IDs this driver supports.
 *
 * Records the chipset on the adapter when recognized; unknown IDs are
 * logged and the adapter is marked ESP_FIRMWARE_CHIP_UNRECOGNIZED.
 */
int esp_validate_chipset(struct esp_adapter *adapter, u8 chipset)
	case ESP_FIRMWARE_CHIP_ESP32:
	case ESP_FIRMWARE_CHIP_ESP32S2:
	case ESP_FIRMWARE_CHIP_ESP32S3:
	case ESP_FIRMWARE_CHIP_ESP32C2:
	case ESP_FIRMWARE_CHIP_ESP32C3:
	case ESP_FIRMWARE_CHIP_ESP32C6:
	adapter->chipset = chipset;
	esp_info("Chipset=%s ID=%02x detected over SPI\n", esp_chipname_from_id(chipset), chipset);
	esp_err("Unrecognized chipset ID=%02x\n", chipset);
	adapter->chipset = ESP_FIRMWARE_CHIP_UNRECOGNIZED;
/*
 * esp_deinit_module() - reset transport state when the chip reboots.
 *
 * Discards all queued TX frames, aborts any in-flight scan / marks
 * interfaces disconnected, unregisters the card from the upper layers,
 * then reinitializes the TX queues so a fresh bootup can proceed.
 */
int esp_deinit_module(struct esp_adapter *adapter)
	/* Second & onward bootup cleanup:
	 * SPI is software and not a hardware based module.
	 * When bootup event is received, we should discard all prior commands,
	 * old messages pending at network and re-initialize everything.
	uint8_t prio_q_idx, iface_idx;

	/* Drop every frame still waiting for transfer. */
	for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
		skb_queue_purge(&spi_context.tx_q[prio_q_idx]);

	for (iface_idx = 0; iface_idx < ESP_MAX_INTERFACE; iface_idx++) {
		struct esp_wifi_device *priv = adapter->priv[iface_idx];
		esp_mark_scan_done_and_disconnect(priv, true);

	esp_remove_card(adapter);

	/* Queues were purged above; re-init them for the next session. */
	for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
		skb_queue_head_init(&spi_context.tx_q[prio_q_idx]);
/*
 * process_rx_buf() - validate a received SPI buffer and enqueue it.
 *
 * Sanity-checks the esp_payload_header (interface type in range, offset
 * equal to the header size, total length within SPI_BUF_SIZE), trims the
 * SKB to the actual payload length, then queues it on the RX queue that
 * matches its interface priority and notifies the adapter that a new
 * packet arrived.  A non-zero return tells the caller to free the SKB.
 */
static int process_rx_buf(struct sk_buff *skb)
	struct esp_payload_header *header;

	header = (struct esp_payload_header *) skb->data;

	if (header->if_type >= ESP_MAX_IF) {

	offset = le16_to_cpu(header->offset);

	/* Validate received SKB. Check len and offset fields */
	if (offset != sizeof(struct esp_payload_header)) {

	len = le16_to_cpu(header->len);

	/* Total on-wire size = header + payload. */
	len += sizeof(struct esp_payload_header);

	if (len > SPI_BUF_SIZE) {

	/* Trim SKB to actual size */

	/*esp_info("%u datapath closed\n", __LINE__);*/

	/* enqueue skb for read_packet to pick it */
	if (header->if_type == ESP_INTERNAL_IF)
		skb_queue_tail(&spi_context.rx_q[PRIO_Q_HIGH], skb);
	else if (header->if_type == ESP_HCI_IF)
		skb_queue_tail(&spi_context.rx_q[PRIO_Q_MID], skb);
		skb_queue_tail(&spi_context.rx_q[PRIO_Q_LOW], skb);

	/* indicate reception of new packet */
	esp_process_new_packet_intr(spi_context.adapter);
/*
 * esp_spi_work() - run one full-duplex SPI transaction with the chip.
 *
 * Scheduled from the GPIO IRQ handlers and from write_packet().  Under
 * spi_lock it samples the handshake ("peer ready") and data-ready GPIOs,
 * dequeues the highest-priority pending TX SKB, and -- if there is
 * anything to send or receive -- performs a fixed-size SPI_BUF_SIZE
 * transfer via spi_sync_transfer().  TX is padded with a zeroed dummy
 * buffer when no frame is pending; the RX buffer is handed to
 * process_rx_buf() and freed here only when it turns out to be invalid.
 *
 * NOTE(review): the esp_alloc_skb() results for the dummy TX buffer and
 * the RX buffer are used without a NULL check -- an allocation failure
 * would oops in skb_put().  Confirm and add checks upstream.
 */
static void esp_spi_work(struct work_struct *work)
	struct spi_transfer trans;
	struct sk_buff *tx_skb = NULL, *rx_skb = NULL;
	struct esp_skb_cb *cb = NULL;
	volatile int trans_ready, rx_pending;

	mutex_lock(&spi_lock);

	/* Sample flow-control GPIOs: peer readiness and pending RX data. */
	trans_ready = gpio_get_value(HANDSHAKE_PIN);
	rx_pending = gpio_get_value(SPI_DATA_READY_PIN);

	/* Pick the most urgent pending TX frame, if any. */
	tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_HIGH]);
	tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_MID]);
	tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_LOW]);
	if (atomic_read(&tx_pending))
		atomic_dec(&tx_pending);

	/* resume network tx queue if bearable load */
	cb = (struct esp_skb_cb *)tx_skb->cb;
	if (cb && cb->priv && atomic_read(&tx_pending) < TX_RESUME_THRESHOLD) {
		esp_tx_resume(cb->priv);
		esp_raw_tp_queue_resume();

	if (rx_pending || tx_skb) {
		memset(&trans, 0, sizeof(trans));
		trans.speed_hz = spi_context.spi_clk_mhz * NUMBER_1M;

		/* Setup and execute SPI transaction
		 * Tx_buf: Check if tx_q has valid buffer for transmission,
		 * Rx_buf: Allocate memory for incoming data. This will be freed
		 * immediately if received buffer is invalid.
		 * If it is a valid buffer, upper layer will free it.

		/* Configure TX buffer if available */
		trans.tx_buf = tx_skb->data;
		/*print_hex_dump(KERN_ERR, "tx: ", DUMP_PREFIX_ADDRESS, 16, 1, trans.tx_buf, 32, 1);*/
		/* No frame pending: send a zeroed dummy buffer so the
		 * full-duplex transfer can still clock RX data in. */
		tx_skb = esp_alloc_skb(SPI_BUF_SIZE);
		trans.tx_buf = skb_put(tx_skb, SPI_BUF_SIZE);
		memset((void *)trans.tx_buf, 0, SPI_BUF_SIZE);

		/* Configure RX buffer */
		rx_skb = esp_alloc_skb(SPI_BUF_SIZE);
		rx_buf = skb_put(rx_skb, SPI_BUF_SIZE);

		memset(rx_buf, 0, SPI_BUF_SIZE);

		trans.rx_buf = rx_buf;
		trans.len = SPI_BUF_SIZE;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
		if (hardware_type == ESP_FIRMWARE_CHIP_ESP32) {

		ret = spi_sync_transfer(spi_context.esp_spi_dev, &trans, 1);
		esp_err("SPI Transaction failed: %d", ret);
		dev_kfree_skb(rx_skb);
		dev_kfree_skb(tx_skb);

		/* Free rx_skb if received data is not valid */
		if (process_rx_buf(rx_skb)) {
			dev_kfree_skb(rx_skb);

		/* TX SKB is always owned here; free it after the transfer. */
		dev_kfree_skb(tx_skb);

	mutex_unlock(&spi_lock);
362 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0))
363 #include <linux/platform_device.h>
/*
 * Match callback for class_find_device(): selects the SPI controller
 * whose bus number equals the u16 pointed to by @data.
 */
static int __spi_controller_match(struct device *dev, const void *data)
	struct spi_controller *ctlr;
	const u16 *bus_num = data;

	ctlr = container_of(dev, struct spi_controller, dev);

	return ctlr->bus_num == *bus_num;
/*
 * spi_busnum_to_master() - local replacement for the kernel helper of
 * the same name that was removed in v5.16 (compiled only on >= 5.16).
 *
 * Allocates a throwaway platform device and SPI master purely to obtain
 * the SPI device class, then looks up the registered controller with the
 * requested bus number via class_find_device().  The temporaries are
 * released before returning; the found controller (or NULL) is returned.
 *
 * NOTE(review): platform_device_alloc() can return NULL but pdev is
 * dereferenced unconditionally -- confirm and add a check.
 */
static struct spi_controller *spi_busnum_to_master(u16 bus_num)
	struct platform_device *pdev = NULL;
	struct spi_master *master = NULL;
	struct spi_controller *ctlr = NULL;
	struct device *dev = NULL;

	pdev = platform_device_alloc("pdev", PLATFORM_DEVID_NONE);
	pdev->num_resources = 0;
	platform_device_add(pdev);

	/* The dummy master only exists to expose the SPI device class. */
	master = spi_alloc_master(&pdev->dev, sizeof(void *));
	pr_err("Error: failed to allocate SPI master device\n");
	platform_device_del(pdev);
	platform_device_put(pdev);

	dev = class_find_device(master->dev.class, NULL, &bus_num, __spi_controller_match);
	ctlr = container_of(dev, struct spi_controller, dev);

	/* Drop the temporaries; only the looked-up controller survives. */
	spi_master_put(master);
	platform_device_del(pdev);
	platform_device_put(pdev);
/*
 * spi_dev_init() - register the ESP device on SPI bus 0 / CS 0 and
 * claim its flow-control GPIOs and IRQs.
 *
 * Creates the SPI device (SPI mode 2 at @spi_clk_mhz MHz), configures
 * the handshake and data-ready pins as inputs, and installs
 * rising-edge IRQ handlers for both.  Each failure path unwinds the
 * resources acquired so far.  Returns 0 on success, negative on error.
 */
static int spi_dev_init(int spi_clk_mhz)
	struct spi_board_info esp_board = {{0}};
	struct spi_master *master = NULL;

	strlcpy(esp_board.modalias, "esp_spi", sizeof(esp_board.modalias));
	esp_board.mode = SPI_MODE_2;
	esp_board.max_speed_hz = spi_clk_mhz * NUMBER_1M;
	esp_board.bus_num = 0;
	esp_board.chip_select = 0;

	master = spi_busnum_to_master(esp_board.bus_num);
	esp_err("Failed to obtain SPI master handle\n");

	spi_context.esp_spi_dev = spi_new_device(master, &esp_board);

	if (!spi_context.esp_spi_dev) {
		esp_err("Failed to add new SPI device\n");

	status = spi_setup(spi_context.esp_spi_dev);

	esp_err("Failed to setup new SPI device");

	esp_info("ESP32 peripheral is registered to SPI bus [%d],chip select [%d], SPI Clock [%d]\n",
		esp_board.chip_select, spi_clk_mhz);

	/* Handshake pin: ESP drives it high when ready for a transfer. */
	status = gpio_request(HANDSHAKE_PIN, "SPI_HANDSHAKE_PIN");

	esp_err("Failed to obtain GPIO for Handshake pin, err:%d\n", status);

	status = gpio_direction_input(HANDSHAKE_PIN);
	gpio_free(HANDSHAKE_PIN);
	esp_err("Failed to set GPIO direction of Handshake pin, err: %d\n", status);

	status = request_irq(SPI_IRQ, spi_interrupt_handler,
		IRQF_SHARED | IRQF_TRIGGER_RISING,
		"ESP_SPI", spi_context.esp_spi_dev);
	gpio_free(HANDSHAKE_PIN);
	esp_err("Failed to request IRQ for Handshake pin, err:%d\n", status);

	/* Data-ready pin: ESP drives it high when it has RX data queued. */
	status = gpio_request(SPI_DATA_READY_PIN, "SPI_DATA_READY_PIN");
	gpio_free(HANDSHAKE_PIN);
	free_irq(SPI_IRQ, spi_context.esp_spi_dev);
	esp_err("Failed to obtain GPIO for Data ready pin, err:%d\n", status);

	status = gpio_direction_input(SPI_DATA_READY_PIN);
	gpio_free(HANDSHAKE_PIN);
	free_irq(SPI_IRQ, spi_context.esp_spi_dev);
	gpio_free(SPI_DATA_READY_PIN);
	esp_err("Failed to set GPIO direction of Data ready pin\n");

	status = request_irq(SPI_DATA_READY_IRQ, spi_data_ready_interrupt_handler,
		IRQF_SHARED | IRQF_TRIGGER_RISING,
		"ESP_SPI_DATA_READY", spi_context.esp_spi_dev);
	gpio_free(HANDSHAKE_PIN);
	free_irq(SPI_IRQ, spi_context.esp_spi_dev);
	gpio_free(SPI_DATA_READY_PIN);
	esp_err("Failed to request IRQ for Data ready pin, err:%d\n", status);

	/* Remember that GPIOs/IRQs are live so spi_exit() can release them. */
	spi_context.spi_gpio_enabled = 1;
/*
 * spi_init() - bring up the SPI transport.
 *
 * Creates the dedicated workqueue that runs esp_spi_work(), initializes
 * all TX/RX priority queues, registers the SPI device and its GPIOs via
 * spi_dev_init(), and points the adapter at the SPI device's struct
 * device.  Returns 0 on success, negative on failure.
 */
static int spi_init(void)
	uint8_t prio_q_idx = 0;
	struct esp_adapter *adapter;

	spi_context.spi_workqueue = create_workqueue("ESP_SPI_WORK_QUEUE");

	if (!spi_context.spi_workqueue) {
		esp_err("spi workqueue failed to create\n");

	INIT_WORK(&spi_context.spi_work, esp_spi_work);

	for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
		skb_queue_head_init(&spi_context.tx_q[prio_q_idx]);
		skb_queue_head_init(&spi_context.rx_q[prio_q_idx]);

	status = spi_dev_init(spi_context.spi_clk_mhz);

	esp_err("Failed Init SPI device\n");

	adapter = spi_context.adapter;

	adapter->dev = &spi_context.esp_spi_dev->dev;
/*
 * spi_exit() - tear down the SPI transport.
 *
 * Disables both IRQs, purges all TX/RX queues, destroys the worker
 * queue, deregisters the card (and BT device if present), releases the
 * GPIOs/IRQs claimed in spi_dev_init(), unregisters the SPI device and
 * finally zeroes the context for a clean re-init.
 *
 * NOTE(review): flush_scheduled_work() flushes the SYSTEM workqueue,
 * but spi_work is queued on the dedicated spi_workqueue;
 * destroy_workqueue() below already drains it, so the flush call looks
 * misdirected -- confirm and drop or replace with flush_workqueue().
 */
static void spi_exit(void)
	uint8_t prio_q_idx = 0;

	disable_irq(SPI_IRQ);
	disable_irq(SPI_DATA_READY_IRQ);

	for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
		skb_queue_purge(&spi_context.tx_q[prio_q_idx]);
		skb_queue_purge(&spi_context.rx_q[prio_q_idx]);

	if (spi_context.spi_workqueue) {
		flush_scheduled_work();
		destroy_workqueue(spi_context.spi_workqueue);
		spi_context.spi_workqueue = NULL;

	esp_remove_card(spi_context.adapter);

	if (spi_context.adapter->hcidev)
		esp_deinit_bt(spi_context.adapter);

	if (spi_context.spi_gpio_enabled) {
		free_irq(SPI_IRQ, spi_context.esp_spi_dev);
		free_irq(SPI_DATA_READY_IRQ, spi_context.esp_spi_dev);

		gpio_free(HANDSHAKE_PIN);
		gpio_free(SPI_DATA_READY_PIN);

	if (spi_context.esp_spi_dev)
		spi_unregister_device(spi_context.esp_spi_dev);

	/* Leave the context zeroed so a later esp_init_interface_layer()
	 * starts from a known state. */
	memset(&spi_context, 0, sizeof(spi_context));
/*
 * adjust_spi_clock() - switch the SPI bus to a new clock rate.
 * No-op when @spi_clk_mhz is zero or already the current rate; the new
 * rate takes effect on subsequent transfers via max_speed_hz.
 */
static void adjust_spi_clock(u8 spi_clk_mhz)
	if ((spi_clk_mhz) && (spi_clk_mhz != spi_context.spi_clk_mhz)) {
		esp_info("ESP Reconfigure SPI CLK to %u MHz\n", spi_clk_mhz);
		spi_context.spi_clk_mhz = spi_clk_mhz;
		spi_context.esp_spi_dev->max_speed_hz = spi_clk_mhz * NUMBER_1M;
/*
 * esp_adjust_spi_clock() - public wrapper used by the adapter layer to
 * apply a firmware-requested SPI clock change.  @adapter is unused by
 * the SPI backend, which keeps a single global context.
 */
int esp_adjust_spi_clock(struct esp_adapter *adapter, u8 spi_clk_mhz)
	adjust_spi_clock(spi_clk_mhz);
/*
 * esp_init_interface_layer() - attach the SPI transport to @adapter.
 *
 * Resets the global SPI context, wires up the if_ops/if_type/if_context
 * fields on the adapter, and picks the initial SPI clock: @speed when
 * non-zero, otherwise SPI_INITIAL_CLK_MHZ.
 */
int esp_init_interface_layer(struct esp_adapter *adapter, u32 speed)
	memset(&spi_context, 0, sizeof(spi_context));

	adapter->if_context = &spi_context;
	adapter->if_ops = &if_ops;
	adapter->if_type = ESP_IF_TYPE_SPI;
	spi_context.adapter = adapter;

	spi_context.spi_clk_mhz = speed;
	/* Caller passed no speed: fall back to the driver default. */
	spi_context.spi_clk_mhz = SPI_INITIAL_CLK_MHZ;
618 void esp_deinit_interface_layer(void)