// SPDX-License-Identifier: GPL-2.0-only
/*
 * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
 */
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include "esp_spi.h"
#include "esp_if.h"
#include "esp_api.h"
#include "esp_bt_api.h"
#include "esp_kernel_port.h"
#include "esp_stats.h"
#include "esp_utils.h"
#include "esp_cfg80211.h"
#define SPI_INITIAL_CLK_MHZ	10
#define TX_MAX_PENDING_COUNT	100
#define TX_RESUME_THRESHOLD	(TX_MAX_PENDING_COUNT / 5)
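/*
 * Simple TX flow control: once TX_MAX_PENDING_COUNT data packets are
 * pending towards the ESP peripheral, the network queues are paused via
 * esp_tx_pause(); they are resumed once the backlog drains below
 * TX_RESUME_THRESHOLD (see write_packet() and esp_spi_work()).
 */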
extern u32 raw_tp_mode;
static struct sk_buff *read_packet(struct esp_adapter *adapter);
static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb);
static void spi_exit(void);
static int spi_init(void);
static void adjust_spi_clock(u8 spi_clk_mhz);

volatile u8 data_path;
volatile u8 host_sleep;
static struct esp_spi_context spi_context;
static char hardware_type = ESP_FIRMWARE_CHIP_UNRECOGNIZED;
static atomic_t tx_pending;
static struct esp_if_ops if_ops = {
	.read = read_packet,
	.write = write_packet,
};

static DEFINE_MUTEX(spi_lock);
static void open_data_path(void)
{
	atomic_set(&tx_pending, 0);
	msleep(200);
	data_path = OPEN_DATAPATH;
}

static void close_data_path(void)
{
	data_path = CLOSE_DATAPATH;
	msleep(200);
}
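/*
 * The transport uses two host-input GPIOs driven by the ESP peripheral:
 * HANDSHAKE_PIN signals that the slave is ready for the next SPI
 * transaction, and SPI_DATA_READY_PIN signals that the slave has data
 * queued for the host. A rising edge on either pin simply schedules
 * esp_spi_work(), which re-reads both pin levels before transferring.
 */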
static irqreturn_t spi_data_ready_interrupt_handler(int irq, void *dev)
{
	/* ESP peripheral has queued a buffer for transmission */
	if (spi_context.spi_workqueue)
		queue_work(spi_context.spi_workqueue, &spi_context.spi_work);

	return IRQ_HANDLED;
}

static irqreturn_t spi_interrupt_handler(int irq, void *dev)
{
	/* ESP peripheral is ready for the next SPI transaction */
	if (spi_context.spi_workqueue)
		queue_work(spi_context.spi_workqueue, &spi_context.spi_work);

	return IRQ_HANDLED;
}
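/*
 * Three priority queues are used in each direction: PRIO_Q_HIGH for
 * internal control traffic (ESP_INTERNAL_IF), PRIO_Q_MID for Bluetooth
 * HCI (ESP_HCI_IF) and PRIO_Q_LOW for regular network data. Readers
 * always drain the higher-priority queues first.
 */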
static struct sk_buff *read_packet(struct esp_adapter *adapter)
{
	struct esp_spi_context *context;
	struct sk_buff *skb = NULL;

	if (!data_path)
		return NULL;

	if (!adapter || !adapter->if_context) {
		esp_err("Invalid args\n");
		return NULL;
	}

	context = adapter->if_context;

	if (context->esp_spi_dev) {
		skb = skb_dequeue(&(context->rx_q[PRIO_Q_HIGH]));
		if (!skb)
			skb = skb_dequeue(&(context->rx_q[PRIO_Q_MID]));
		if (!skb)
			skb = skb_dequeue(&(context->rx_q[PRIO_Q_LOW]));
	} else {
		esp_err("Invalid args\n");
		return NULL;
	}

	return skb;
}
static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb)
{
	u32 max_pkt_size = SPI_BUF_SIZE - sizeof(struct esp_payload_header);
	struct esp_payload_header *payload_header = NULL;
	struct esp_skb_cb *cb = NULL;

	if (!adapter || !adapter->if_context || !skb || !skb->data || !skb->len) {
		esp_err("Invalid args\n");
		if (skb)
			dev_kfree_skb(skb);
		return -EINVAL;
	}

	payload_header = (struct esp_payload_header *) skb->data;

	if (skb->len > max_pkt_size) {
		esp_err("Drop pkt of len[%u] > max spi transport len[%u]\n",
			skb->len, max_pkt_size);
		dev_kfree_skb(skb);
		return -EPERM;
	}

	if (!data_path) {
		esp_info("%u datapath closed\n", __LINE__);
		dev_kfree_skb(skb);
		return -EPERM;
	}

	cb = (struct esp_skb_cb *)skb->cb;
	if (cb && cb->priv && (atomic_read(&tx_pending) >= TX_MAX_PENDING_COUNT)) {
		esp_tx_pause(cb->priv);
		dev_kfree_skb(skb);
		/*esp_err("TX Pause busy");*/
		if (spi_context.spi_workqueue)
			queue_work(spi_context.spi_workqueue, &spi_context.spi_work);
		return -EBUSY;
	}

	/* Enqueue SKB in tx_q */
	if (payload_header->if_type == ESP_INTERNAL_IF) {
		skb_queue_tail(&spi_context.tx_q[PRIO_Q_HIGH], skb);
	} else if (payload_header->if_type == ESP_HCI_IF) {
		skb_queue_tail(&spi_context.tx_q[PRIO_Q_MID], skb);
	} else {
		skb_queue_tail(&spi_context.tx_q[PRIO_Q_LOW], skb);
		atomic_inc(&tx_pending);
	}

	if (spi_context.spi_workqueue)
		queue_work(spi_context.spi_workqueue, &spi_context.spi_work);

	return 0;
}
int esp_validate_chipset(struct esp_adapter *adapter, u8 chipset)
{
	int ret = 0;

	switch (chipset) {
	case ESP_FIRMWARE_CHIP_ESP32:
	case ESP_FIRMWARE_CHIP_ESP32S2:
	case ESP_FIRMWARE_CHIP_ESP32S3:
	case ESP_FIRMWARE_CHIP_ESP32C2:
	case ESP_FIRMWARE_CHIP_ESP32C3:
	case ESP_FIRMWARE_CHIP_ESP32C6:
		adapter->chipset = chipset;
		esp_info("Chipset=%s ID=%02x detected over SPI\n", esp_chipname_from_id(chipset), chipset);
		break;
	default:
		esp_err("Unrecognized chipset ID=%02x\n", chipset);
		adapter->chipset = ESP_FIRMWARE_CHIP_UNRECOGNIZED;
		break;
	}

	return ret;
}
int esp_deinit_module(struct esp_adapter *adapter)
{
	/* Cleanup for the second and subsequent bootups:
	 *
	 * The SPI transport state is kept in software, not in a hardware
	 * module. When a bootup event is received, discard all prior
	 * commands and messages still pending at the network layer and
	 * re-initialize everything.
	 */
	uint8_t prio_q_idx, iface_idx;

	for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
		skb_queue_purge(&spi_context.tx_q[prio_q_idx]);
	}

	for (iface_idx = 0; iface_idx < ESP_MAX_INTERFACE; iface_idx++) {
		struct esp_wifi_device *priv = adapter->priv[iface_idx];

		esp_mark_scan_done_and_disconnect(priv, true);
	}

	esp_remove_card(adapter);

	for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
		skb_queue_head_init(&spi_context.tx_q[prio_q_idx]);
	}

	return 0;
}
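/*
 * Every buffer exchanged over SPI starts with a struct esp_payload_header
 * (defined in the shared headers). The fields used below are if_type (the
 * originating interface), offset (start of payload, expected to equal
 * sizeof(struct esp_payload_header)) and len (payload length), with
 * multi-byte fields carried little-endian; the exact layout lives in the
 * header definition, not here.
 */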
static int process_rx_buf(struct sk_buff *skb)
{
	struct esp_payload_header *header;
	u16 len = 0;
	u16 offset = 0;

	if (!skb)
		return -EINVAL;

	header = (struct esp_payload_header *) skb->data;

	if (header->if_type >= ESP_MAX_IF) {
		return -EINVAL;
	}

	offset = le16_to_cpu(header->offset);

	/* Validate received SKB. Check len and offset fields */
	if (offset != sizeof(struct esp_payload_header)) {
		return -EINVAL;
	}

	len = le16_to_cpu(header->len);
	if (!len) {
		return -EINVAL;
	}

	len += sizeof(struct esp_payload_header);

	if (len > SPI_BUF_SIZE) {
		return -EINVAL;
	}

	/* Trim SKB to actual size */
	skb_trim(skb, len);

	if (!data_path) {
		/*esp_info("%u datapath closed\n", __LINE__);*/
		return -EPERM;
	}

	/* enqueue skb for read_packet to pick it */
	if (header->if_type == ESP_INTERNAL_IF)
		skb_queue_tail(&spi_context.rx_q[PRIO_Q_HIGH], skb);
	else if (header->if_type == ESP_HCI_IF)
		skb_queue_tail(&spi_context.rx_q[PRIO_Q_MID], skb);
	else
		skb_queue_tail(&spi_context.rx_q[PRIO_Q_LOW], skb);

	/* indicate reception of new packet */
	esp_process_new_packet_intr(spi_context.adapter);

	return 0;
}
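/*
 * esp_spi_work() performs one full-duplex SPI transaction. Every transfer
 * is a fixed SPI_BUF_SIZE bytes in each direction: if the host has nothing
 * to send, a zero-filled dummy TX buffer goes out, and an RX buffer is
 * always allocated because the peripheral may clock data back on any
 * transfer. The RX buffer is validated by process_rx_buf() and freed
 * immediately if it does not carry a valid payload.
 */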
static void esp_spi_work(struct work_struct *work)
{
	struct spi_transfer trans;
	struct sk_buff *tx_skb = NULL, *rx_skb = NULL;
	struct esp_skb_cb *cb = NULL;
	u8 *rx_buf = NULL;
	int ret = 0;
	volatile int trans_ready, rx_pending;

	mutex_lock(&spi_lock);

	trans_ready = gpio_get_value(HANDSHAKE_PIN);
	rx_pending = gpio_get_value(SPI_DATA_READY_PIN);

	if (trans_ready) {
		if (data_path) {
			tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_HIGH]);
			if (!tx_skb)
				tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_MID]);
			if (!tx_skb)
				tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_LOW]);
			if (tx_skb) {
				if (atomic_read(&tx_pending))
					atomic_dec(&tx_pending);

				/* resume network tx queue if bearable load */
				cb = (struct esp_skb_cb *)tx_skb->cb;
				if (cb && cb->priv && atomic_read(&tx_pending) < TX_RESUME_THRESHOLD) {
					esp_tx_resume(cb->priv);
					if (raw_tp_mode != 0) {
						esp_raw_tp_queue_resume();
					}
				}
			}
		}

		if (rx_pending || tx_skb) {
			memset(&trans, 0, sizeof(trans));
			trans.speed_hz = spi_context.spi_clk_mhz * NUMBER_1M;

			/* Setup and execute SPI transaction
			 * Tx_buf: Check if tx_q has a valid buffer for transmission,
			 *         else send a zero-filled dummy buffer
			 * Rx_buf: Allocate memory for incoming data. This will be freed
			 *         immediately if the received buffer is invalid.
			 *         If it is a valid buffer, the upper layer will free it.
			 */

			/* Configure TX buffer if available */
			if (tx_skb) {
				trans.tx_buf = tx_skb->data;
				/*print_hex_dump(KERN_ERR, "tx: ", DUMP_PREFIX_ADDRESS, 16, 1, trans.tx_buf, 32, 1);*/
			} else {
				tx_skb = esp_alloc_skb(SPI_BUF_SIZE);
				trans.tx_buf = skb_put(tx_skb, SPI_BUF_SIZE);
				memset((void *)trans.tx_buf, 0, SPI_BUF_SIZE);
			}

			/* Configure RX buffer */
			rx_skb = esp_alloc_skb(SPI_BUF_SIZE);
			rx_buf = skb_put(rx_skb, SPI_BUF_SIZE);

			memset(rx_buf, 0, SPI_BUF_SIZE);

			trans.rx_buf = rx_buf;
			trans.len = SPI_BUF_SIZE;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
			if (hardware_type == ESP_FIRMWARE_CHIP_ESP32) {
				trans.cs_change = 1;
			}
#endif

			ret = spi_sync_transfer(spi_context.esp_spi_dev, &trans, 1);
			if (ret) {
				esp_err("SPI Transaction failed: %d\n", ret);
				dev_kfree_skb(rx_skb);
				dev_kfree_skb(tx_skb);
			} else {
				/* Free rx_skb if received data is not valid */
				if (process_rx_buf(rx_skb)) {
					dev_kfree_skb(rx_skb);
				}

				dev_kfree_skb(tx_skb);
			}
		}
	}

	mutex_unlock(&spi_lock);
}
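/*
 * Kernels from v5.16 onward no longer export spi_busnum_to_master(), so a
 * local replacement is provided: it allocates a throwaway platform device
 * and SPI master purely to get at the SPI device class, then walks that
 * class looking for the controller whose bus number matches.
 */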
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0))
#include <linux/platform_device.h>
static int __spi_controller_match(struct device *dev, const void *data)
{
	struct spi_controller *ctlr;
	const u16 *bus_num = data;

	ctlr = container_of(dev, struct spi_controller, dev);

	if (!ctlr) {
		return 0;
	}

	return ctlr->bus_num == *bus_num;
}
static struct spi_controller *spi_busnum_to_master(u16 bus_num)
{
	struct platform_device *pdev = NULL;
	struct spi_master *master = NULL;
	struct spi_controller *ctlr = NULL;
	struct device *dev = NULL;

	pdev = platform_device_alloc("pdev", PLATFORM_DEVID_NONE);
	if (!pdev)
		return NULL;

	pdev->num_resources = 0;
	platform_device_add(pdev);

	master = spi_alloc_master(&pdev->dev, sizeof(void *));
	if (!master) {
		pr_err("Error: failed to allocate SPI master device\n");
		platform_device_del(pdev);
		platform_device_put(pdev);
		return NULL;
	}

	dev = class_find_device(master->dev.class, NULL, &bus_num, __spi_controller_match);
	if (dev) {
		ctlr = container_of(dev, struct spi_controller, dev);
	}

	spi_master_put(master);
	platform_device_del(pdev);
	platform_device_put(pdev);

	return ctlr;
}
#endif
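/*
 * spi_dev_init() registers the ESP peripheral on SPI bus 0, chip select 0
 * in SPI mode 2, then claims the two handshake GPIOs as inputs and wires
 * their rising-edge IRQs. Each failure path unwinds exactly the resources
 * acquired before it, in order.
 */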
static int spi_dev_init(int spi_clk_mhz)
{
	int status = 0;
	struct spi_board_info esp_board = {{0}};
	struct spi_master *master = NULL;

	strscpy(esp_board.modalias, "esp_spi", sizeof(esp_board.modalias));
	esp_board.mode = SPI_MODE_2;
	esp_board.max_speed_hz = spi_clk_mhz * NUMBER_1M;
	esp_board.bus_num = 0;
	esp_board.chip_select = 0;

	master = spi_busnum_to_master(esp_board.bus_num);
	if (!master) {
		esp_err("Failed to obtain SPI master handle\n");
		return -ENODEV;
	}

	spi_context.esp_spi_dev = spi_new_device(master, &esp_board);

	if (!spi_context.esp_spi_dev) {
		esp_err("Failed to add new SPI device\n");
		return -ENODEV;
	}

	status = spi_setup(spi_context.esp_spi_dev);

	if (status) {
		esp_err("Failed to setup new SPI device\n");
		return status;
	}

	esp_info("ESP32 peripheral is registered to SPI bus [%d], chip select [%d], SPI Clock [%d]\n",
		 esp_board.bus_num,
		 esp_board.chip_select, spi_clk_mhz);

	status = gpio_request(HANDSHAKE_PIN, "SPI_HANDSHAKE_PIN");
	if (status) {
		esp_err("Failed to obtain GPIO for Handshake pin, err:%d\n", status);
		return status;
	}

	status = gpio_direction_input(HANDSHAKE_PIN);
	if (status) {
		gpio_free(HANDSHAKE_PIN);
		esp_err("Failed to set GPIO direction of Handshake pin, err:%d\n", status);
		return status;
	}

	status = request_irq(SPI_IRQ, spi_interrupt_handler,
			     IRQF_SHARED | IRQF_TRIGGER_RISING,
			     "ESP_SPI", spi_context.esp_spi_dev);
	if (status) {
		gpio_free(HANDSHAKE_PIN);
		esp_err("Failed to request IRQ for Handshake pin, err:%d\n", status);
		return status;
	}

	status = gpio_request(SPI_DATA_READY_PIN, "SPI_DATA_READY_PIN");
	if (status) {
		gpio_free(HANDSHAKE_PIN);
		free_irq(SPI_IRQ, spi_context.esp_spi_dev);
		esp_err("Failed to obtain GPIO for Data ready pin, err:%d\n", status);
		return status;
	}

	status = gpio_direction_input(SPI_DATA_READY_PIN);
	if (status) {
		gpio_free(HANDSHAKE_PIN);
		free_irq(SPI_IRQ, spi_context.esp_spi_dev);
		gpio_free(SPI_DATA_READY_PIN);
		esp_err("Failed to set GPIO direction of Data ready pin, err:%d\n", status);
		return status;
	}

	status = request_irq(SPI_DATA_READY_IRQ, spi_data_ready_interrupt_handler,
			     IRQF_SHARED | IRQF_TRIGGER_RISING,
			     "ESP_SPI_DATA_READY", spi_context.esp_spi_dev);
	if (status) {
		gpio_free(HANDSHAKE_PIN);
		free_irq(SPI_IRQ, spi_context.esp_spi_dev);
		gpio_free(SPI_DATA_READY_PIN);
		esp_err("Failed to request IRQ for Data ready pin, err:%d\n", status);
		return status;
	}

	spi_context.spi_gpio_enabled = 1;

	open_data_path();

	return 0;
}
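/*
 * The workqueue must exist before spi_dev_init() installs the IRQ
 * handlers: both handlers queue spi_work, and they guard against the
 * brief window where spi_context.spi_workqueue is still NULL.
 */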
static int spi_init(void)
{
	int status = 0;
	uint8_t prio_q_idx = 0;
	struct esp_adapter *adapter;

	spi_context.spi_workqueue = create_workqueue("ESP_SPI_WORK_QUEUE");

	if (!spi_context.spi_workqueue) {
		esp_err("spi workqueue failed to create\n");
		spi_exit();
		return -EFAULT;
	}

	INIT_WORK(&spi_context.spi_work, esp_spi_work);

	for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
		skb_queue_head_init(&spi_context.tx_q[prio_q_idx]);
		skb_queue_head_init(&spi_context.rx_q[prio_q_idx]);
	}

	status = spi_dev_init(spi_context.spi_clk_mhz);
	if (status) {
		spi_exit();
		esp_err("Failed to init SPI device\n");
		return status;
	}

	adapter = spi_context.adapter;

	if (!adapter) {
		spi_exit();
		return -EFAULT;
	}

	adapter->dev = &spi_context.esp_spi_dev->dev;

	return status;
}
static void spi_exit(void)
{
	uint8_t prio_q_idx = 0;

	disable_irq(SPI_IRQ);
	disable_irq(SPI_DATA_READY_IRQ);

	close_data_path();
	msleep(200);

	for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
		skb_queue_purge(&spi_context.tx_q[prio_q_idx]);
		skb_queue_purge(&spi_context.rx_q[prio_q_idx]);
	}

	if (spi_context.spi_workqueue) {
		flush_scheduled_work();
		destroy_workqueue(spi_context.spi_workqueue);
		spi_context.spi_workqueue = NULL;
	}

	esp_remove_card(spi_context.adapter);

	if (spi_context.adapter->hcidev)
		esp_deinit_bt(spi_context.adapter);

	if (spi_context.spi_gpio_enabled) {
		free_irq(SPI_IRQ, spi_context.esp_spi_dev);
		free_irq(SPI_DATA_READY_IRQ, spi_context.esp_spi_dev);

		gpio_free(HANDSHAKE_PIN);
		gpio_free(SPI_DATA_READY_PIN);
	}

	if (spi_context.esp_spi_dev)
		spi_unregister_device(spi_context.esp_spi_dev);

	memset(&spi_context, 0, sizeof(spi_context));
}
static void adjust_spi_clock(u8 spi_clk_mhz)
{
	if ((spi_clk_mhz) && (spi_clk_mhz != spi_context.spi_clk_mhz)) {
		esp_info("ESP Reconfigure SPI CLK to %u MHz\n", spi_clk_mhz);
		spi_context.spi_clk_mhz = spi_clk_mhz;
		spi_context.esp_spi_dev->max_speed_hz = spi_clk_mhz * NUMBER_1M;
	}
}

int esp_adjust_spi_clock(struct esp_adapter *adapter, u8 spi_clk_mhz)
{
	adjust_spi_clock(spi_clk_mhz);
	return 0;
}

int esp_init_interface_layer(struct esp_adapter *adapter, u32 speed)
{
	if (!adapter)
		return -EINVAL;

	memset(&spi_context, 0, sizeof(spi_context));

	adapter->if_context = &spi_context;
	adapter->if_ops = &if_ops;
	adapter->if_type = ESP_IF_TYPE_SPI;
	spi_context.adapter = adapter;
	if (speed)
		spi_context.spi_clk_mhz = speed;
	else
		spi_context.spi_clk_mhz = SPI_INITIAL_CLK_MHZ;

	return spi_init();
}

void esp_deinit_interface_layer(void)
{
	spi_exit();
}