]> Git Repo - esp-hosted.git/blob - esp_hosted_ng/host/spi/esp_spi.c
fix(esp_hosted_ng): Substitute strlcpy() with strscpy()
[esp-hosted.git] / esp_hosted_ng / host / spi / esp_spi.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
4  *
5  */
6 #include "utils.h"
7 #include <linux/spi/spi.h>
8 #include <linux/gpio.h>
9 #include <linux/mutex.h>
10 #include <linux/delay.h>
11 #include "esp_spi.h"
12 #include "esp_if.h"
13 #include "esp_api.h"
14 #include "esp_bt_api.h"
15 #include "esp_kernel_port.h"
16 #include "esp_stats.h"
17 #include "esp_utils.h"
18 #include "esp_cfg80211.h"
19
20 #define SPI_INITIAL_CLK_MHZ     10
21 #define TX_MAX_PENDING_COUNT    100
22 #define TX_RESUME_THRESHOLD     (TX_MAX_PENDING_COUNT/5)
23
24 extern u32 raw_tp_mode;
25 static struct sk_buff *read_packet(struct esp_adapter *adapter);
26 static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb);
27 static void spi_exit(void);
28 static int spi_init(void);
29 static void adjust_spi_clock(u8 spi_clk_mhz);
30
31 volatile u8 data_path;
32 volatile u8 host_sleep;
33 static struct esp_spi_context spi_context;
34 static char hardware_type = ESP_FIRMWARE_CHIP_UNRECOGNIZED;
35 static atomic_t tx_pending;
36
37 static struct esp_if_ops if_ops = {
38         .read           = read_packet,
39         .write          = write_packet,
40 };
41
42 static DEFINE_MUTEX(spi_lock);
43
44 static void open_data_path(void)
45 {
46         atomic_set(&tx_pending, 0);
47         msleep(200);
48         data_path = OPEN_DATAPATH;
49 }
50
51 static void close_data_path(void)
52 {
53         data_path = CLOSE_DATAPATH;
54         msleep(200);
55 }
56
57 static irqreturn_t spi_data_ready_interrupt_handler(int irq, void *dev)
58 {
59         /* ESP peripheral has queued buffer for transmission */
60         if (spi_context.spi_workqueue)
61                 queue_work(spi_context.spi_workqueue, &spi_context.spi_work);
62
63         return IRQ_HANDLED;
64  }
65
66 static irqreturn_t spi_interrupt_handler(int irq, void *dev)
67 {
68         /* ESP peripheral is ready for next SPI transaction */
69         if (spi_context.spi_workqueue)
70                 queue_work(spi_context.spi_workqueue, &spi_context.spi_work);
71
72         return IRQ_HANDLED;
73 }
74
75 static struct sk_buff *read_packet(struct esp_adapter *adapter)
76 {
77         struct esp_spi_context *context;
78         struct sk_buff *skb = NULL;
79
80         if (!data_path) {
81                 return NULL;
82         }
83
84         if (!adapter || !adapter->if_context) {
85                 esp_err("Invalid args\n");
86                 return NULL;
87         }
88
89         context = adapter->if_context;
90
91         if (context->esp_spi_dev) {
92                 skb = skb_dequeue(&(context->rx_q[PRIO_Q_HIGH]));
93                 if (!skb)
94                         skb = skb_dequeue(&(context->rx_q[PRIO_Q_MID]));
95                 if (!skb)
96                         skb = skb_dequeue(&(context->rx_q[PRIO_Q_LOW]));
97         } else {
98                 esp_err("Invalid args\n");
99                 return NULL;
100         }
101
102         return skb;
103 }
104
105 static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb)
106 {
107         u32 max_pkt_size = SPI_BUF_SIZE - sizeof(struct esp_payload_header);
108         struct esp_payload_header *payload_header = (struct esp_payload_header *) skb->data;
109         struct esp_skb_cb *cb = NULL;
110
111         if (!adapter || !adapter->if_context || !skb || !skb->data || !skb->len) {
112                 esp_err("Invalid args\n");
113                 if (skb) {
114                         dev_kfree_skb(skb);
115                         skb = NULL;
116                 }
117                 return -EINVAL;
118         }
119
120         if (skb->len > max_pkt_size) {
121                 esp_err("Drop pkt of len[%u] > max spi transport len[%u]\n",
122                                 skb->len, max_pkt_size);
123                 dev_kfree_skb(skb);
124                 return -EPERM;
125         }
126
127         if (!data_path) {
128                 esp_info("%u datapath closed\n", __LINE__);
129                 dev_kfree_skb(skb);
130                 return -EPERM;
131         }
132
133         cb = (struct esp_skb_cb *)skb->cb;
134         if (cb && cb->priv && (atomic_read(&tx_pending) >= TX_MAX_PENDING_COUNT)) {
135                 esp_tx_pause(cb->priv);
136                 dev_kfree_skb(skb);
137                 skb = NULL;
138                 /*esp_err("TX Pause busy");*/
139                 if (spi_context.spi_workqueue)
140                         queue_work(spi_context.spi_workqueue, &spi_context.spi_work);
141                 return -EBUSY;
142         }
143
144         /* Enqueue SKB in tx_q */
145         if (payload_header->if_type == ESP_INTERNAL_IF) {
146                 skb_queue_tail(&spi_context.tx_q[PRIO_Q_HIGH], skb);
147         } else if (payload_header->if_type == ESP_HCI_IF) {
148                 skb_queue_tail(&spi_context.tx_q[PRIO_Q_MID], skb);
149         } else {
150                 skb_queue_tail(&spi_context.tx_q[PRIO_Q_LOW], skb);
151                 atomic_inc(&tx_pending);
152         }
153
154         if (spi_context.spi_workqueue)
155                 queue_work(spi_context.spi_workqueue, &spi_context.spi_work);
156
157         return 0;
158 }
159
160 int esp_validate_chipset(struct esp_adapter *adapter, u8 chipset)
161 {
162         int ret = 0;
163
164         switch(chipset) {
165         case ESP_FIRMWARE_CHIP_ESP32:
166         case ESP_FIRMWARE_CHIP_ESP32S2:
167         case ESP_FIRMWARE_CHIP_ESP32S3:
168         case ESP_FIRMWARE_CHIP_ESP32C2:
169         case ESP_FIRMWARE_CHIP_ESP32C3:
170         case ESP_FIRMWARE_CHIP_ESP32C6:
171                 adapter->chipset = chipset;
172                 esp_info("Chipset=%s ID=%02x detected over SPI\n", esp_chipname_from_id(chipset), chipset);
173                 break;
174         default:
175                 esp_err("Unrecognized chipset ID=%02x\n", chipset);
176                 adapter->chipset = ESP_FIRMWARE_CHIP_UNRECOGNIZED;
177                 break;
178         }
179
180         return ret;
181 }
182
183 int esp_deinit_module(struct esp_adapter *adapter)
184 {
185         /* Second & onward bootup cleanup:
186          *
187          * SPI is software and not a hardware based module.
188          * When bootup event is received, we should discard all prior commands,
189          * old messages pending at network and re-initialize everything.
190          */
191         uint8_t prio_q_idx, iface_idx;
192
193         for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
194                 skb_queue_purge(&spi_context.tx_q[prio_q_idx]);
195         }
196
197         for (iface_idx = 0; iface_idx < ESP_MAX_INTERFACE; iface_idx++) {
198                 struct esp_wifi_device *priv = adapter->priv[iface_idx];
199                 esp_mark_scan_done_and_disconnect(priv, true);
200         }
201
202         esp_remove_card(adapter);
203
204         for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
205                 skb_queue_head_init(&spi_context.tx_q[prio_q_idx]);
206         }
207
208         return 0;
209 }
210
211 static int process_rx_buf(struct sk_buff *skb)
212 {
213         struct esp_payload_header *header;
214         u16 len = 0;
215         u16 offset = 0;
216
217         if (!skb)
218                 return -EINVAL;
219
220         header = (struct esp_payload_header *) skb->data;
221
222         if (header->if_type >= ESP_MAX_IF) {
223                 return -EINVAL;
224         }
225
226         offset = le16_to_cpu(header->offset);
227
228         /* Validate received SKB. Check len and offset fields */
229         if (offset != sizeof(struct esp_payload_header)) {
230                 return -EINVAL;
231         }
232
233         len = le16_to_cpu(header->len);
234         if (!len) {
235                 return -EINVAL;
236         }
237
238         len += sizeof(struct esp_payload_header);
239
240         if (len > SPI_BUF_SIZE) {
241                 return -EINVAL;
242         }
243
244         /* Trim SKB to actual size */
245         skb_trim(skb, len);
246
247
248         if (!data_path) {
249                 /*esp_info("%u datapath closed\n", __LINE__);*/
250                 return -EPERM;
251         }
252
253         /* enqueue skb for read_packet to pick it */
254         if (header->if_type == ESP_INTERNAL_IF)
255                 skb_queue_tail(&spi_context.rx_q[PRIO_Q_HIGH], skb);
256         else if (header->if_type == ESP_HCI_IF)
257                 skb_queue_tail(&spi_context.rx_q[PRIO_Q_MID], skb);
258         else
259                 skb_queue_tail(&spi_context.rx_q[PRIO_Q_LOW], skb);
260
261         /* indicate reception of new packet */
262         esp_process_new_packet_intr(spi_context.adapter);
263
264         return 0;
265 }
266
267 static void esp_spi_work(struct work_struct *work)
268 {
269         struct spi_transfer trans;
270         struct sk_buff *tx_skb = NULL, *rx_skb = NULL;
271         struct esp_skb_cb *cb = NULL;
272         u8 *rx_buf = NULL;
273         int ret = 0;
274         volatile int trans_ready, rx_pending;
275
276         mutex_lock(&spi_lock);
277
278         trans_ready = gpio_get_value(HANDSHAKE_PIN);
279         rx_pending = gpio_get_value(SPI_DATA_READY_PIN);
280
281         if (trans_ready) {
282                 if (data_path) {
283                         tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_HIGH]);
284                         if (!tx_skb)
285                                 tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_MID]);
286                         if (!tx_skb)
287                                 tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_LOW]);
288                         if (tx_skb) {
289                                 if (atomic_read(&tx_pending))
290                                         atomic_dec(&tx_pending);
291
292                                 /* resume network tx queue if bearable load */
293                                 cb = (struct esp_skb_cb *)tx_skb->cb;
294                                 if (cb && cb->priv && atomic_read(&tx_pending) < TX_RESUME_THRESHOLD) {
295                                         esp_tx_resume(cb->priv);
296 #if TEST_RAW_TP
297                                         if (raw_tp_mode != 0) {
298                                                 esp_raw_tp_queue_resume();
299                                         }
300 #endif
301                                 }
302                         }
303                 }
304
305                 if (rx_pending || tx_skb) {
306                         memset(&trans, 0, sizeof(trans));
307                         trans.speed_hz = spi_context.spi_clk_mhz * NUMBER_1M;
308
309                         /* Setup and execute SPI transaction
310                          *      Tx_buf: Check if tx_q has valid buffer for transmission,
311                          *              else keep it blank
312                          *
313                          *      Rx_buf: Allocate memory for incoming data. This will be freed
314                          *              immediately if received buffer is invalid.
315                          *              If it is a valid buffer, upper layer will free it.
316                          * */
317
318                         /* Configure TX buffer if available */
319
320                         if (tx_skb) {
321                                 trans.tx_buf = tx_skb->data;
322                                 /*print_hex_dump(KERN_ERR, "tx: ", DUMP_PREFIX_ADDRESS, 16, 1, trans.tx_buf, 32, 1);*/
323                         } else {
324                                 tx_skb = esp_alloc_skb(SPI_BUF_SIZE);
325                                 trans.tx_buf = skb_put(tx_skb, SPI_BUF_SIZE);
326                                 memset((void *)trans.tx_buf, 0, SPI_BUF_SIZE);
327                         }
328
329                         /* Configure RX buffer */
330                         rx_skb = esp_alloc_skb(SPI_BUF_SIZE);
331                         rx_buf = skb_put(rx_skb, SPI_BUF_SIZE);
332
333                         memset(rx_buf, 0, SPI_BUF_SIZE);
334
335                         trans.rx_buf = rx_buf;
336                         trans.len = SPI_BUF_SIZE;
337
338 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
339                         if (hardware_type == ESP_FIRMWARE_CHIP_ESP32) {
340                                 trans.cs_change = 1;
341                         }
342 #endif
343
344                         ret = spi_sync_transfer(spi_context.esp_spi_dev, &trans, 1);
345                         if (ret) {
346                                 esp_err("SPI Transaction failed: %d", ret);
347                                 dev_kfree_skb(rx_skb);
348                                 dev_kfree_skb(tx_skb);
349                         } else {
350
351                                 /* Free rx_skb if received data is not valid */
352                                 if (process_rx_buf(rx_skb)) {
353                                         dev_kfree_skb(rx_skb);
354                                 }
355
356                                 if (tx_skb)
357                                         dev_kfree_skb(tx_skb);
358                         }
359                 }
360         }
361
362         mutex_unlock(&spi_lock);
363 }
364
365 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0))
366 #include <linux/platform_device.h>
367 static int __spi_controller_match(struct device *dev, const void *data)
368 {
369         struct spi_controller *ctlr;
370         const u16 *bus_num = data;
371
372         ctlr = container_of(dev, struct spi_controller, dev);
373
374         if (!ctlr) {
375                 return 0;
376         }
377
378         return ctlr->bus_num == *bus_num;
379 }
380
381 static struct spi_controller *spi_busnum_to_master(u16 bus_num)
382 {
383         struct platform_device *pdev = NULL;
384         struct spi_master *master = NULL;
385         struct spi_controller *ctlr = NULL;
386         struct device *dev = NULL;
387
388         pdev = platform_device_alloc("pdev", PLATFORM_DEVID_NONE);
389         pdev->num_resources = 0;
390         platform_device_add(pdev);
391
392         master = spi_alloc_master(&pdev->dev, sizeof(void *));
393         if (!master) {
394                 pr_err("Error: failed to allocate SPI master device\n");
395                 platform_device_del(pdev);
396                 platform_device_put(pdev);
397                 return NULL;
398         }
399
400         dev = class_find_device(master->dev.class, NULL, &bus_num, __spi_controller_match);
401         if (dev) {
402                 ctlr = container_of(dev, struct spi_controller, dev);
403         }
404
405         spi_master_put(master);
406         platform_device_del(pdev);
407         platform_device_put(pdev);
408
409         return ctlr;
410 }
411 #endif
412
413 static int spi_dev_init(int spi_clk_mhz)
414 {
415         int status = 0;
416         struct spi_board_info esp_board = {{0}};
417         struct spi_master *master = NULL;
418
419         strscpy(esp_board.modalias, "esp_spi", sizeof(esp_board.modalias));
420         esp_board.mode = SPI_MODE_2;
421         esp_board.max_speed_hz = spi_clk_mhz * NUMBER_1M;
422         esp_board.bus_num = 0;
423         esp_board.chip_select = 0;
424
425         master = spi_busnum_to_master(esp_board.bus_num);
426         if (!master) {
427                 esp_err("Failed to obtain SPI master handle\n");
428                 return -ENODEV;
429         }
430
431         spi_context.esp_spi_dev = spi_new_device(master, &esp_board);
432
433         if (!spi_context.esp_spi_dev) {
434                 esp_err("Failed to add new SPI device\n");
435                 return -ENODEV;
436         }
437
438         status = spi_setup(spi_context.esp_spi_dev);
439
440         if (status) {
441                 esp_err("Failed to setup new SPI device");
442                 return status;
443         }
444
445         esp_info("ESP32 peripheral is registered to SPI bus [%d],chip select [%d], SPI Clock [%d]\n",
446                         esp_board.bus_num,
447                         esp_board.chip_select, spi_clk_mhz);
448
449         status = gpio_request(HANDSHAKE_PIN, "SPI_HANDSHAKE_PIN");
450
451         if (status) {
452                 esp_err("Failed to obtain GPIO for Handshake pin, err:%d\n", status);
453                 return status;
454         }
455
456         status = gpio_direction_input(HANDSHAKE_PIN);
457
458         if (status) {
459                 gpio_free(HANDSHAKE_PIN);
460                 esp_err("Failed to set GPIO direction of Handshake pin, err: %d\n", status);
461                 return status;
462         }
463
464         status = request_irq(SPI_IRQ, spi_interrupt_handler,
465                         IRQF_SHARED | IRQF_TRIGGER_RISING,
466                         "ESP_SPI", spi_context.esp_spi_dev);
467         if (status) {
468                 gpio_free(HANDSHAKE_PIN);
469                 esp_err("Failed to request IRQ for Handshake pin, err:%d\n", status);
470                 return status;
471         }
472
473         status = gpio_request(SPI_DATA_READY_PIN, "SPI_DATA_READY_PIN");
474         if (status) {
475                 gpio_free(HANDSHAKE_PIN);
476                 free_irq(SPI_IRQ, spi_context.esp_spi_dev);
477                 esp_err("Failed to obtain GPIO for Data ready pin, err:%d\n", status);
478                 return status;
479         }
480
481         status = gpio_direction_input(SPI_DATA_READY_PIN);
482         if (status) {
483                 gpio_free(HANDSHAKE_PIN);
484                 free_irq(SPI_IRQ, spi_context.esp_spi_dev);
485                 gpio_free(SPI_DATA_READY_PIN);
486                 esp_err("Failed to set GPIO direction of Data ready pin\n");
487                 return status;
488         }
489
490         status = request_irq(SPI_DATA_READY_IRQ, spi_data_ready_interrupt_handler,
491                         IRQF_SHARED | IRQF_TRIGGER_RISING,
492                         "ESP_SPI_DATA_READY", spi_context.esp_spi_dev);
493         if (status) {
494                 gpio_free(HANDSHAKE_PIN);
495                 free_irq(SPI_IRQ, spi_context.esp_spi_dev);
496                 gpio_free(SPI_DATA_READY_PIN);
497                 esp_err("Failed to request IRQ for Data ready pin, err:%d\n", status);
498                 return status;
499         }
500         spi_context.spi_gpio_enabled = 1;
501
502         open_data_path();
503
504         return 0;
505 }
506
507 static int spi_init(void)
508 {
509         int status = 0;
510         uint8_t prio_q_idx = 0;
511         struct esp_adapter *adapter;
512
513         spi_context.spi_workqueue = create_workqueue("ESP_SPI_WORK_QUEUE");
514
515         if (!spi_context.spi_workqueue) {
516                 esp_err("spi workqueue failed to create\n");
517                 spi_exit();
518                 return -EFAULT;
519         }
520
521         INIT_WORK(&spi_context.spi_work, esp_spi_work);
522
523         for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
524                 skb_queue_head_init(&spi_context.tx_q[prio_q_idx]);
525                 skb_queue_head_init(&spi_context.rx_q[prio_q_idx]);
526         }
527
528         status = spi_dev_init(spi_context.spi_clk_mhz);
529         if (status) {
530                 spi_exit();
531                 esp_err("Failed Init SPI device\n");
532                 return status;
533         }
534
535         adapter = spi_context.adapter;
536
537         if (!adapter) {
538                 spi_exit();
539                 return -EFAULT;
540         }
541
542         adapter->dev = &spi_context.esp_spi_dev->dev;
543
544         return status;
545 }
546
547 static void spi_exit(void)
548 {
549         uint8_t prio_q_idx = 0;
550
551         disable_irq(SPI_IRQ);
552         disable_irq(SPI_DATA_READY_IRQ);
553         close_data_path();
554         msleep(200);
555
556         for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
557                 skb_queue_purge(&spi_context.tx_q[prio_q_idx]);
558                 skb_queue_purge(&spi_context.rx_q[prio_q_idx]);
559         }
560
561         if (spi_context.spi_workqueue) {
562                 flush_scheduled_work();
563                 destroy_workqueue(spi_context.spi_workqueue);
564                 spi_context.spi_workqueue = NULL;
565         }
566
567         esp_remove_card(spi_context.adapter);
568
569         if (spi_context.adapter->hcidev)
570                 esp_deinit_bt(spi_context.adapter);
571
572         if (spi_context.spi_gpio_enabled) {
573                 free_irq(SPI_IRQ, spi_context.esp_spi_dev);
574                 free_irq(SPI_DATA_READY_IRQ, spi_context.esp_spi_dev);
575
576                 gpio_free(HANDSHAKE_PIN);
577                 gpio_free(SPI_DATA_READY_PIN);
578         }
579
580         if (spi_context.esp_spi_dev)
581                 spi_unregister_device(spi_context.esp_spi_dev);
582
583         memset(&spi_context, 0, sizeof(spi_context));
584 }
585
586 static void adjust_spi_clock(u8 spi_clk_mhz)
587 {
588         if ((spi_clk_mhz) && (spi_clk_mhz != spi_context.spi_clk_mhz)) {
589                 esp_info("ESP Reconfigure SPI CLK to %u MHz\n", spi_clk_mhz);
590                 spi_context.spi_clk_mhz = spi_clk_mhz;
591                 spi_context.esp_spi_dev->max_speed_hz = spi_clk_mhz * NUMBER_1M;
592         }
593 }
594
595 int esp_adjust_spi_clock(struct esp_adapter *adapter, u8 spi_clk_mhz)
596 {
597         adjust_spi_clock(spi_clk_mhz);
598
599         return 0;
600 }
601
602 int esp_init_interface_layer(struct esp_adapter *adapter, u32 speed)
603 {
604         if (!adapter)
605                 return -EINVAL;
606
607         memset(&spi_context, 0, sizeof(spi_context));
608
609         adapter->if_context = &spi_context;
610         adapter->if_ops = &if_ops;
611         adapter->if_type = ESP_IF_TYPE_SPI;
612         spi_context.adapter = adapter;
613         if (speed)
614                 spi_context.spi_clk_mhz = speed;
615         else
616                 spi_context.spi_clk_mhz = SPI_INITIAL_CLK_MHZ;
617
618         return spi_init();
619 }
620
/* Entry point: tear the SPI transport back down. */
void esp_deinit_interface_layer(void)
{
	spi_exit();
}
This page took 0.058757 seconds and 4 git commands to generate.