]> Git Repo - esp-hosted.git/blob - esp_hosted_ng/host/spi/esp_spi.c
Merge branch 'bugfix/hosted_ng_cleanup' into 'master'
[esp-hosted.git] / esp_hosted_ng / host / spi / esp_spi.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
4  *
5  */
6 #include "utils.h"
7 #include <linux/spi/spi.h>
8 #include <linux/gpio.h>
9 #include <linux/mutex.h>
10 #include <linux/delay.h>
11 #include "esp_spi.h"
12 #include "esp_if.h"
13 #include "esp_api.h"
14 #include "esp_bt_api.h"
15 #include "esp_kernel_port.h"
16 #include "esp_stats.h"
17 #include "esp_utils.h"
18 #include "esp_cfg80211.h"
19
20 #define SPI_INITIAL_CLK_MHZ     10
21 #define TX_MAX_PENDING_COUNT    100
22 #define TX_RESUME_THRESHOLD     (TX_MAX_PENDING_COUNT/5)
23
24 static struct sk_buff *read_packet(struct esp_adapter *adapter);
25 static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb);
26 static void spi_exit(void);
27 static int spi_init(void);
28 static void adjust_spi_clock(u8 spi_clk_mhz);
29
/* Data-path gate: OPEN_DATAPATH once the transport is up, CLOSE_DATAPATH
 * otherwise.  Read from IRQ/worker context and written from init/exit paths.
 * NOTE(review): volatile is not a synchronization primitive — confirm the
 * racy accesses here are tolerated by design.
 */
volatile u8 data_path;
/* Host-sleep flag; declared here but not referenced in this file. */
volatile u8 host_sleep;
/* Driver-wide SPI transport state (SPI device, TX/RX queues, workqueue). */
static struct esp_spi_context spi_context;
/* Detected chip type; only ESP32 gets special cs_change handling below. */
static char hardware_type = ESP_FIRMWARE_CHIP_UNRECOGNIZED;
/* Count of data-class (PRIO_Q_LOW) packets queued but not yet transferred;
 * drives the TX pause/resume backpressure in write_packet()/esp_spi_work().
 */
static atomic_t tx_pending;

/* Transport hooks handed to the generic adapter layer. */
static struct esp_if_ops if_ops = {
        .read           = read_packet,
        .write          = write_packet,
};

/* Serializes the SPI transaction performed by esp_spi_work(). */
static DEFINE_MUTEX(spi_lock);
42
/* Open the transport data path.
 *
 * Resets the TX backpressure counter, then waits 200 ms before flipping the
 * flag.  NOTE(review): the delay presumably lets any stale traffic from a
 * previous session drain (mirrors the sleep in close_data_path()) — confirm.
 */
static void open_data_path(void)
{
        atomic_set(&tx_pending, 0);
        msleep(200);
        data_path = OPEN_DATAPATH;
}
49
/* Close the transport data path.
 *
 * The flag is cleared first so read_packet()/write_packet() start rejecting
 * traffic, then 200 ms is given for in-flight SPI work to finish.
 */
static void close_data_path(void)
{
        data_path = CLOSE_DATAPATH;
        msleep(200);
}
55
56 static irqreturn_t spi_data_ready_interrupt_handler(int irq, void *dev)
57 {
58         /* ESP peripheral has queued buffer for transmission */
59         if (spi_context.spi_workqueue)
60                 queue_work(spi_context.spi_workqueue, &spi_context.spi_work);
61
62         return IRQ_HANDLED;
63  }
64
65 static irqreturn_t spi_interrupt_handler(int irq, void *dev)
66 {
67         /* ESP peripheral is ready for next SPI transaction */
68         if (spi_context.spi_workqueue)
69                 queue_work(spi_context.spi_workqueue, &spi_context.spi_work);
70
71         return IRQ_HANDLED;
72 }
73
74 static struct sk_buff *read_packet(struct esp_adapter *adapter)
75 {
76         struct esp_spi_context *context;
77         struct sk_buff *skb = NULL;
78
79         if (!data_path) {
80                 return NULL;
81         }
82
83         if (!adapter || !adapter->if_context) {
84                 esp_err("Invalid args\n");
85                 return NULL;
86         }
87
88         context = adapter->if_context;
89
90         if (context->esp_spi_dev) {
91                 skb = skb_dequeue(&(context->rx_q[PRIO_Q_HIGH]));
92                 if (!skb)
93                         skb = skb_dequeue(&(context->rx_q[PRIO_Q_MID]));
94                 if (!skb)
95                         skb = skb_dequeue(&(context->rx_q[PRIO_Q_LOW]));
96         } else {
97                 esp_err("Invalid args\n");
98                 return NULL;
99         }
100
101         return skb;
102 }
103
104 static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb)
105 {
106         u32 max_pkt_size = SPI_BUF_SIZE - sizeof(struct esp_payload_header);
107         struct esp_payload_header *payload_header = (struct esp_payload_header *) skb->data;
108         struct esp_skb_cb *cb = NULL;
109
110         if (!adapter || !adapter->if_context || !skb || !skb->data || !skb->len) {
111                 esp_err("Invalid args\n");
112                 if (skb) {
113                         dev_kfree_skb(skb);
114                         skb = NULL;
115                 }
116                 return -EINVAL;
117         }
118
119         if (skb->len > max_pkt_size) {
120                 esp_err("Drop pkt of len[%u] > max spi transport len[%u]\n",
121                                 skb->len, max_pkt_size);
122                 dev_kfree_skb(skb);
123                 return -EPERM;
124         }
125
126         if (!data_path) {
127                 esp_info("%u datapath closed\n", __LINE__);
128                 dev_kfree_skb(skb);
129                 return -EPERM;
130         }
131
132         cb = (struct esp_skb_cb *)skb->cb;
133         if (cb && cb->priv && (atomic_read(&tx_pending) >= TX_MAX_PENDING_COUNT)) {
134                 esp_tx_pause(cb->priv);
135                 dev_kfree_skb(skb);
136                 skb = NULL;
137                 /*esp_err("TX Pause busy");*/
138                 if (spi_context.spi_workqueue)
139                         queue_work(spi_context.spi_workqueue, &spi_context.spi_work);
140                 return -EBUSY;
141         }
142
143         /* Enqueue SKB in tx_q */
144         if (payload_header->if_type == ESP_INTERNAL_IF) {
145                 skb_queue_tail(&spi_context.tx_q[PRIO_Q_HIGH], skb);
146         } else if (payload_header->if_type == ESP_HCI_IF) {
147                 skb_queue_tail(&spi_context.tx_q[PRIO_Q_MID], skb);
148         } else {
149                 skb_queue_tail(&spi_context.tx_q[PRIO_Q_LOW], skb);
150                 atomic_inc(&tx_pending);
151         }
152
153         if (spi_context.spi_workqueue)
154                 queue_work(spi_context.spi_workqueue, &spi_context.spi_work);
155
156         return 0;
157 }
158
159 int esp_validate_chipset(struct esp_adapter *adapter, u8 chipset)
160 {
161         int ret = 0;
162
163         switch(chipset) {
164         case ESP_FIRMWARE_CHIP_ESP32:
165         case ESP_FIRMWARE_CHIP_ESP32S2:
166         case ESP_FIRMWARE_CHIP_ESP32S3:
167         case ESP_FIRMWARE_CHIP_ESP32C2:
168         case ESP_FIRMWARE_CHIP_ESP32C3:
169         case ESP_FIRMWARE_CHIP_ESP32C6:
170                 adapter->chipset = chipset;
171                 esp_info("Chipset=%s ID=%02x detected over SPI\n", esp_chipname_from_id(chipset), chipset);
172                 break;
173         default:
174                 esp_err("Unrecognized chipset ID=%02x\n", chipset);
175                 adapter->chipset = ESP_FIRMWARE_CHIP_UNRECOGNIZED;
176                 break;
177         }
178
179         return ret;
180 }
181
/* Reset transport state when the ESP chip reboots (second and later boot
 * events).
 *
 * SPI is software-only (no hardware hotplug), so on a fresh bootup event we
 * must discard all prior commands and pending network traffic, then
 * re-initialize: purge the TX queues, abort scans / disconnect every
 * interface, remove the card, and re-init the (now empty) TX queues.
 * Always returns 0.
 */
int esp_deinit_module(struct esp_adapter *adapter)
{
        /* Second & onward bootup cleanup:
         *
         * SPI is software and not a hardware based module.
         * When bootup event is received, we should discard all prior commands,
         * old messages pending at network and re-initialize everything.
         */
        uint8_t prio_q_idx, iface_idx;

        /* Drop every packet still waiting for transmission. */
        for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
                skb_queue_purge(&spi_context.tx_q[prio_q_idx]);
        }

        /* Abort in-progress scans and tear down connections per interface. */
        for (iface_idx = 0; iface_idx < ESP_MAX_INTERFACE; iface_idx++) {
                struct esp_wifi_device *priv = adapter->priv[iface_idx];
                esp_mark_scan_done_and_disconnect(priv, true);
        }

        esp_remove_card(adapter);

        /* Queues were purged above; re-init them for the new session. */
        for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
                skb_queue_head_init(&spi_context.tx_q[prio_q_idx]);
        }

        return 0;
}
209
210 static int process_rx_buf(struct sk_buff *skb)
211 {
212         struct esp_payload_header *header;
213         u16 len = 0;
214         u16 offset = 0;
215
216         if (!skb)
217                 return -EINVAL;
218
219         header = (struct esp_payload_header *) skb->data;
220
221         if (header->if_type >= ESP_MAX_IF) {
222                 return -EINVAL;
223         }
224
225         offset = le16_to_cpu(header->offset);
226
227         /* Validate received SKB. Check len and offset fields */
228         if (offset != sizeof(struct esp_payload_header)) {
229                 return -EINVAL;
230         }
231
232         len = le16_to_cpu(header->len);
233         if (!len) {
234                 return -EINVAL;
235         }
236
237         len += sizeof(struct esp_payload_header);
238
239         if (len > SPI_BUF_SIZE) {
240                 return -EINVAL;
241         }
242
243         /* Trim SKB to actual size */
244         skb_trim(skb, len);
245
246
247         if (!data_path) {
248                 /*esp_info("%u datapath closed\n", __LINE__);*/
249                 return -EPERM;
250         }
251
252         /* enqueue skb for read_packet to pick it */
253         if (header->if_type == ESP_INTERNAL_IF)
254                 skb_queue_tail(&spi_context.rx_q[PRIO_Q_HIGH], skb);
255         else if (header->if_type == ESP_HCI_IF)
256                 skb_queue_tail(&spi_context.rx_q[PRIO_Q_MID], skb);
257         else
258                 skb_queue_tail(&spi_context.rx_q[PRIO_Q_LOW], skb);
259
260         /* indicate reception of new packet */
261         esp_process_new_packet_intr(spi_context.adapter);
262
263         return 0;
264 }
265
266 static void esp_spi_work(struct work_struct *work)
267 {
268         struct spi_transfer trans;
269         struct sk_buff *tx_skb = NULL, *rx_skb = NULL;
270         struct esp_skb_cb *cb = NULL;
271         u8 *rx_buf = NULL;
272         int ret = 0;
273         volatile int trans_ready, rx_pending;
274
275         mutex_lock(&spi_lock);
276
277         trans_ready = gpio_get_value(HANDSHAKE_PIN);
278         rx_pending = gpio_get_value(SPI_DATA_READY_PIN);
279
280         if (trans_ready) {
281                 if (data_path) {
282                         tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_HIGH]);
283                         if (!tx_skb)
284                                 tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_MID]);
285                         if (!tx_skb)
286                                 tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_LOW]);
287                         if (tx_skb) {
288                                 if (atomic_read(&tx_pending))
289                                         atomic_dec(&tx_pending);
290
291                                 /* resume network tx queue if bearable load */
292                                 cb = (struct esp_skb_cb *)tx_skb->cb;
293                                 if (cb && cb->priv && atomic_read(&tx_pending) < TX_RESUME_THRESHOLD) {
294                                         esp_tx_resume(cb->priv);
295                                         #if TEST_RAW_TP
296                                                 esp_raw_tp_queue_resume();
297                                         #endif
298                                 }
299                         }
300                 }
301
302                 if (rx_pending || tx_skb) {
303                         memset(&trans, 0, sizeof(trans));
304                         trans.speed_hz = spi_context.spi_clk_mhz * NUMBER_1M;
305
306                         /* Setup and execute SPI transaction
307                          *      Tx_buf: Check if tx_q has valid buffer for transmission,
308                          *              else keep it blank
309                          *
310                          *      Rx_buf: Allocate memory for incoming data. This will be freed
311                          *              immediately if received buffer is invalid.
312                          *              If it is a valid buffer, upper layer will free it.
313                          * */
314
315                         /* Configure TX buffer if available */
316
317                         if (tx_skb) {
318                                 trans.tx_buf = tx_skb->data;
319                                 /*print_hex_dump(KERN_ERR, "tx: ", DUMP_PREFIX_ADDRESS, 16, 1, trans.tx_buf, 32, 1);*/
320                         } else {
321                                 tx_skb = esp_alloc_skb(SPI_BUF_SIZE);
322                                 trans.tx_buf = skb_put(tx_skb, SPI_BUF_SIZE);
323                                 memset((void *)trans.tx_buf, 0, SPI_BUF_SIZE);
324                         }
325
326                         /* Configure RX buffer */
327                         rx_skb = esp_alloc_skb(SPI_BUF_SIZE);
328                         rx_buf = skb_put(rx_skb, SPI_BUF_SIZE);
329
330                         memset(rx_buf, 0, SPI_BUF_SIZE);
331
332                         trans.rx_buf = rx_buf;
333                         trans.len = SPI_BUF_SIZE;
334
335 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
336                         if (hardware_type == ESP_FIRMWARE_CHIP_ESP32) {
337                                 trans.cs_change = 1;
338                         }
339 #endif
340
341                         ret = spi_sync_transfer(spi_context.esp_spi_dev, &trans, 1);
342                         if (ret) {
343                                 esp_err("SPI Transaction failed: %d", ret);
344                                 dev_kfree_skb(rx_skb);
345                                 dev_kfree_skb(tx_skb);
346                         } else {
347
348                                 /* Free rx_skb if received data is not valid */
349                                 if (process_rx_buf(rx_skb)) {
350                                         dev_kfree_skb(rx_skb);
351                                 }
352
353                                 if (tx_skb)
354                                         dev_kfree_skb(tx_skb);
355                         }
356                 }
357         }
358
359         mutex_unlock(&spi_lock);
360 }
361
362 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0))
363 #include <linux/platform_device.h>
364 static int __spi_controller_match(struct device *dev, const void *data)
365 {
366         struct spi_controller *ctlr;
367         const u16 *bus_num = data;
368
369         ctlr = container_of(dev, struct spi_controller, dev);
370
371         if (!ctlr) {
372                 return 0;
373         }
374
375         return ctlr->bus_num == *bus_num;
376 }
377
378 static struct spi_controller *spi_busnum_to_master(u16 bus_num)
379 {
380         struct platform_device *pdev = NULL;
381         struct spi_master *master = NULL;
382         struct spi_controller *ctlr = NULL;
383         struct device *dev = NULL;
384
385         pdev = platform_device_alloc("pdev", PLATFORM_DEVID_NONE);
386         pdev->num_resources = 0;
387         platform_device_add(pdev);
388
389         master = spi_alloc_master(&pdev->dev, sizeof(void *));
390         if (!master) {
391                 pr_err("Error: failed to allocate SPI master device\n");
392                 platform_device_del(pdev);
393                 platform_device_put(pdev);
394                 return NULL;
395         }
396
397         dev = class_find_device(master->dev.class, NULL, &bus_num, __spi_controller_match);
398         if (dev) {
399                 ctlr = container_of(dev, struct spi_controller, dev);
400         }
401
402         spi_master_put(master);
403         platform_device_del(pdev);
404         platform_device_put(pdev);
405
406         return ctlr;
407 }
408 #endif
409
410 static int spi_dev_init(int spi_clk_mhz)
411 {
412         int status = 0;
413         struct spi_board_info esp_board = {{0}};
414         struct spi_master *master = NULL;
415
416         strlcpy(esp_board.modalias, "esp_spi", sizeof(esp_board.modalias));
417         esp_board.mode = SPI_MODE_2;
418         esp_board.max_speed_hz = spi_clk_mhz * NUMBER_1M;
419         esp_board.bus_num = 0;
420         esp_board.chip_select = 0;
421
422         master = spi_busnum_to_master(esp_board.bus_num);
423         if (!master) {
424                 esp_err("Failed to obtain SPI master handle\n");
425                 return -ENODEV;
426         }
427
428         spi_context.esp_spi_dev = spi_new_device(master, &esp_board);
429
430         if (!spi_context.esp_spi_dev) {
431                 esp_err("Failed to add new SPI device\n");
432                 return -ENODEV;
433         }
434
435         status = spi_setup(spi_context.esp_spi_dev);
436
437         if (status) {
438                 esp_err("Failed to setup new SPI device");
439                 return status;
440         }
441
442         esp_info("ESP32 peripheral is registered to SPI bus [%d],chip select [%d], SPI Clock [%d]\n",
443                         esp_board.bus_num,
444                         esp_board.chip_select, spi_clk_mhz);
445
446         status = gpio_request(HANDSHAKE_PIN, "SPI_HANDSHAKE_PIN");
447
448         if (status) {
449                 esp_err("Failed to obtain GPIO for Handshake pin, err:%d\n", status);
450                 return status;
451         }
452
453         status = gpio_direction_input(HANDSHAKE_PIN);
454
455         if (status) {
456                 gpio_free(HANDSHAKE_PIN);
457                 esp_err("Failed to set GPIO direction of Handshake pin, err: %d\n", status);
458                 return status;
459         }
460
461         status = request_irq(SPI_IRQ, spi_interrupt_handler,
462                         IRQF_SHARED | IRQF_TRIGGER_RISING,
463                         "ESP_SPI", spi_context.esp_spi_dev);
464         if (status) {
465                 gpio_free(HANDSHAKE_PIN);
466                 esp_err("Failed to request IRQ for Handshake pin, err:%d\n", status);
467                 return status;
468         }
469
470         status = gpio_request(SPI_DATA_READY_PIN, "SPI_DATA_READY_PIN");
471         if (status) {
472                 gpio_free(HANDSHAKE_PIN);
473                 free_irq(SPI_IRQ, spi_context.esp_spi_dev);
474                 esp_err("Failed to obtain GPIO for Data ready pin, err:%d\n", status);
475                 return status;
476         }
477
478         status = gpio_direction_input(SPI_DATA_READY_PIN);
479         if (status) {
480                 gpio_free(HANDSHAKE_PIN);
481                 free_irq(SPI_IRQ, spi_context.esp_spi_dev);
482                 gpio_free(SPI_DATA_READY_PIN);
483                 esp_err("Failed to set GPIO direction of Data ready pin\n");
484                 return status;
485         }
486
487         status = request_irq(SPI_DATA_READY_IRQ, spi_data_ready_interrupt_handler,
488                         IRQF_SHARED | IRQF_TRIGGER_RISING,
489                         "ESP_SPI_DATA_READY", spi_context.esp_spi_dev);
490         if (status) {
491                 gpio_free(HANDSHAKE_PIN);
492                 free_irq(SPI_IRQ, spi_context.esp_spi_dev);
493                 gpio_free(SPI_DATA_READY_PIN);
494                 esp_err("Failed to request IRQ for Data ready pin, err:%d\n", status);
495                 return status;
496         }
497         spi_context.spi_gpio_enabled = 1;
498
499         open_data_path();
500
501         return 0;
502 }
503
/* Bring up the SPI transport.
 *
 * Creates the dedicated worker queue, initializes the TX/RX priority
 * queues, registers/configures the SPI device and its GPIOs via
 * spi_dev_init(), and points the adapter at the new SPI device.
 * Returns 0 on success or a negative errno; on any failure the partially
 * initialized state is torn down via spi_exit().
 */
static int spi_init(void)
{
        int status = 0;
        uint8_t prio_q_idx = 0;
        struct esp_adapter *adapter;

        spi_context.spi_workqueue = create_workqueue("ESP_SPI_WORK_QUEUE");

        if (!spi_context.spi_workqueue) {
                esp_err("spi workqueue failed to create\n");
                spi_exit();
                return -EFAULT;
        }

        INIT_WORK(&spi_context.spi_work, esp_spi_work);

        /* Queues must exist before spi_dev_init() enables the IRQs that
         * feed them.
         */
        for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
                skb_queue_head_init(&spi_context.tx_q[prio_q_idx]);
                skb_queue_head_init(&spi_context.rx_q[prio_q_idx]);
        }

        status = spi_dev_init(spi_context.spi_clk_mhz);
        if (status) {
                spi_exit();
                esp_err("Failed Init SPI device\n");
                return status;
        }

        /* Adapter is expected to be set by esp_init_interface_layer()
         * before this runs.
         */
        adapter = spi_context.adapter;

        if (!adapter) {
                spi_exit();
                return -EFAULT;
        }

        adapter->dev = &spi_context.esp_spi_dev->dev;

        return status;
}
543
544 static void spi_exit(void)
545 {
546         uint8_t prio_q_idx = 0;
547
548         disable_irq(SPI_IRQ);
549         disable_irq(SPI_DATA_READY_IRQ);
550         close_data_path();
551         msleep(200);
552
553         for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
554                 skb_queue_purge(&spi_context.tx_q[prio_q_idx]);
555                 skb_queue_purge(&spi_context.rx_q[prio_q_idx]);
556         }
557
558         if (spi_context.spi_workqueue) {
559                 flush_scheduled_work();
560                 destroy_workqueue(spi_context.spi_workqueue);
561                 spi_context.spi_workqueue = NULL;
562         }
563
564         esp_remove_card(spi_context.adapter);
565
566         if (spi_context.adapter->hcidev)
567                 esp_deinit_bt(spi_context.adapter);
568
569         if (spi_context.spi_gpio_enabled) {
570                 free_irq(SPI_IRQ, spi_context.esp_spi_dev);
571                 free_irq(SPI_DATA_READY_IRQ, spi_context.esp_spi_dev);
572
573                 gpio_free(HANDSHAKE_PIN);
574                 gpio_free(SPI_DATA_READY_PIN);
575         }
576
577         if (spi_context.esp_spi_dev)
578                 spi_unregister_device(spi_context.esp_spi_dev);
579
580         memset(&spi_context, 0, sizeof(spi_context));
581 }
582
583 static void adjust_spi_clock(u8 spi_clk_mhz)
584 {
585         if ((spi_clk_mhz) && (spi_clk_mhz != spi_context.spi_clk_mhz)) {
586                 esp_info("ESP Reconfigure SPI CLK to %u MHz\n", spi_clk_mhz);
587                 spi_context.spi_clk_mhz = spi_clk_mhz;
588                 spi_context.esp_spi_dev->max_speed_hz = spi_clk_mhz * NUMBER_1M;
589         }
590 }
591
/* Adapter-layer hook to retune the SPI bus clock.
 *
 * @adapter is unused — the transport keeps its own context.
 * Always returns 0.
 */
int esp_adjust_spi_clock(struct esp_adapter *adapter, u8 spi_clk_mhz)
{
        adjust_spi_clock(spi_clk_mhz);

        return 0;
}
598
599 int esp_init_interface_layer(struct esp_adapter *adapter, u32 speed)
600 {
601         if (!adapter)
602                 return -EINVAL;
603
604         memset(&spi_context, 0, sizeof(spi_context));
605
606         adapter->if_context = &spi_context;
607         adapter->if_ops = &if_ops;
608         adapter->if_type = ESP_IF_TYPE_SPI;
609         spi_context.adapter = adapter;
610         if (speed)
611                 spi_context.spi_clk_mhz = speed;
612         else
613                 spi_context.spi_clk_mhz = SPI_INITIAL_CLK_MHZ;
614
615         return spi_init();
616 }
617
/* Public teardown entry point: releases all SPI transport resources. */
void esp_deinit_interface_layer(void)
{
        spi_exit();
}
This page took 0.060037 seconds and 4 git commands to generate.