esp-hosted.git: host/spi/esp_spi.c (ESP-Hosted cfg80211 support)

/*
 * Copyright (C) 2015-2021 Espressif Systems (Shanghai) PTE LTD
 *
 * This software file (the "File") is distributed by Espressif Systems (Shanghai)
 * PTE LTD under the terms of the GNU General Public License Version 2, June 1991
 * (the "License").  You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
 * this warranty disclaimer.
 */
#include <linux/device.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/interrupt.h>    /* request_irq()/free_irq(); may also be pulled in via esp_spi.h */
#include <linux/version.h>      /* LINUX_VERSION_CODE check below; may also be pulled in via esp_spi.h */
#include "esp_spi.h"
#include "esp_if.h"
#include "esp_api.h"
#include "esp_bt_api.h"

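/*
 * Transport tunables:
 * - SPI_INITIAL_CLK_MHZ is used until the ESP bootup event advertises the
 *   clock it actually supports (see adjust_spi_clock()).
 * - TX_MAX_PENDING_COUNT / TX_RESUME_THRESHOLD are the watermarks at which
 *   the network TX queue is paused and resumed, based on the number of data
 *   packets waiting in the low priority TX queue.
 */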
#define SPI_INITIAL_CLK_MHZ     10
#define NUMBER_1M               1000000
#define TX_MAX_PENDING_COUNT    100
#define TX_RESUME_THRESHOLD     (TX_MAX_PENDING_COUNT/5)

static struct sk_buff * read_packet(struct esp_adapter *adapter);
static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb);
static void spi_exit(void);
static int spi_init(void);
static void adjust_spi_clock(u8 spi_clk_mhz);

volatile u8 data_path = 0;
static struct esp_spi_context spi_context;
static char hardware_type = 0;
static atomic_t tx_pending;
static uint8_t esp_reset_after_module_load;

static struct esp_if_ops if_ops = {
        .read           = read_packet,
        .write          = write_packet,
};

static DEFINE_MUTEX(spi_lock);

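/*
 * data_path gates all traffic through this transport. It is opened once the
 * SPI device and GPIO/IRQ resources are set up, and closed (with a settle
 * delay) before they are torn down, so that in-flight work sees a consistent
 * state.
 */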
static void open_data_path(void)
{
        atomic_set(&tx_pending, 0);
        msleep(200);
        data_path = OPEN_DATAPATH;
}

static void close_data_path(void)
{
        data_path = CLOSE_DATAPATH;
        msleep(200);
}

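/*
 * Both GPIO interrupts simply kick the SPI worker: the handshake line means
 * the ESP peripheral is ready for the next transaction, while the data-ready
 * line means it has a buffer queued for us. The worker re-reads both lines
 * before deciding whether to run a transfer.
 */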
static irqreturn_t spi_data_ready_interrupt_handler(int irq, void * dev)
{
        /* ESP peripheral has queued buffer for transmission */
        if (spi_context.spi_workqueue)
                queue_work(spi_context.spi_workqueue, &spi_context.spi_work);

        return IRQ_HANDLED;
}

static irqreturn_t spi_interrupt_handler(int irq, void * dev)
{
        /* ESP peripheral is ready for next SPI transaction */
        if (spi_context.spi_workqueue)
                queue_work(spi_context.spi_workqueue, &spi_context.spi_work);

        return IRQ_HANDLED;
}

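/*
 * read_packet() is the esp_if_ops.read hook: it hands the next received
 * buffer to the core driver, draining the RX queues in strict priority
 * order (internal traffic, then HCI, then data).
 */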
static struct sk_buff * read_packet(struct esp_adapter *adapter)
{
        struct esp_spi_context *context;
        struct sk_buff *skb = NULL;

        if (!data_path) {
                return NULL;
        }

        if (!adapter || !adapter->if_context) {
                printk(KERN_ERR "%s: Invalid args\n", __func__);
                return NULL;
        }

        context = adapter->if_context;

        if (context->esp_spi_dev) {
                skb = skb_dequeue(&(context->rx_q[PRIO_Q_HIGH]));
                if (!skb)
                        skb = skb_dequeue(&(context->rx_q[PRIO_Q_MID]));
                if (!skb)
                        skb = skb_dequeue(&(context->rx_q[PRIO_Q_LOW]));
        } else {
                printk(KERN_ERR "%s: Invalid args\n", __func__);
                return NULL;
        }

        return skb;
}

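/*
 * write_packet() is the esp_if_ops.write hook. The skb already carries an
 * esp_payload_header; it is queued on a priority TX queue and the SPI worker
 * is scheduled to push it out. Data packets count towards tx_pending, which
 * drives the pause/resume flow control of the network queue.
 */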
static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb)
{
        u32 max_pkt_size = SPI_BUF_SIZE - sizeof(struct esp_payload_header);
        struct esp_payload_header *payload_header = NULL;
        struct esp_skb_cb * cb = NULL;

        if (!adapter || !adapter->if_context || !skb || !skb->data || !skb->len) {
                printk(KERN_ERR "%s: Invalid args\n", __func__);
                if (skb) {
                        dev_kfree_skb(skb);
                        skb = NULL;
                }
                return -EINVAL;
        }

        /* Dereference skb->data only after the skb has been validated */
        payload_header = (struct esp_payload_header *) skb->data;

        if (skb->len > max_pkt_size) {
                printk(KERN_ERR "%s: Drop pkt of len[%u] > max spi transport len[%u]\n",
                                __func__, skb->len, max_pkt_size);
                dev_kfree_skb(skb);
                return -EPERM;
        }

        if (!data_path) {
                /*printk(KERN_INFO "%s:%u datapath closed\n",__func__,__LINE__);*/
                dev_kfree_skb(skb);
                return -EPERM;
        }

        cb = (struct esp_skb_cb *)skb->cb;
        if (cb && cb->priv && (atomic_read(&tx_pending) >= TX_MAX_PENDING_COUNT)) {
                esp_tx_pause(cb->priv);
                dev_kfree_skb(skb);
                skb = NULL;
                /* printk(KERN_ERR "%s: TX Pause busy", __func__);*/
                if (spi_context.spi_workqueue)
                        queue_work(spi_context.spi_workqueue, &spi_context.spi_work);
                return -EBUSY;
        }

        /* Enqueue SKB in tx_q */
        if (payload_header->if_type == ESP_INTERNAL_IF) {
                skb_queue_tail(&spi_context.tx_q[PRIO_Q_HIGH], skb);
        } else if (payload_header->if_type == ESP_HCI_IF) {
                skb_queue_tail(&spi_context.tx_q[PRIO_Q_MID], skb);
        } else {
                skb_queue_tail(&spi_context.tx_q[PRIO_Q_LOW], skb);
                atomic_inc(&tx_pending);
        }

        if (spi_context.spi_workqueue)
                queue_work(spi_context.spi_workqueue, &spi_context.spi_work);

        return 0;
}

static void network_cmd_reinit(struct work_struct *work)
{
        struct esp_adapter * adapter = esp_get_adapter();

        if (!adapter) {
                printk(KERN_INFO "adapter not yet initialized\n");
                return;
        }

        esp_remove_card(adapter);
        esp_add_card(adapter);
}

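/*
 * The bootup event is a TLV-encoded blob sent by the ESP firmware on every
 * boot. The first occurrence (right after module load) just finishes driver
 * init; any later occurrence means the ESP reset underneath us, so driver
 * state is torn down and rebuilt from the freshly advertised capabilities,
 * firmware data, chip id and SPI clock.
 */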
void process_event_esp_bootup(struct esp_adapter *adapter, u8 *evt_buf, u8 len)
{
        /* A bootup event is received whenever the ESP boots. It is termed
         * 'first bootup' when the event is received for the first time after
         * the module is loaded, and 'second & onward bootup' when the ESP has
         * been reset (either manually or because of a crash on the ESP).
         */
        u8 len_left = len, tag_len;
        u8 *pos;
        uint8_t iface_idx = 0;

        if (!adapter)
                return;

        if (!evt_buf)
                return;

        /* Second & onward bootup: clean up and re-init the driver */
        if (esp_reset_after_module_load)
                set_bit(ESP_CLEANUP_IN_PROGRESS, &adapter->state_flags);

        pos = evt_buf;

        while (len_left) {

                tag_len = *(pos + 1);

                printk(KERN_INFO "EVENT: %d\n", *pos);

                if (*pos == ESP_BOOTUP_CAPABILITY) {

                        adapter->capabilities = *(pos + 2);

                } else if (*pos == ESP_BOOTUP_FW_DATA) {

                        if (tag_len != sizeof(struct fw_data))
                                printk(KERN_INFO "Length does not match firmware data size\n");
                        else
                                if (process_fw_data((struct fw_data*)(pos + 2))) {
                                        esp_remove_card(spi_context.adapter);
                                        return;
                                }

                } else if (*pos == ESP_BOOTUP_SPI_CLK_MHZ) {

                        adjust_spi_clock(*(pos + 2));

                } else if (*pos == ESP_BOOTUP_FIRMWARE_CHIP_ID) {

                        hardware_type = *(pos + 2);

                } else {
                        printk(KERN_WARNING "Unsupported tag in event\n");
                }

                pos += (tag_len+2);
                len_left -= (tag_len+2);
        }

        if ((hardware_type != ESP_FIRMWARE_CHIP_ESP32) &&
            (hardware_type != ESP_FIRMWARE_CHIP_ESP32S2) &&
            (hardware_type != ESP_FIRMWARE_CHIP_ESP32C3) &&
            (hardware_type != ESP_FIRMWARE_CHIP_ESP32S3)) {
                printk(KERN_INFO "ESP chipset not recognized, ignoring [%d]\n", hardware_type);
                hardware_type = ESP_FIRMWARE_CHIP_UNRECOGNIZED;
        } else {
                /* Report the chip id parsed above; 'pos' has already advanced
                 * past the TLVs, so it must not be dereferenced here. */
                printk(KERN_INFO "ESP chipset detected [%s]\n",
                                hardware_type == ESP_FIRMWARE_CHIP_ESP32 ? "esp32" :
                                hardware_type == ESP_FIRMWARE_CHIP_ESP32S2 ? "esp32-s2" :
                                hardware_type == ESP_FIRMWARE_CHIP_ESP32C3 ? "esp32-c3" :
                                hardware_type == ESP_FIRMWARE_CHIP_ESP32S3 ? "esp32-s3" :
                                "unknown");
        }


        if (esp_reset_after_module_load) {

                /* Second & onward bootup:
                 *
                 * The SPI transport is managed purely in software; there is
                 * no hardware hotplug of the bus. When a bootup event arrives
                 * here, all prior commands and any messages still pending in
                 * the network stack must be discarded and everything
                 * re-initialized.
                 *
                 * Such handling is not required:
                 * 1. for SDIO,
                 *    as removal of the SDIO card triggers a complete deinit,
                 *    and initialization is triggered again after the card is
                 *    detected and probed
                 *
                 * 2. on first bootup (the case where this 'if' is not taken),
                 *    as the first bootup event is received immediately after
                 *    module insertion; the network devices and commands are
                 *    initialized fresh at that point, so there is nothing to
                 *    re-init
                 */

                for (iface_idx=0; iface_idx < ESP_MAX_INTERFACE; iface_idx++) {

                        struct esp_wifi_device *priv = adapter->priv[iface_idx];

                        if (!priv)
                                continue;

                        if (priv->scan_in_progress) {
                                if (priv->request) {
                                        struct cfg80211_scan_info info = {
                                                .aborted = false,
                                        };
                                        /* scan completion */
                                        cfg80211_scan_done(priv->request, &info);
                                        priv->request = NULL;
                                }
                                priv->scan_in_progress = false;
                        }

                        if (priv->ndev &&
                            priv->wdev.current_bss) {

                                esp_port_close(priv);
                                cfg80211_disconnected(priv->ndev,
                                                0, NULL, 0, false, GFP_KERNEL);
                        }
                }

                esp_remove_card(adapter);
        }

        if (esp_add_card(adapter)) {
                printk(KERN_ERR "network interface init failed\n");
        }

        process_capabilities(adapter);
        print_capabilities(adapter->capabilities);

        esp_reset_after_module_load = 1;
}

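/*
 * Validate a buffer received over SPI. Every valid frame starts with an
 * esp_payload_header; frames with a bad interface type, offset or length are
 * rejected so the caller can free them. Valid frames are trimmed to their
 * real length, queued per priority and the core driver is notified.
 */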
static int process_rx_buf(struct sk_buff *skb)
{
        struct esp_payload_header *header;
        u16 len = 0;
        u16 offset = 0;

        if (!skb)
                return -EINVAL;

        header = (struct esp_payload_header *) skb->data;

        if (header->if_type >= ESP_MAX_IF) {
                return -EINVAL;
        }

        offset = le16_to_cpu(header->offset);

        /* Validate received SKB. Check len and offset fields */
        if (offset != sizeof(struct esp_payload_header)) {
                return -EINVAL;
        }

        len = le16_to_cpu(header->len);
        if (!len) {
                return -EINVAL;
        }

        len += sizeof(struct esp_payload_header);

        if (len > SPI_BUF_SIZE) {
                return -EINVAL;
        }

        /* Trim SKB to actual size */
        skb_trim(skb, len);

        if (!data_path) {
                /*printk(KERN_INFO "%s:%u datapath closed\n",__func__,__LINE__);*/
                return -EPERM;
        }

        /* enqueue skb for read_packet to pick it */
        if (header->if_type == ESP_INTERNAL_IF)
                skb_queue_tail(&spi_context.rx_q[PRIO_Q_HIGH], skb);
        else if (header->if_type == ESP_HCI_IF)
                skb_queue_tail(&spi_context.rx_q[PRIO_Q_MID], skb);
        else
                skb_queue_tail(&spi_context.rx_q[PRIO_Q_LOW], skb);

        /* indicate reception of new packet */
        esp_process_new_packet_intr(spi_context.adapter);

        return 0;
}

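/*
 * Worker that runs every SPI transaction. SPI is full duplex, so a single
 * transfer both sends the next queued TX buffer (or a zeroed dummy buffer if
 * nothing is pending) and receives whatever the ESP has queued. A transfer
 * is attempted only when the handshake line reports the peripheral ready and
 * there is either RX pending or something to transmit.
 */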
static void esp_spi_work(struct work_struct *work)
{
        struct spi_transfer trans;
        struct sk_buff *tx_skb = NULL, *rx_skb = NULL;
        struct esp_skb_cb * cb = NULL;
        u8 *rx_buf = NULL;
        int ret = 0;
        volatile int trans_ready, rx_pending;

        mutex_lock(&spi_lock);

        trans_ready = gpio_get_value(HANDSHAKE_PIN);
        rx_pending = gpio_get_value(SPI_DATA_READY_PIN);

        if (trans_ready) {
                if (data_path) {
                        tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_HIGH]);
                        if (!tx_skb)
                                tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_MID]);
                        if (!tx_skb)
                                tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_LOW]);
                        if (tx_skb) {
                                if (atomic_read(&tx_pending))
                                        atomic_dec(&tx_pending);

                                /* resume network tx queue if the load is bearable */
                                cb = (struct esp_skb_cb *)tx_skb->cb;
                                if (cb && cb->priv && atomic_read(&tx_pending) < TX_RESUME_THRESHOLD) {
                                        esp_tx_resume(cb->priv);
                                }
                        }
                }

                if (rx_pending || tx_skb) {
                        memset(&trans, 0, sizeof(trans));

                        /* Setup and execute SPI transaction
                         *      tx_buf: use a valid buffer dequeued from tx_q,
                         *              else a zeroed dummy buffer
                         *
                         *      rx_buf: allocate memory for incoming data. This is freed
                         *              immediately if the received buffer is invalid;
                         *              if it is valid, the upper layer frees it.
                         */

                        /* Configure TX buffer if available */
                        if (tx_skb) {
                                trans.tx_buf = tx_skb->data;
                                /*print_hex_dump(KERN_ERR, "tx: ", DUMP_PREFIX_ADDRESS, 16, 1, trans.tx_buf, 32, 1);*/
                        } else {
                                tx_skb = esp_alloc_skb(SPI_BUF_SIZE);
                                trans.tx_buf = skb_put(tx_skb, SPI_BUF_SIZE);
                                memset((void*)trans.tx_buf, 0, SPI_BUF_SIZE);
                        }

                        /* Configure RX buffer */
                        rx_skb = esp_alloc_skb(SPI_BUF_SIZE);
                        rx_buf = skb_put(rx_skb, SPI_BUF_SIZE);

                        memset(rx_buf, 0, SPI_BUF_SIZE);

                        trans.rx_buf = rx_buf;
                        trans.len = SPI_BUF_SIZE;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
                        if (hardware_type == ESP_FIRMWARE_CHIP_ESP32) {
                                trans.cs_change = 1;
                        }
#endif

                        ret = spi_sync_transfer(spi_context.esp_spi_dev, &trans, 1);
                        if (ret) {
                                printk(KERN_ERR "SPI Transaction failed: %d\n", ret);
                                dev_kfree_skb(rx_skb);
                                dev_kfree_skb(tx_skb);
                        } else {
                                /* Free rx_skb if received data is not valid */
                                if (process_rx_buf(rx_skb)) {
                                        dev_kfree_skb(rx_skb);
                                }

                                if (tx_skb)
                                        dev_kfree_skb(tx_skb);
                        }
                }
        }

        mutex_unlock(&spi_lock);
}

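/*
 * Register the ESP peripheral as an SPI device on bus 0 / chip select 0 and
 * claim the two out-of-band GPIO lines (handshake and data-ready) together
 * with their rising-edge IRQs (SPI_IRQ / SPI_DATA_READY_IRQ). On success the
 * data path is opened and traffic may flow.
 */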
static int spi_dev_init(int spi_clk_mhz)
{
        int status = 0;
        struct spi_board_info esp_board = {{0}};
        struct spi_master *master = NULL;

        strlcpy(esp_board.modalias, "esp_spi", sizeof(esp_board.modalias));
        esp_board.mode = SPI_MODE_2;
        esp_board.max_speed_hz = spi_clk_mhz * NUMBER_1M;
        esp_board.bus_num = 0;
        esp_board.chip_select = 0;

        master = spi_busnum_to_master(esp_board.bus_num);
        if (!master) {
                printk(KERN_ERR "Failed to obtain SPI master handle\n");
                return -ENODEV;
        }

        spi_context.esp_spi_dev = spi_new_device(master, &esp_board);

        if (!spi_context.esp_spi_dev) {
                printk(KERN_ERR "Failed to add new SPI device\n");
                return -ENODEV;
        }

        status = spi_setup(spi_context.esp_spi_dev);

        if (status) {
                printk(KERN_ERR "Failed to setup new SPI device\n");
                return status;
        }

        printk(KERN_INFO "ESP32 peripheral is registered to SPI bus [%d], "
                        "chip select [%d], SPI Clock [%d]\n", esp_board.bus_num,
                        esp_board.chip_select, spi_clk_mhz);

        status = gpio_request(HANDSHAKE_PIN, "SPI_HANDSHAKE_PIN");

        if (status) {
                printk(KERN_ERR "Failed to obtain GPIO for Handshake pin, err:%d\n", status);
                return status;
        }

        status = gpio_direction_input(HANDSHAKE_PIN);

        if (status) {
                gpio_free(HANDSHAKE_PIN);
                printk(KERN_ERR "Failed to set GPIO direction of Handshake pin, err: %d\n", status);
                return status;
        }

        status = request_irq(SPI_IRQ, spi_interrupt_handler,
                        IRQF_SHARED | IRQF_TRIGGER_RISING,
                        "ESP_SPI", spi_context.esp_spi_dev);
        if (status) {
                gpio_free(HANDSHAKE_PIN);
                printk(KERN_ERR "Failed to request IRQ for Handshake pin, err:%d\n", status);
                return status;
        }

        status = gpio_request(SPI_DATA_READY_PIN, "SPI_DATA_READY_PIN");
        if (status) {
                gpio_free(HANDSHAKE_PIN);
                free_irq(SPI_IRQ, spi_context.esp_spi_dev);
                printk(KERN_ERR "Failed to obtain GPIO for Data ready pin, err:%d\n", status);
                return status;
        }

        status = gpio_direction_input(SPI_DATA_READY_PIN);
        if (status) {
                gpio_free(HANDSHAKE_PIN);
                free_irq(SPI_IRQ, spi_context.esp_spi_dev);
                gpio_free(SPI_DATA_READY_PIN);
                printk(KERN_ERR "Failed to set GPIO direction of Data ready pin\n");
                return status;
        }

        status = request_irq(SPI_DATA_READY_IRQ, spi_data_ready_interrupt_handler,
                        IRQF_SHARED | IRQF_TRIGGER_RISING,
                        "ESP_SPI_DATA_READY", spi_context.esp_spi_dev);
        if (status) {
                gpio_free(HANDSHAKE_PIN);
                free_irq(SPI_IRQ, spi_context.esp_spi_dev);
                gpio_free(SPI_DATA_READY_PIN);
                printk(KERN_ERR "Failed to request IRQ for Data ready pin, err:%d\n", status);
                return status;
        }
        spi_context.spi_gpio_enabled = 1;

        open_data_path();

        return 0;
}

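/*
 * Bring the transport up: create the dedicated workqueue and the TX/RX
 * priority queues, then register the SPI device and its GPIOs via
 * spi_dev_init(). Any failure tears down whatever was set up via spi_exit().
 */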
static int spi_init(void)
{
        int status = 0;
        uint8_t prio_q_idx = 0;
        struct esp_adapter *adapter;

        spi_context.spi_workqueue = create_workqueue("ESP_SPI_WORK_QUEUE");

        if (!spi_context.spi_workqueue) {
                printk(KERN_ERR "Failed to create SPI workqueue\n");
                spi_exit();
                return -EFAULT;
        }

        INIT_WORK(&spi_context.spi_work, esp_spi_work);

        for (prio_q_idx=0; prio_q_idx<MAX_PRIORITY_QUEUES; prio_q_idx++) {
                skb_queue_head_init(&spi_context.tx_q[prio_q_idx]);
                skb_queue_head_init(&spi_context.rx_q[prio_q_idx]);
        }

        status = spi_dev_init(spi_context.spi_clk_mhz);
        if (status) {
                spi_exit();
                printk(KERN_ERR "Failed to init SPI device\n");
                return status;
        }

        adapter = spi_context.adapter;

        if (!adapter) {
                spi_exit();
                return -EFAULT;
        }

        adapter->dev = &spi_context.esp_spi_dev->dev;

        return status;
}

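/*
 * Tear the transport down in reverse order: stop the IRQs, close the data
 * path, purge the queues, destroy the workqueue, remove the network/BT
 * devices, release the GPIOs/IRQs and finally unregister the SPI device.
 */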
static void spi_exit(void)
{
        uint8_t prio_q_idx = 0;

        disable_irq(SPI_IRQ);
        disable_irq(SPI_DATA_READY_IRQ);
        close_data_path();
        msleep(200);

        for (prio_q_idx=0; prio_q_idx<MAX_PRIORITY_QUEUES; prio_q_idx++) {
                skb_queue_purge(&spi_context.tx_q[prio_q_idx]);
                skb_queue_purge(&spi_context.rx_q[prio_q_idx]);
        }

        if (spi_context.spi_workqueue) {
                flush_scheduled_work();
                destroy_workqueue(spi_context.spi_workqueue);
                spi_context.spi_workqueue = NULL;
        }

        esp_remove_card(spi_context.adapter);

        if (spi_context.adapter->hcidev)
                esp_deinit_bt(spi_context.adapter);

        if (spi_context.spi_gpio_enabled) {
                free_irq(SPI_IRQ, spi_context.esp_spi_dev);
                free_irq(SPI_DATA_READY_IRQ, spi_context.esp_spi_dev);

                gpio_free(HANDSHAKE_PIN);
                gpio_free(SPI_DATA_READY_PIN);
        }

        if (spi_context.esp_spi_dev)
                spi_unregister_device(spi_context.esp_spi_dev);

        memset(&spi_context, 0, sizeof(spi_context));
}

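/*
 * Record the SPI clock rate advertised by the ESP in its bootup event. The
 * stored value is the one handed to spi_dev_init() when the SPI device is
 * (re)initialized.
 */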
static void adjust_spi_clock(u8 spi_clk_mhz)
{
        if ((spi_clk_mhz) && (spi_clk_mhz != spi_context.spi_clk_mhz)) {
                printk(KERN_INFO "ESP Reconfigure SPI CLK to %u MHz\n", spi_clk_mhz);
                spi_context.spi_clk_mhz = spi_clk_mhz;
        }
}

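/*
 * Entry points used by the core esp-hosted driver to attach and detach the
 * SPI transport. esp_init_interface_layer() wires this file's if_ops into
 * the adapter and starts the transport with the default clock.
 */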
int esp_init_interface_layer(struct esp_adapter *adapter)
{
        if (!adapter)
                return -EINVAL;

        memset(&spi_context, 0, sizeof(spi_context));

        adapter->if_context = &spi_context;
        adapter->if_ops = &if_ops;
        adapter->if_type = ESP_IF_TYPE_SPI;
        spi_context.adapter = adapter;
        spi_context.spi_clk_mhz = SPI_INITIAL_CLK_MHZ;

        return spi_init();
}

void esp_deinit_interface_layer(void)
{
        spi_exit();
}