esp_hosted_ng / host / spi / esp_spi.c
/*
 * SPDX-FileCopyrightText: 2015-2023 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/mutex.h>
#include <linux/delay.h>

#include "esp_spi.h"
#include "esp_if.h"
#include "esp_api.h"
#include "esp_bt_api.h"
#include "esp_kernel_port.h"
#include "esp_stats.h"

#define SPI_INITIAL_CLK_MHZ 10
#define NUMBER_1M 1000000
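
/*
 * TX flow control thresholds: the network TX queue is paused once this many
 * packets are pending towards the ESP, and resumed again when the backlog
 * drops below the resume threshold (see write_packet() and esp_spi_work()).
 */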
#define TX_MAX_PENDING_COUNT 100
#define TX_RESUME_THRESHOLD (TX_MAX_PENDING_COUNT/5)

static struct sk_buff *read_packet(struct esp_adapter *adapter);
static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb);
static void spi_exit(void);
static int spi_init(void);
static void adjust_spi_clock(u8 spi_clk_mhz);

volatile u8 data_path = 0;
volatile u8 host_sleep = 0;
static struct esp_spi_context spi_context;
static char hardware_type = ESP_FIRMWARE_CHIP_UNRECOGNIZED;
static atomic_t tx_pending;
static uint8_t esp_reset_after_module_load;

static struct esp_if_ops if_ops = {
        .read = read_packet,
        .write = write_packet,
};

static DEFINE_MUTEX(spi_lock);

static void open_data_path(void)
{
        atomic_set(&tx_pending, 0);
        msleep(200);
        data_path = OPEN_DATAPATH;
}

static void close_data_path(void)
{
        data_path = CLOSE_DATAPATH;
        msleep(200);
}

static irqreturn_t spi_data_ready_interrupt_handler(int irq, void *dev)
{
        /* ESP peripheral has queued buffer for transmission */
        if (spi_context.spi_workqueue)
                queue_work(spi_context.spi_workqueue, &spi_context.spi_work);

        return IRQ_HANDLED;
}

static irqreturn_t spi_interrupt_handler(int irq, void *dev)
{
        /* ESP peripheral is ready for next SPI transaction */
        if (spi_context.spi_workqueue)
                queue_work(spi_context.spi_workqueue, &spi_context.spi_work);

        return IRQ_HANDLED;
}

static struct sk_buff *read_packet(struct esp_adapter *adapter)
{
        struct esp_spi_context *context;
        struct sk_buff *skb = NULL;

        if (!data_path) {
                return NULL;
        }

        if (!adapter || !adapter->if_context) {
                printk(KERN_ERR "%s: Invalid args\n", __func__);
                return NULL;
        }

        context = adapter->if_context;

        if (context->esp_spi_dev) {
                skb = skb_dequeue(&(context->rx_q[PRIO_Q_HIGH]));
                if (!skb)
                        skb = skb_dequeue(&(context->rx_q[PRIO_Q_MID]));
                if (!skb)
                        skb = skb_dequeue(&(context->rx_q[PRIO_Q_LOW]));
        } else {
                printk(KERN_ERR "%s: Invalid args\n", __func__);
                return NULL;
        }

        return skb;
}

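/*
 * write_packet() - transport TX hook called by the upper layers.
 *
 * Validates the skb, applies TX flow control and queues the packet on a
 * priority queue based on the payload interface type: internal control
 * traffic goes to PRIO_Q_HIGH, HCI (Bluetooth) traffic to PRIO_Q_MID and
 * regular network data to PRIO_Q_LOW. Actual transmission is performed by
 * esp_spi_work().
 */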
static int write_packet(struct esp_adapter *adapter, struct sk_buff *skb)
{
        u32 max_pkt_size = SPI_BUF_SIZE - sizeof(struct esp_payload_header);
        struct esp_payload_header *payload_header = NULL;
        struct esp_skb_cb *cb = NULL;

        if (!adapter || !adapter->if_context || !skb || !skb->data || !skb->len) {
                printk(KERN_ERR "%s: Invalid args\n", __func__);
                if (skb) {
                        dev_kfree_skb(skb);
                        skb = NULL;
                }
                return -EINVAL;
        }

        if (skb->len > max_pkt_size) {
                printk(KERN_ERR "%s: Drop pkt of len[%u] > max spi transport len[%u]\n",
                        __func__, skb->len, max_pkt_size);
                dev_kfree_skb(skb);
                return -EPERM;
        }

        if (!data_path) {
                printk(KERN_INFO "esp32: %s:%u datapath closed\n", __func__, __LINE__);
                dev_kfree_skb(skb);
                return -EPERM;
        }

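        /*
         * Flow control: if too many packets are already pending towards the
         * ESP, pause the network TX queue, drop this packet and kick the
         * worker so the backlog drains.
         */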
        cb = (struct esp_skb_cb *)skb->cb;
        if (cb && cb->priv && (atomic_read(&tx_pending) >= TX_MAX_PENDING_COUNT)) {
                esp_tx_pause(cb->priv);
                dev_kfree_skb(skb);
                skb = NULL;
                /*printk(KERN_ERR "esp32: %s: TX Pause busy", __func__);*/
                if (spi_context.spi_workqueue)
                        queue_work(spi_context.spi_workqueue, &spi_context.spi_work);
                return -EBUSY;
        }

        payload_header = (struct esp_payload_header *) skb->data;

        /* Enqueue SKB in tx_q */
        if (payload_header->if_type == ESP_INTERNAL_IF) {
                skb_queue_tail(&spi_context.tx_q[PRIO_Q_HIGH], skb);
        } else if (payload_header->if_type == ESP_HCI_IF) {
                skb_queue_tail(&spi_context.tx_q[PRIO_Q_MID], skb);
        } else {
                skb_queue_tail(&spi_context.tx_q[PRIO_Q_LOW], skb);
                atomic_inc(&tx_pending);
        }

        if (spi_context.spi_workqueue)
                queue_work(spi_context.spi_workqueue, &spi_context.spi_work);

        return 0;
}

void process_event_esp_bootup(struct esp_adapter *adapter, u8 *evt_buf, u8 len)
{
        /* The bootup event is received whenever the ESP boots.
         * It is termed 'first bootup' when the event arrives for the first
         * time after the module is loaded, and 'second & onward bootup' when
         * the ESP is reset afterwards (manual reset or a crash on the ESP).
         */
        u8 len_left = len, tag_len;
        u8 *pos;
        uint8_t iface_idx = 0;
        uint8_t prio_q_idx = 0;

        if (!adapter)
                return;

        if (!evt_buf)
                return;

        /* Second & onward bootup: clean up and re-init the driver */
        if (esp_reset_after_module_load)
                set_bit(ESP_CLEANUP_IN_PROGRESS, &adapter->state_flags);

        pos = evt_buf;

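        /* The event payload is a sequence of TLVs: *pos is the tag,
         * *(pos + 1) its length, and the value starts at pos + 2.
         */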
        while (len_left) {

                tag_len = *(pos + 1);

                printk(KERN_INFO "EVENT: %d\n", *pos);

                if (*pos == ESP_BOOTUP_CAPABILITY) {

                        adapter->capabilities = *(pos + 2);

                } else if (*pos == ESP_BOOTUP_FW_DATA) {

                        if (tag_len != sizeof(struct fw_data))
                                printk(KERN_INFO "Length does not match firmware data size\n");
                        else
                                if (process_fw_data((struct fw_data *)(pos + 2))) {
                                        esp_remove_card(spi_context.adapter);
                                        return;
                                }

                } else if (*pos == ESP_BOOTUP_SPI_CLK_MHZ) {

                        adjust_spi_clock(*(pos + 2));
                        adapter->dev = &spi_context.esp_spi_dev->dev;

                } else if (*pos == ESP_BOOTUP_FIRMWARE_CHIP_ID) {

                        hardware_type = *(pos + 2);

                } else if (*pos == ESP_BOOTUP_TEST_RAW_TP) {
                        process_test_capabilities(*(pos + 2));
                } else {
                        printk(KERN_WARNING "Unsupported tag in event\n");
                }

                pos += (tag_len + 2);
                len_left -= (tag_len + 2);
        }

        if ((hardware_type != ESP_FIRMWARE_CHIP_ESP32) &&
            (hardware_type != ESP_FIRMWARE_CHIP_ESP32S2) &&
            (hardware_type != ESP_FIRMWARE_CHIP_ESP32C3) &&
            (hardware_type != ESP_FIRMWARE_CHIP_ESP32S3)) {
                printk(KERN_INFO "ESP chipset not recognized, ignoring [%d]\n", hardware_type);
                hardware_type = ESP_FIRMWARE_CHIP_UNRECOGNIZED;
        } else {
                printk(KERN_INFO "ESP chipset detected [%s]\n",
                        hardware_type == ESP_FIRMWARE_CHIP_ESP32 ? "esp32" :
                        hardware_type == ESP_FIRMWARE_CHIP_ESP32S2 ? "esp32-s2" :
                        hardware_type == ESP_FIRMWARE_CHIP_ESP32C3 ? "esp32-c3" :
                        hardware_type == ESP_FIRMWARE_CHIP_ESP32S3 ? "esp32-s3" :
                        "unknown");
        }

        if (esp_reset_after_module_load) {

                /* Second & onward bootup:
                 *
                 * SPI is a software transport, not a hardware-detected module.
                 * When a bootup event is received here, discard all prior
                 * commands and any messages pending at the network layer, and
                 * re-initialize everything.
                 *
                 * Such handling is not required:
                 * 1. For SDIO, as removal of the SDIO card triggers a complete
                 *    deinit, and initialization is triggered again after the
                 *    card is detected and probed.
                 *
                 * 2. On first bootup (when this block is skipped), the event
                 *    arrives immediately after module insertion. Network and
                 *    command state are freshly initialized, so there is
                 *    nothing to re-init.
                 */

                for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
                        skb_queue_purge(&spi_context.tx_q[prio_q_idx]);
                }

                for (iface_idx = 0; iface_idx < ESP_MAX_INTERFACE; iface_idx++) {

                        struct esp_wifi_device *priv = adapter->priv[iface_idx];

                        if (!priv)
                                continue;

                        if (priv->scan_in_progress)
                                ESP_MARK_SCAN_DONE(priv, true);

                        if (priv->ndev &&
                            wireless_dev_current_bss_exists(&priv->wdev)) {
                                CFG80211_DISCONNECTED(priv->ndev,
                                                0, NULL, 0, false, GFP_KERNEL);
                        }
                }

                esp_remove_card(adapter);

                for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
                        skb_queue_head_init(&spi_context.tx_q[prio_q_idx]);
                }
        }

        if (esp_add_card(adapter)) {
                printk(KERN_ERR "network interface init failed\n");
        }

        process_capabilities(adapter);
        print_capabilities(adapter->capabilities);

        esp_reset_after_module_load = 1;
}

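/*
 * process_rx_buf() - validate a buffer received over SPI.
 *
 * Checks the payload header (interface type, offset, length), trims the skb
 * to its real size and queues it on the RX priority queues for read_packet().
 */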
static int process_rx_buf(struct sk_buff *skb)
{
        struct esp_payload_header *header;
        u16 len = 0;
        u16 offset = 0;

        if (!skb)
                return -EINVAL;

        header = (struct esp_payload_header *) skb->data;

        if (header->if_type >= ESP_MAX_IF) {
                return -EINVAL;
        }

        offset = le16_to_cpu(header->offset);

        /* Validate received SKB. Check len and offset fields */
        if (offset != sizeof(struct esp_payload_header)) {
                return -EINVAL;
        }

        len = le16_to_cpu(header->len);
        if (!len) {
                return -EINVAL;
        }

        len += sizeof(struct esp_payload_header);

        if (len > SPI_BUF_SIZE) {
                return -EINVAL;
        }

        /* Trim SKB to actual size */
        skb_trim(skb, len);

        if (!data_path) {
                /*printk(KERN_INFO "%s:%u datapath closed\n", __func__, __LINE__);*/
                return -EPERM;
        }

        /* enqueue skb for read_packet to pick it */
        if (header->if_type == ESP_INTERNAL_IF)
                skb_queue_tail(&spi_context.rx_q[PRIO_Q_HIGH], skb);
        else if (header->if_type == ESP_HCI_IF)
                skb_queue_tail(&spi_context.rx_q[PRIO_Q_MID], skb);
        else
                skb_queue_tail(&spi_context.rx_q[PRIO_Q_LOW], skb);

        /* indicate reception of new packet */
        esp_process_new_packet_intr(spi_context.adapter);

        return 0;
}

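/*
 * esp_spi_work() - perform one full-duplex SPI transaction with the ESP.
 *
 * The HANDSHAKE GPIO indicates the ESP is ready for a transaction; the
 * DATA_READY GPIO indicates it has data to send. A transfer is executed only
 * when the handshake is asserted and there is either something to transmit or
 * RX data pending; every transfer is a fixed SPI_BUF_SIZE exchange.
 */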
static void esp_spi_work(struct work_struct *work)
{
        struct spi_transfer trans;
        struct sk_buff *tx_skb = NULL, *rx_skb = NULL;
        struct esp_skb_cb *cb = NULL;
        u8 *rx_buf = NULL;
        int ret = 0;
        volatile int trans_ready, rx_pending;

        mutex_lock(&spi_lock);

        trans_ready = gpio_get_value(HANDSHAKE_PIN);
        rx_pending = gpio_get_value(SPI_DATA_READY_PIN);

        if (trans_ready) {
                if (data_path) {
                        tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_HIGH]);
                        if (!tx_skb)
                                tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_MID]);
                        if (!tx_skb)
                                tx_skb = skb_dequeue(&spi_context.tx_q[PRIO_Q_LOW]);
                        if (tx_skb) {
                                if (atomic_read(&tx_pending))
                                        atomic_dec(&tx_pending);

                                /* resume network tx queue if bearable load */
                                cb = (struct esp_skb_cb *)tx_skb->cb;
                                if (cb && cb->priv && atomic_read(&tx_pending) < TX_RESUME_THRESHOLD) {
                                        esp_tx_resume(cb->priv);
#if TEST_RAW_TP
                                        esp_raw_tp_queue_resume();
#endif
                                }
                        }
                }

                if (rx_pending || tx_skb) {
                        memset(&trans, 0, sizeof(trans));
                        trans.speed_hz = spi_context.spi_clk_mhz * NUMBER_1M;

                        /* Setup and execute SPI transaction
                         * Tx_buf: Check if tx_q has a valid buffer for
                         *         transmission, else keep it blank.
                         *
                         * Rx_buf: Allocate memory for incoming data. This is
                         *         freed immediately if the received buffer is
                         *         invalid; for a valid buffer, the upper layer
                         *         frees it.
                         */

                        /* Configure TX buffer if available */
                        if (tx_skb) {
                                trans.tx_buf = tx_skb->data;
                                /*print_hex_dump(KERN_ERR, "tx: ", DUMP_PREFIX_ADDRESS, 16, 1, trans.tx_buf, 32, 1);*/
                        } else {
                                tx_skb = esp_alloc_skb(SPI_BUF_SIZE);
                                trans.tx_buf = skb_put(tx_skb, SPI_BUF_SIZE);
                                memset((void *)trans.tx_buf, 0, SPI_BUF_SIZE);
                        }

                        /* Configure RX buffer */
                        rx_skb = esp_alloc_skb(SPI_BUF_SIZE);
                        rx_buf = skb_put(rx_skb, SPI_BUF_SIZE);

                        memset(rx_buf, 0, SPI_BUF_SIZE);

                        trans.rx_buf = rx_buf;
                        trans.len = SPI_BUF_SIZE;

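                        /* cs_change is set for ESP32 only (per the chip id
                         * reported in the bootup event); other chips use the
                         * SPI core's default chip-select handling.
                         */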
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
                        if (hardware_type == ESP_FIRMWARE_CHIP_ESP32) {
                                trans.cs_change = 1;
                        }
#endif

                        ret = spi_sync_transfer(spi_context.esp_spi_dev, &trans, 1);
                        if (ret) {
                                printk(KERN_ERR "SPI Transaction failed: %d\n", ret);
                                dev_kfree_skb(rx_skb);
                                dev_kfree_skb(tx_skb);
                        } else {

                                /* Free rx_skb if received data is not valid */
                                if (process_rx_buf(rx_skb)) {
                                        dev_kfree_skb(rx_skb);
                                }

                                if (tx_skb)
                                        dev_kfree_skb(tx_skb);
                        }
                }
        }

        mutex_unlock(&spi_lock);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0))
#include <linux/platform_device.h>
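/*
 * From v5.16 onwards this driver assumes spi_busnum_to_master() is no longer
 * available, so it provides a local fallback that looks up the SPI controller
 * for a given bus number via class_find_device().
 */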
static int __spi_controller_match(struct device *dev, const void *data)
{
        struct spi_controller *ctlr;
        const u16 *bus_num = data;

        ctlr = container_of(dev, struct spi_controller, dev);

        if (!ctlr) {
                return 0;
        }

        return ctlr->bus_num == *bus_num;
}

static struct spi_controller *spi_busnum_to_master(u16 bus_num)
{
        struct platform_device *pdev = NULL;
        struct spi_master *master = NULL;
        struct spi_controller *ctlr = NULL;
        struct device *dev = NULL;

        pdev = platform_device_alloc("pdev", PLATFORM_DEVID_NONE);
        if (!pdev)
                return NULL;
        pdev->num_resources = 0;
        platform_device_add(pdev);

        master = spi_alloc_master(&pdev->dev, sizeof(void *));
        if (!master) {
                pr_err("Error: failed to allocate SPI master device\n");
                platform_device_del(pdev);
                platform_device_put(pdev);
                return NULL;
        }

        dev = class_find_device(master->dev.class, NULL, &bus_num, __spi_controller_match);
        if (dev) {
                ctlr = container_of(dev, struct spi_controller, dev);
        }

        spi_master_put(master);
        platform_device_del(pdev);
        platform_device_put(pdev);

        return ctlr;
}
#endif

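/*
 * spi_dev_init() - register the ESP as a device on SPI bus 0, chip select 0
 * (SPI mode 2), and set up the handshake and data-ready GPIOs with their
 * rising-edge interrupts.
 */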
static int spi_dev_init(int spi_clk_mhz)
{
        int status = 0;
        struct spi_board_info esp_board = {{0}};
        struct spi_master *master = NULL;

        strlcpy(esp_board.modalias, "esp_spi", sizeof(esp_board.modalias));
        esp_board.mode = SPI_MODE_2;
        esp_board.max_speed_hz = spi_clk_mhz * NUMBER_1M;
        esp_board.bus_num = 0;
        esp_board.chip_select = 0;

        master = spi_busnum_to_master(esp_board.bus_num);
        if (!master) {
                printk(KERN_ERR "Failed to obtain SPI master handle\n");
                return -ENODEV;
        }

        spi_context.esp_spi_dev = spi_new_device(master, &esp_board);

        if (!spi_context.esp_spi_dev) {
                printk(KERN_ERR "Failed to add new SPI device\n");
                return -ENODEV;
        }

        status = spi_setup(spi_context.esp_spi_dev);

        if (status) {
                printk(KERN_ERR "Failed to setup new SPI device\n");
                return status;
        }

        printk(KERN_INFO "ESP32 peripheral is registered to SPI bus [%d]"
                ", chip select [%d], SPI Clock [%d]\n", esp_board.bus_num,
                esp_board.chip_select, spi_clk_mhz);

        status = gpio_request(HANDSHAKE_PIN, "SPI_HANDSHAKE_PIN");

        if (status) {
                printk(KERN_ERR "Failed to obtain GPIO for Handshake pin, err:%d\n", status);
                return status;
        }

        status = gpio_direction_input(HANDSHAKE_PIN);

        if (status) {
                gpio_free(HANDSHAKE_PIN);
                printk(KERN_ERR "Failed to set GPIO direction of Handshake pin, err:%d\n", status);
                return status;
        }

        status = request_irq(SPI_IRQ, spi_interrupt_handler,
                        IRQF_SHARED | IRQF_TRIGGER_RISING,
                        "ESP_SPI", spi_context.esp_spi_dev);
        if (status) {
                gpio_free(HANDSHAKE_PIN);
                printk(KERN_ERR "Failed to request IRQ for Handshake pin, err:%d\n", status);
                return status;
        }

        status = gpio_request(SPI_DATA_READY_PIN, "SPI_DATA_READY_PIN");
        if (status) {
                gpio_free(HANDSHAKE_PIN);
                free_irq(SPI_IRQ, spi_context.esp_spi_dev);
                printk(KERN_ERR "Failed to obtain GPIO for Data ready pin, err:%d\n", status);
                return status;
        }

        status = gpio_direction_input(SPI_DATA_READY_PIN);
        if (status) {
                gpio_free(HANDSHAKE_PIN);
                free_irq(SPI_IRQ, spi_context.esp_spi_dev);
                gpio_free(SPI_DATA_READY_PIN);
                printk(KERN_ERR "Failed to set GPIO direction of Data ready pin, err:%d\n", status);
                return status;
        }

        status = request_irq(SPI_DATA_READY_IRQ, spi_data_ready_interrupt_handler,
                        IRQF_SHARED | IRQF_TRIGGER_RISING,
                        "ESP_SPI_DATA_READY", spi_context.esp_spi_dev);
        if (status) {
                gpio_free(HANDSHAKE_PIN);
                free_irq(SPI_IRQ, spi_context.esp_spi_dev);
                gpio_free(SPI_DATA_READY_PIN);
                printk(KERN_ERR "Failed to request IRQ for Data ready pin, err:%d\n", status);
                return status;
        }
        spi_context.spi_gpio_enabled = 1;

        open_data_path();

        return 0;
}

static int spi_init(void)
{
        int status = 0;
        uint8_t prio_q_idx = 0;
        struct esp_adapter *adapter;

        spi_context.spi_workqueue = create_workqueue("ESP_SPI_WORK_QUEUE");

        if (!spi_context.spi_workqueue) {
                printk(KERN_ERR "spi workqueue failed to create\n");
                spi_exit();
                return -EFAULT;
        }

        INIT_WORK(&spi_context.spi_work, esp_spi_work);

        for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
                skb_queue_head_init(&spi_context.tx_q[prio_q_idx]);
                skb_queue_head_init(&spi_context.rx_q[prio_q_idx]);
        }

        status = spi_dev_init(spi_context.spi_clk_mhz);
        if (status) {
                spi_exit();
                printk(KERN_ERR "Failed to init SPI device\n");
                return status;
        }

        adapter = spi_context.adapter;

        if (!adapter) {
                spi_exit();
                return -EFAULT;
        }

        adapter->dev = &spi_context.esp_spi_dev->dev;

        return status;
}

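/*
 * spi_exit() - tear down the transport: disable interrupts, close the data
 * path, drain the queues, remove the network/BT devices and release the
 * GPIOs and SPI device.
 */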
static void spi_exit(void)
{
        uint8_t prio_q_idx = 0;

        disable_irq(SPI_IRQ);
        disable_irq(SPI_DATA_READY_IRQ);
        close_data_path();
        msleep(200);

        for (prio_q_idx = 0; prio_q_idx < MAX_PRIORITY_QUEUES; prio_q_idx++) {
                skb_queue_purge(&spi_context.tx_q[prio_q_idx]);
                skb_queue_purge(&spi_context.rx_q[prio_q_idx]);
        }

        if (spi_context.spi_workqueue) {
                flush_scheduled_work();
                destroy_workqueue(spi_context.spi_workqueue);
                spi_context.spi_workqueue = NULL;
        }

        esp_remove_card(spi_context.adapter);

        if (spi_context.adapter->hcidev)
                esp_deinit_bt(spi_context.adapter);

        if (spi_context.spi_gpio_enabled) {
                free_irq(SPI_IRQ, spi_context.esp_spi_dev);
                free_irq(SPI_DATA_READY_IRQ, spi_context.esp_spi_dev);

                gpio_free(HANDSHAKE_PIN);
                gpio_free(SPI_DATA_READY_PIN);
        }

        if (spi_context.esp_spi_dev)
                spi_unregister_device(spi_context.esp_spi_dev);

        memset(&spi_context, 0, sizeof(spi_context));
}

static void adjust_spi_clock(u8 spi_clk_mhz)
{
        if ((spi_clk_mhz) && (spi_clk_mhz != spi_context.spi_clk_mhz)) {
                printk(KERN_INFO "ESP Reconfigure SPI CLK to %u MHz\n", spi_clk_mhz);
                spi_context.spi_clk_mhz = spi_clk_mhz;
                spi_context.esp_spi_dev->max_speed_hz = spi_clk_mhz * NUMBER_1M;
        }
}

int esp_init_interface_layer(struct esp_adapter *adapter)
{
        if (!adapter)
                return -EINVAL;

        memset(&spi_context, 0, sizeof(spi_context));

        adapter->if_context = &spi_context;
        adapter->if_ops = &if_ops;
        adapter->if_type = ESP_IF_TYPE_SPI;
        spi_context.adapter = adapter;
        spi_context.spi_clk_mhz = SPI_INITIAL_CLK_MHZ;

        return spi_init();
}

void esp_deinit_interface_layer(void)
{
        spi_exit();
}