[linux.git] drivers/net/wireless/mediatek/mt76/usb.c
// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2018 Lorenzo Bianconi <[email protected]>
 */

#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"

#define MT_VEND_REQ_MAX_RETRY   10
#define MT_VEND_REQ_TOUT_MS     300

static bool disable_usb_sg;
module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");

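/* Issue a vendor control request on endpoint 0, retrying transient
 * failures up to MT_VEND_REQ_MAX_RETRY times. Callers must hold
 * usb_ctrl_mtx; the helper gives up immediately once the device has
 * been flagged as removed.
 */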
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
                                  u8 req_type, u16 val, u16 offset,
                                  void *buf, size_t len)
{
        struct usb_interface *uintf = to_usb_interface(dev->dev);
        struct usb_device *udev = interface_to_usbdev(uintf);
        unsigned int pipe;
        int i, ret;

        lockdep_assert_held(&dev->usb.usb_ctrl_mtx);

        pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
                                       : usb_sndctrlpipe(udev, 0);
        for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
                if (test_bit(MT76_REMOVED, &dev->state))
                        return -EIO;

                ret = usb_control_msg(udev, pipe, req, req_type, val,
                                      offset, buf, len, MT_VEND_REQ_TOUT_MS);
                if (ret == -ENODEV)
                        set_bit(MT76_REMOVED, &dev->state);
                if (ret >= 0 || ret == -ENODEV)
                        return ret;
                usleep_range(5000, 10000);
        }

        dev_err(dev->dev, "vendor request req:%02x off:%04x failed:%d\n",
                req, offset, ret);
        return ret;
}

int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
                         u8 req_type, u16 val, u16 offset,
                         void *buf, size_t len)
{
        int ret;

        mutex_lock(&dev->usb.usb_ctrl_mtx);
        ret = __mt76u_vendor_request(dev, req, req_type,
                                     val, offset, buf, len);
        trace_usb_reg_wr(dev, offset, val);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);

        return ret;
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);

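/* Register accesses are tunnelled through vendor requests on EP0: the
 * high bits of the address (MT_VEND_TYPE_MASK) select the request type
 * (EEPROM, config or plain multi-read/write), the remaining bits form
 * the wIndex offset.
 */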
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
        struct mt76_usb *usb = &dev->usb;
        u32 data = ~0;
        u16 offset;
        int ret;
        u8 req;

        switch (addr & MT_VEND_TYPE_MASK) {
        case MT_VEND_TYPE_EEPROM:
                req = MT_VEND_READ_EEPROM;
                break;
        case MT_VEND_TYPE_CFG:
                req = MT_VEND_READ_CFG;
                break;
        default:
                req = MT_VEND_MULTI_READ;
                break;
        }
        offset = addr & ~MT_VEND_TYPE_MASK;

        ret = __mt76u_vendor_request(dev, req,
                                     USB_DIR_IN | USB_TYPE_VENDOR,
                                     0, offset, &usb->reg_val, sizeof(__le32));
        if (ret == sizeof(__le32))
                data = le32_to_cpu(usb->reg_val);
        trace_usb_reg_rr(dev, addr, data);

        return data;
}

static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
        u32 ret;

        mutex_lock(&dev->usb.usb_ctrl_mtx);
        ret = __mt76u_rr(dev, addr);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);

        return ret;
}

static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
        struct mt76_usb *usb = &dev->usb;
        u16 offset;
        u8 req;

        switch (addr & MT_VEND_TYPE_MASK) {
        case MT_VEND_TYPE_CFG:
                req = MT_VEND_WRITE_CFG;
                break;
        default:
                req = MT_VEND_MULTI_WRITE;
                break;
        }
        offset = addr & ~MT_VEND_TYPE_MASK;

        usb->reg_val = cpu_to_le32(val);
        __mt76u_vendor_request(dev, req,
                               USB_DIR_OUT | USB_TYPE_VENDOR, 0,
                               offset, &usb->reg_val, sizeof(__le32));
        trace_usb_reg_wr(dev, addr, val);
}

static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
        mutex_lock(&dev->usb.usb_ctrl_mtx);
        __mt76u_wr(dev, addr, val);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);
}

static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr,
                     u32 mask, u32 val)
{
        mutex_lock(&dev->usb.usb_ctrl_mtx);
        val |= __mt76u_rr(dev, addr) & ~mask;
        __mt76u_wr(dev, addr, val);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);

        return val;
}

static void mt76u_copy(struct mt76_dev *dev, u32 offset,
                       const void *data, int len)
{
        struct mt76_usb *usb = &dev->usb;
        const u32 *val = data;
        int i, ret;

        mutex_lock(&usb->usb_ctrl_mtx);
        for (i = 0; i < DIV_ROUND_UP(len, 4); i++) {
                put_unaligned(val[i], (u32 *)usb->data);
                ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
                                             USB_DIR_OUT | USB_TYPE_VENDOR,
                                             0, offset + i * 4, usb->data,
                                             sizeof(u32));
                if (ret < 0)
                        break;
        }
        mutex_unlock(&usb->usb_ctrl_mtx);
}

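/* Write a 32-bit value as two 16-bit vendor requests: the low half is
 * sent in wValue at @offset, the high half at @offset + 2.
 */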
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
                     const u16 offset, const u32 val)
{
        mutex_lock(&dev->usb.usb_ctrl_mtx);
        __mt76u_vendor_request(dev, req,
                               USB_DIR_OUT | USB_TYPE_VENDOR,
                               val & 0xffff, offset, NULL, 0);
        __mt76u_vendor_request(dev, req,
                               USB_DIR_OUT | USB_TYPE_VENDOR,
                               val >> 16, offset + 2, NULL, 0);
        mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
EXPORT_SYMBOL_GPL(mt76u_single_wr);

static int
mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
                const struct mt76_reg_pair *data, int len)
{
        struct mt76_usb *usb = &dev->usb;

        mutex_lock(&usb->usb_ctrl_mtx);
        while (len > 0) {
                __mt76u_wr(dev, base + data->reg, data->value);
                len--;
                data++;
        }
        mutex_unlock(&usb->usb_ctrl_mtx);

        return 0;
}

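/* Once MCU firmware is running, register-pair accesses are delegated
 * to the MCU ops so they go through the command interface instead of
 * raw vendor requests.
 */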
static int
mt76u_wr_rp(struct mt76_dev *dev, u32 base,
            const struct mt76_reg_pair *data, int n)
{
        if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
                return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
        else
                return mt76u_req_wr_rp(dev, base, data, n);
}

static int
mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
                int len)
{
        struct mt76_usb *usb = &dev->usb;

        mutex_lock(&usb->usb_ctrl_mtx);
        while (len > 0) {
                data->value = __mt76u_rr(dev, base + data->reg);
                len--;
                data++;
        }
        mutex_unlock(&usb->usb_ctrl_mtx);

        return 0;
}

static int
mt76u_rd_rp(struct mt76_dev *dev, u32 base,
            struct mt76_reg_pair *data, int n)
{
        if (test_bit(MT76_STATE_MCU_RUNNING, &dev->state))
                return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
        else
                return mt76u_req_rd_rp(dev, base, data, n);
}

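/* Scatter-gather is used only when the module parameter allows it, the
 * host controller supports SG at all (sg_tablesize > 0), and either
 * the bus has no SG constraint or the link runs at USB wireless speed.
 */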
static bool mt76u_check_sg(struct mt76_dev *dev)
{
        struct usb_interface *uintf = to_usb_interface(dev->dev);
        struct usb_device *udev = interface_to_usbdev(uintf);

        return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
                (udev->bus->no_sg_constraint ||
                 udev->speed == USB_SPEED_WIRELESS));
}

static int
mt76u_set_endpoints(struct usb_interface *intf,
                    struct mt76_usb *usb)
{
        struct usb_host_interface *intf_desc = intf->cur_altsetting;
        struct usb_endpoint_descriptor *ep_desc;
        int i, in_ep = 0, out_ep = 0;

        for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
                ep_desc = &intf_desc->endpoint[i].desc;

                if (usb_endpoint_is_bulk_in(ep_desc) &&
                    in_ep < __MT_EP_IN_MAX) {
                        usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
                        in_ep++;
                } else if (usb_endpoint_is_bulk_out(ep_desc) &&
                           out_ep < __MT_EP_OUT_MAX) {
                        usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
                        out_ep++;
                }
        }

        if (in_ep != __MT_EP_IN_MAX || out_ep != __MT_EP_OUT_MAX)
                return -EINVAL;
        return 0;
}

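/* Fill the urb scatterlist with page-fragment rx buffers of
 * q->buf_size bytes each; on a partial fill the urb is trimmed to the
 * buffers actually available.
 */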
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
                 int nsgs, gfp_t gfp)
{
        int i;

        for (i = 0; i < nsgs; i++) {
                struct page *page;
                void *data;
                int offset;

                data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
                if (!data)
                        break;

                page = virt_to_head_page(data);
                offset = data - page_address(page);
                sg_set_page(&urb->sg[i], page, q->buf_size, offset);
        }

        if (i < nsgs) {
                int j;

                for (j = nsgs; j < urb->num_sgs; j++)
                        skb_free_frag(sg_virt(&urb->sg[j]));
                urb->num_sgs = i;
        }

        urb->num_sgs = max_t(int, i, urb->num_sgs);
        urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
        sg_init_marker(urb->sg, urb->num_sgs);

        return i ? : -ENOMEM;
}

static int
mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];

        if (dev->usb.sg_en)
                return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);

        urb->transfer_buffer_length = q->buf_size;
        urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);

        return urb->transfer_buffer ? 0 : -ENOMEM;
}

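/* The urb and its scatterlist come from a single allocation: the sg
 * array lives directly behind struct urb in memory.
 */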
static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
                int sg_max_size)
{
        unsigned int size = sizeof(struct urb);

        if (dev->usb.sg_en)
                size += sg_max_size * sizeof(struct scatterlist);

        e->urb = kzalloc(size, GFP_KERNEL);
        if (!e->urb)
                return -ENOMEM;

        usb_init_urb(e->urb);

        if (dev->usb.sg_en)
                e->urb->sg = (struct scatterlist *)(e->urb + 1);

        return 0;
}

static int
mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
{
        int err;

        err = mt76u_urb_alloc(dev, e, MT_RX_SG_MAX_SIZE);
        if (err)
                return err;

        return mt76u_refill_rx(dev, e->urb, MT_RX_SG_MAX_SIZE,
                               GFP_KERNEL);
}

static void mt76u_urb_free(struct urb *urb)
{
        int i;

        for (i = 0; i < urb->num_sgs; i++)
                skb_free_frag(sg_virt(&urb->sg[i]));

        if (urb->transfer_buffer)
                skb_free_frag(urb->transfer_buffer);

        usb_free_urb(urb);
}

static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
                    struct urb *urb, usb_complete_t complete_fn,
                    void *context)
{
        struct usb_interface *uintf = to_usb_interface(dev->dev);
        struct usb_device *udev = interface_to_usbdev(uintf);
        unsigned int pipe;

        if (dir == USB_DIR_IN)
                pipe = usb_rcvbulkpipe(udev, dev->usb.in_ep[index]);
        else
                pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);

        urb->dev = udev;
        urb->pipe = pipe;
        urb->complete = complete_fn;
        urb->context = context;
}

static inline struct urb *
mt76u_get_next_rx_entry(struct mt76_dev *dev)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        struct urb *urb = NULL;
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);
        if (q->queued > 0) {
                urb = q->entry[q->head].urb;
                q->head = (q->head + 1) % q->ndesc;
                q->queued--;
        }
        spin_unlock_irqrestore(&q->lock, flags);

        return urb;
}

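/* Validate the DMA length the hardware prepends to each rx entry: it
 * must be non-zero, 4-byte aligned, and fit in the received data
 * together with the DMA header.
 */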
static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
{
        u16 dma_len, min_len;

        dma_len = get_unaligned_le16(data);
        min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
                  MT_FCE_INFO_LEN;

        if (data_len < min_len || !dma_len ||
            dma_len + MT_DMA_HDR_LEN > data_len ||
            (dma_len & 0x3))
                return -EINVAL;
        return dma_len;
}

static struct sk_buff *
mt76u_build_rx_skb(void *data, int len, int buf_size)
{
        struct sk_buff *skb;

        if (SKB_WITH_OVERHEAD(buf_size) < MT_DMA_HDR_LEN + len) {
                struct page *page;

                /* slow path, not enough space for data and
                 * skb_shared_info
                 */
                skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
                if (!skb)
                        return NULL;

                skb_put_data(skb, data + MT_DMA_HDR_LEN, MT_SKB_HEAD_LEN);
                data += (MT_DMA_HDR_LEN + MT_SKB_HEAD_LEN);
                page = virt_to_head_page(data);
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                page, data - page_address(page),
                                len - MT_SKB_HEAD_LEN, buf_size);

                return skb;
        }

        /* fast path */
        skb = build_skb(data, buf_size);
        if (!skb)
                return NULL;

        skb_reserve(skb, MT_DMA_HDR_LEN);
        __skb_put(skb, len);

        return skb;
}

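/* Turn a completed rx urb into an skb: the first segment carries the
 * DMA header and the start of the frame, any remaining scatterlist
 * segments are attached as page fragments.
 */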
static int
mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
        int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
        int len, nsgs = 1;
        struct sk_buff *skb;

        if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
                return 0;

        len = mt76u_get_rx_entry_len(data, urb->actual_length);
        if (len < 0)
                return 0;

        data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
        skb = mt76u_build_rx_skb(data, data_len, q->buf_size);
        if (!skb)
                return 0;

        len -= data_len;
        while (len > 0 && nsgs < urb->num_sgs) {
                data_len = min_t(int, len, urb->sg[nsgs].length);
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                sg_page(&urb->sg[nsgs]),
                                urb->sg[nsgs].offset,
                                data_len, q->buf_size);
                len -= data_len;
                nsgs++;
        }
        dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb);

        return nsgs;
}

static void mt76u_complete_rx(struct urb *urb)
{
        struct mt76_dev *dev = urb->context;
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        unsigned long flags;

        trace_rx_urb(dev, urb);

        switch (urb->status) {
        case -ECONNRESET:
        case -ESHUTDOWN:
        case -ENOENT:
                return;
        default:
                dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
                                    urb->status);
                /* fall through */
        case 0:
                break;
        }

        spin_lock_irqsave(&q->lock, flags);
        if (WARN_ONCE(q->entry[q->tail].urb != urb, "rx urb mismatch"))
                goto out;

        q->tail = (q->tail + 1) % q->ndesc;
        q->queued++;
        tasklet_schedule(&dev->usb.rx_tasklet);
out:
        spin_unlock_irqrestore(&q->lock, flags);
}

static int
mt76u_submit_rx_buf(struct mt76_dev *dev, struct urb *urb)
{
        mt76u_fill_bulk_urb(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, urb,
                            mt76u_complete_rx, dev);
        trace_submit_urb(dev, urb);

        return usb_submit_urb(urb, GFP_ATOMIC);
}

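/* Rx bottom half: drain completed urbs, hand the frames to the driver,
 * refill the consumed buffers and resubmit each urb.
 */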
static void mt76u_rx_tasklet(unsigned long data)
{
        struct mt76_dev *dev = (struct mt76_dev *)data;
        struct urb *urb;
        int err, count;

        rcu_read_lock();

        while (true) {
                urb = mt76u_get_next_rx_entry(dev);
                if (!urb)
                        break;

                count = mt76u_process_rx_entry(dev, urb);
                if (count > 0) {
                        err = mt76u_refill_rx(dev, urb, count, GFP_ATOMIC);
                        if (err < 0)
                                break;
                }
                mt76u_submit_rx_buf(dev, urb);
        }
        mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

        rcu_read_unlock();
}

static int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        unsigned long flags;
        int i, err = 0;

        spin_lock_irqsave(&q->lock, flags);
        for (i = 0; i < q->ndesc; i++) {
                err = mt76u_submit_rx_buf(dev, q->entry[i].urb);
                if (err < 0)
                        break;
        }
        q->head = q->tail = 0;
        q->queued = 0;
        spin_unlock_irqrestore(&q->lock, flags);

        return err;
}

static int mt76u_alloc_rx(struct mt76_dev *dev)
{
        struct mt76_usb *usb = &dev->usb;
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        int i, err;

        usb->mcu.data = devm_kmalloc(dev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL);
        if (!usb->mcu.data)
                return -ENOMEM;

        spin_lock_init(&q->lock);
        q->entry = devm_kcalloc(dev->dev,
                                MT_NUM_RX_ENTRIES, sizeof(*q->entry),
                                GFP_KERNEL);
        if (!q->entry)
                return -ENOMEM;

        q->ndesc = MT_NUM_RX_ENTRIES;
        q->buf_size = PAGE_SIZE;

        for (i = 0; i < q->ndesc; i++) {
                err = mt76u_rx_urb_alloc(dev, &q->entry[i]);
                if (err < 0)
                        return err;
        }

        return mt76u_submit_rx_buffers(dev);
}

static void mt76u_free_rx(struct mt76_dev *dev)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        struct page *page;
        int i;

        for (i = 0; i < q->ndesc; i++)
                mt76u_urb_free(q->entry[i].urb);

        if (!q->rx_page.va)
                return;

        page = virt_to_page(q->rx_page.va);
        __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
        memset(&q->rx_page, 0, sizeof(q->rx_page));
}

void mt76u_stop_rx(struct mt76_dev *dev)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        int i;

        for (i = 0; i < q->ndesc; i++)
                usb_poison_urb(q->entry[i].urb);

        tasklet_kill(&dev->usb.rx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76u_stop_rx);

int mt76u_resume_rx(struct mt76_dev *dev)
{
        struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
        int i;

        for (i = 0; i < q->ndesc; i++)
                usb_unpoison_urb(q->entry[i].urb);

        return mt76u_submit_rx_buffers(dev);
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);

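/* Tx bottom half: reap completed entries per AC queue, release the
 * skbs, wake mac80211 queues that were stopped on overflow and kick
 * the tx-status workqueue.
 */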
static void mt76u_tx_tasklet(unsigned long data)
{
        struct mt76_dev *dev = (struct mt76_dev *)data;
        struct mt76_queue_entry entry;
        struct mt76_sw_queue *sq;
        struct mt76_queue *q;
        bool wake;
        int i;

        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                u32 n_dequeued = 0, n_sw_dequeued = 0;

                sq = &dev->q_tx[i];
                q = sq->q;

                while (q->queued > n_dequeued) {
                        if (!q->entry[q->head].done)
                                break;

                        if (q->entry[q->head].schedule) {
                                q->entry[q->head].schedule = false;
                                n_sw_dequeued++;
                        }

                        entry = q->entry[q->head];
                        q->entry[q->head].done = false;
                        q->head = (q->head + 1) % q->ndesc;
                        n_dequeued++;

                        dev->drv->tx_complete_skb(dev, i, &entry);
                }

                spin_lock_bh(&q->lock);

                sq->swq_queued -= n_sw_dequeued;
                q->queued -= n_dequeued;

                wake = q->stopped && q->queued < q->ndesc - 8;
                if (wake)
                        q->stopped = false;

                if (!q->queued)
                        wake_up(&dev->tx_wait);

                spin_unlock_bh(&q->lock);

                mt76_txq_schedule(dev, i);

                if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
                        queue_work(dev->usb.stat_wq, &dev->usb.stat_work);
                if (wake)
                        ieee80211_wake_queue(dev->hw, i);
        }
}

static void mt76u_tx_status_data(struct work_struct *work)
{
        struct mt76_usb *usb;
        struct mt76_dev *dev;
        u8 update = 1;
        u16 count = 0;

        usb = container_of(work, struct mt76_usb, stat_work);
        dev = container_of(usb, struct mt76_dev, usb);

        while (true) {
                if (test_bit(MT76_REMOVED, &dev->state))
                        break;

                if (!dev->drv->tx_status_data(dev, &update))
                        break;
                count++;
        }

        if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
                queue_work(usb->stat_wq, &usb->stat_work);
        else
                clear_bit(MT76_READING_STATS, &dev->state);
}

static void mt76u_complete_tx(struct urb *urb)
{
        struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
        struct mt76_queue_entry *e = urb->context;

        if (mt76u_urb_error(urb))
                dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
        e->done = true;

        tasklet_schedule(&dev->tx_tasklet);
}

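/* Map an skb onto the tx urb: a flat pointer to the skb data when
 * scatter-gather is disabled, otherwise an sg vector built with
 * skb_to_sgvec().
 */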
static int
mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
                       struct urb *urb)
{
        urb->transfer_buffer_length = skb->len;

        if (!dev->usb.sg_en) {
                urb->transfer_buffer = skb->data;
                return 0;
        }

        sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
        urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
        if (!urb->num_sgs)
                return -ENOMEM;

        return urb->num_sgs;
}

static int
mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
                   struct sk_buff *skb, struct mt76_wcid *wcid,
                   struct ieee80211_sta *sta)
{
        struct mt76_queue *q = dev->q_tx[qid].q;
        struct mt76_tx_info tx_info = {
                .skb = skb,
        };
        u16 idx = q->tail;
        int err;

        if (q->queued == q->ndesc)
                return -ENOSPC;

        skb->prev = skb->next = NULL;
        err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
        if (err < 0)
                return err;

        err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
        if (err < 0)
                return err;

        mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
                            q->entry[idx].urb, mt76u_complete_tx,
                            &q->entry[idx]);

        q->tail = (q->tail + 1) % q->ndesc;
        q->entry[idx].skb = tx_info.skb;
        q->queued++;

        return idx;
}

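/* Submit every queued-but-not-yet-submitted urb between q->first and
 * q->tail; stop on the first submission error.
 */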
static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
        struct urb *urb;
        int err;

        while (q->first != q->tail) {
                urb = q->entry[q->first].urb;

                trace_submit_urb(dev, urb);
                err = usb_submit_urb(urb, GFP_ATOMIC);
                if (err < 0) {
                        if (err == -ENODEV)
                                set_bit(MT76_REMOVED, &dev->state);
                        else
                                dev_err(dev->dev, "tx urb submit failed:%d\n",
                                        err);
                        break;
                }
                q->first = (q->first + 1) % q->ndesc;
        }
}

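/* Allocate one hardware queue per AC; the remaining software queues up
 * to MT_TXQ_PSD share queue 0.
 */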
static int mt76u_alloc_tx(struct mt76_dev *dev)
{
        struct mt76_queue *q;
        int i, j, err;

        for (i = 0; i <= MT_TXQ_PSD; i++) {
                INIT_LIST_HEAD(&dev->q_tx[i].swq);

                if (i >= IEEE80211_NUM_ACS) {
                        dev->q_tx[i].q = dev->q_tx[0].q;
                        continue;
                }

                q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
                if (!q)
                        return -ENOMEM;

                spin_lock_init(&q->lock);
                q->hw_idx = mt76_ac_to_hwq(i);
                dev->q_tx[i].q = q;

                q->entry = devm_kcalloc(dev->dev,
                                        MT_NUM_TX_ENTRIES, sizeof(*q->entry),
                                        GFP_KERNEL);
                if (!q->entry)
                        return -ENOMEM;

                q->ndesc = MT_NUM_TX_ENTRIES;
                for (j = 0; j < q->ndesc; j++) {
                        err = mt76u_urb_alloc(dev, &q->entry[j],
                                              MT_TX_SG_MAX_SIZE);
                        if (err < 0)
                                return err;
                }
        }
        return 0;
}

static void mt76u_free_tx(struct mt76_dev *dev)
{
        struct mt76_queue *q;
        int i, j;

        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                q = dev->q_tx[i].q;
                for (j = 0; j < q->ndesc; j++)
                        usb_free_urb(q->entry[j].urb);
        }
}

void mt76u_stop_tx(struct mt76_dev *dev)
{
        struct mt76_queue_entry entry;
        struct mt76_queue *q;
        int i, j, ret;

        ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev),
                                 HZ / 5);
        if (!ret) {
                dev_err(dev->dev, "timed out waiting for pending tx\n");

                for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                        q = dev->q_tx[i].q;
                        for (j = 0; j < q->ndesc; j++)
                                usb_kill_urb(q->entry[j].urb);
                }

                tasklet_kill(&dev->tx_tasklet);

                /* On device removal we might still queue skbs, but
                 * mt76u_tx_kick() will fail to submit the urb, so clean
                 * up those skbs manually.
                 */
                for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                        q = dev->q_tx[i].q;

                        /* Make sure we are in sync with the killed tasklet. */
                        spin_lock_bh(&q->lock);
                        while (q->queued) {
                                entry = q->entry[q->head];
                                q->head = (q->head + 1) % q->ndesc;
                                q->queued--;

                                dev->drv->tx_complete_skb(dev, i, &entry);
                        }
                        spin_unlock_bh(&q->lock);
                }
        }

        cancel_work_sync(&dev->usb.stat_work);
        clear_bit(MT76_READING_STATS, &dev->state);

        mt76_tx_status_check(dev, NULL, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);

void mt76u_queues_deinit(struct mt76_dev *dev)
{
        mt76u_stop_rx(dev);
        mt76u_stop_tx(dev);

        mt76u_free_rx(dev);
        mt76u_free_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_queues_deinit);

int mt76u_alloc_queues(struct mt76_dev *dev)
{
        int err;

        err = mt76u_alloc_rx(dev);
        if (err < 0)
                return err;

        return mt76u_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76u_alloc_queues);

static const struct mt76_queue_ops usb_queue_ops = {
        .tx_queue_skb = mt76u_tx_queue_skb,
        .kick = mt76u_tx_kick,
};

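/* Hook up the USB bus and queue ops, initialize the rx/tx tasklets and
 * the status workqueue, detect scatter-gather support and discover the
 * bulk endpoints.
 */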
int mt76u_init(struct mt76_dev *dev,
               struct usb_interface *intf)
{
        static const struct mt76_bus_ops mt76u_ops = {
                .rr = mt76u_rr,
                .wr = mt76u_wr,
                .rmw = mt76u_rmw,
                .write_copy = mt76u_copy,
                .wr_rp = mt76u_wr_rp,
                .rd_rp = mt76u_rd_rp,
                .type = MT76_BUS_USB,
        };
        struct usb_device *udev = interface_to_usbdev(intf);
        struct mt76_usb *usb = &dev->usb;

        tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
        tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
        INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
        skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);

        usb->stat_wq = alloc_workqueue("mt76u", WQ_UNBOUND, 0);
        if (!usb->stat_wq)
                return -ENOMEM;

        mutex_init(&usb->mcu.mutex);

        mutex_init(&usb->usb_ctrl_mtx);
        dev->bus = &mt76u_ops;
        dev->queue_ops = &usb_queue_ops;

        dev_set_drvdata(&udev->dev, dev);

        usb->sg_en = mt76u_check_sg(dev);

        return mt76u_set_endpoints(intf, usb);
}
EXPORT_SYMBOL_GPL(mt76u_init);

void mt76u_deinit(struct mt76_dev *dev)
{
        if (dev->usb.stat_wq) {
                destroy_workqueue(dev->usb.stat_wq);
                dev->usb.stat_wq = NULL;
        }
}
EXPORT_SYMBOL_GPL(mt76u_deinit);

MODULE_AUTHOR("Lorenzo Bianconi <[email protected]>");
MODULE_LICENSE("Dual BSD/GPL");