// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 */
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wwan.h>

#include <net/pkt_sched.h>
37 #include "t7xx_hif_dpmaif_rx.h"
38 #include "t7xx_hif_dpmaif_tx.h"
39 #include "t7xx_netdev.h"
41 #include "t7xx_port_proxy.h"
42 #include "t7xx_state_monitor.h"
#define IP_MUX_SESSION_DEFAULT	0
#define SBD_PACKET_TYPE_MASK	GENMASK(7, 4)
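
/* NAPI contexts live in the DPMAIF RX queues and are shared by every CCMNI
 * netdev. Enabling them takes a runtime PM reference per queue and kicks an
 * initial poll; disabling waits for in-flight polls to finish first.
 */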
static void t7xx_ccmni_enable_napi(struct t7xx_ccmni_ctrl *ctlb)
{
	struct dpmaif_ctrl *ctrl;
	int i, ret;

	ctrl = ctlb->hif_ctrl;

	if (ctlb->is_napi_en)
		return;

	for (i = 0; i < RXQ_NUM; i++) {
		/* The usage count has to be bumped every time before calling
		 * napi_schedule. It will be decreased in the poll routine,
		 * right after napi_complete_done is called.
		 */
		ret = pm_runtime_resume_and_get(ctrl->dev);
		if (ret < 0) {
			dev_err(ctrl->dev, "Failed to resume device: %d\n",
				ret);
			return;
		}
		napi_enable(ctlb->napi[i]);
		napi_schedule(ctlb->napi[i]);
	}
	ctlb->is_napi_en = true;
}
static void t7xx_ccmni_disable_napi(struct t7xx_ccmni_ctrl *ctlb)
{
	int i;

	if (!ctlb->is_napi_en)
		return;

	for (i = 0; i < RXQ_NUM; i++) {
		napi_synchronize(ctlb->napi[i]);
		napi_disable(ctlb->napi[i]);
	}

	ctlb->is_napi_en = false;
}
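
/* ndo_open/ndo_stop: per-netdev usage counting plus a shared NAPI user
 * refcount, so NAPI is only enabled while at least one CCMNI is open.
 */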
static int t7xx_ccmni_open(struct net_device *dev)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
	struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb;

	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
	if (!atomic_fetch_inc(&ccmni_ctl->napi_usr_refcnt))
		t7xx_ccmni_enable_napi(ccmni_ctl);

	atomic_inc(&ccmni->usage);
	return 0;
}
static int t7xx_ccmni_close(struct net_device *dev)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
	struct t7xx_ccmni_ctrl *ccmni_ctl = ccmni->ctlb;

	atomic_dec(&ccmni->usage);
	if (atomic_dec_and_test(&ccmni_ctl->napi_usr_refcnt))
		t7xx_ccmni_disable_napi(ccmni_ctl);

	netif_carrier_off(dev);
	netif_tx_disable(dev);
	return 0;
}
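
/* Tag the skb with the originating netdev index and hand it to the DPMAIF TX
 * path; any error from the HIF layer is reported as NETDEV_TX_BUSY.
 */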
static int t7xx_ccmni_send_packet(struct t7xx_ccmni *ccmni, struct sk_buff *skb,
				  unsigned int txq_number)
{
	struct t7xx_ccmni_ctrl *ctlb = ccmni->ctlb;
	struct t7xx_skb_cb *skb_cb = T7XX_SKB_CB(skb);

	skb_cb->netif_idx = ccmni->index;

	if (t7xx_dpmaif_tx_send_skb(ctlb->hif_ctrl, txq_number, skb))
		return NETDEV_TX_BUSY;

	return 0;
}
static netdev_tx_t t7xx_ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
	int skb_len = skb->len;

	/* If the MTU changed or there is no headroom, drop the packet */
	if (skb->len > dev->mtu || skb_headroom(skb) < sizeof(struct ccci_header)) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	if (t7xx_ccmni_send_packet(ccmni, skb, DPMAIF_TX_DEFAULT_QUEUE))
		return NETDEV_TX_BUSY;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb_len;

	return NETDEV_TX_OK;
}
static void t7xx_ccmni_tx_timeout(struct net_device *dev, unsigned int __always_unused txqueue)
{
	/* Use wwan_netdev_drvpriv() like the other ndo callbacks: the driver
	 * private area sits behind the WWAN core's own netdev private data.
	 */
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);

	dev->stats.tx_errors++;

	if (atomic_read(&ccmni->usage) > 0)
		netif_tx_wake_all_queues(dev);
}
static const struct net_device_ops ccmni_netdev_ops = {
	.ndo_open	= t7xx_ccmni_open,
	.ndo_stop	= t7xx_ccmni_close,
	.ndo_start_xmit	= t7xx_ccmni_start_xmit,
	.ndo_tx_timeout	= t7xx_ccmni_tx_timeout,
};
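
/* Modem state helpers: restore or quiesce TX queues and carrier for every
 * registered CCMNI instance around modem state transitions.
 */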
static void t7xx_ccmni_start(struct t7xx_ccmni_ctrl *ctlb)
{
	struct t7xx_ccmni *ccmni;
	int i;

	for (i = 0; i < ctlb->nic_dev_num; i++) {
		ccmni = ctlb->ccmni_inst[i];
		if (!ccmni)
			continue;

		if (atomic_read(&ccmni->usage) > 0) {
			netif_tx_start_all_queues(ccmni->dev);
			netif_carrier_on(ccmni->dev);
		}
	}

	if (atomic_read(&ctlb->napi_usr_refcnt))
		t7xx_ccmni_enable_napi(ctlb);
}
static void t7xx_ccmni_pre_stop(struct t7xx_ccmni_ctrl *ctlb)
{
	struct t7xx_ccmni *ccmni;
	int i;

	for (i = 0; i < ctlb->nic_dev_num; i++) {
		ccmni = ctlb->ccmni_inst[i];
		if (!ccmni)
			continue;

		if (atomic_read(&ccmni->usage) > 0)
			netif_tx_disable(ccmni->dev);
	}
}
static void t7xx_ccmni_post_stop(struct t7xx_ccmni_ctrl *ctlb)
{
	struct t7xx_ccmni *ccmni;
	int i;

	if (atomic_read(&ctlb->napi_usr_refcnt))
		t7xx_ccmni_disable_napi(ctlb);

	for (i = 0; i < ctlb->nic_dev_num; i++) {
		ccmni = ctlb->ccmni_inst[i];
		if (!ccmni)
			continue;

		if (atomic_read(&ccmni->usage) > 0)
			netif_carrier_off(ccmni->dev);
	}
}
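
/* Netdev setup for links created by the WWAN core: raw IP framing
 * (ARPHRD_NONE, no ARP, point-to-point) with headroom reserved for a
 * CCCI header.
 */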
static void t7xx_ccmni_wwan_setup(struct net_device *dev)
{
	dev->needed_headroom += sizeof(struct ccci_header);

	dev->mtu = ETH_DATA_LEN;
	dev->max_mtu = CCMNI_MTU_MAX;
	BUILD_BUG_ON(CCMNI_MTU_MAX > DPMAIF_HW_MTU_SIZE);

	dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
	dev->watchdog_timeo = CCMNI_NETDEV_WDT_TO;

	dev->flags = IFF_POINTOPOINT | IFF_NOARP;

	dev->features = NETIF_F_VLAN_CHALLENGED;

	dev->features |= NETIF_F_SG;
	dev->hw_features |= NETIF_F_SG;

	dev->features |= NETIF_F_HW_CSUM;
	dev->hw_features |= NETIF_F_HW_CSUM;

	dev->features |= NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_RXCSUM;

	dev->features |= NETIF_F_GRO;
	dev->hw_features |= NETIF_F_GRO;

	dev->needs_free_netdev = true;

	dev->type = ARPHRD_NONE;

	dev->netdev_ops = &ccmni_netdev_ops;
}
static void t7xx_init_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
{
	int i;

	/* One HW block shared by multiple net devices,
	 * so add a dummy device for NAPI.
	 */
	init_dummy_netdev(&ctlb->dummy_dev);
	atomic_set(&ctlb->napi_usr_refcnt, 0);
	ctlb->is_napi_en = false;

	for (i = 0; i < RXQ_NUM; i++) {
		ctlb->napi[i] = &ctlb->hif_ctrl->rxq[i].napi;
		netif_napi_add_weight(&ctlb->dummy_dev, ctlb->napi[i], t7xx_dpmaif_napi_rx_poll,
				      NIC_NAPI_POLL_BUDGET);
	}
}
static void t7xx_uninit_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
{
	int i;

	for (i = 0; i < RXQ_NUM; i++) {
		netif_napi_del(ctlb->napi[i]);
		ctlb->napi[i] = NULL;
	}
}
static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id,
				   struct netlink_ext_ack *extack)
{
	struct t7xx_ccmni_ctrl *ctlb = ctxt;
	struct t7xx_ccmni *ccmni;
	int ret;

	if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
		return -EINVAL;

	ccmni = wwan_netdev_drvpriv(dev);
	ccmni->index = if_id;
	ccmni->ctlb = ctlb;
	ccmni->dev = dev;
	atomic_set(&ccmni->usage, 0);
	ctlb->ccmni_inst[if_id] = ccmni;

	ret = register_netdevice(dev);
	if (ret)
		return ret;

	netif_device_attach(dev);
	return 0;
}
static void t7xx_ccmni_wwan_dellink(void *ctxt, struct net_device *dev, struct list_head *head)
{
	struct t7xx_ccmni *ccmni = wwan_netdev_drvpriv(dev);
	struct t7xx_ccmni_ctrl *ctlb = ctxt;
	u8 if_id = ccmni->index;

	if (if_id >= ARRAY_SIZE(ctlb->ccmni_inst))
		return;

	if (WARN_ON(ctlb->ccmni_inst[if_id] != ccmni))
		return;

	unregister_netdevice(dev);
}
static const struct wwan_ops ccmni_wwan_ops = {
	.priv_size = sizeof(struct t7xx_ccmni),
	.setup     = t7xx_ccmni_wwan_setup,
	.newlink   = t7xx_ccmni_wwan_newlink,
	.dellink   = t7xx_ccmni_wwan_dellink,
};
static int t7xx_ccmni_register_wwan(struct t7xx_ccmni_ctrl *ctlb)
{
	struct device *dev = ctlb->hif_ctrl->dev;
	int ret;

	if (ctlb->wwan_is_registered)
		return 0;

	/* WWAN core will create a netdev for the default IP MUX channel */
	ret = wwan_register_ops(dev, &ccmni_wwan_ops, ctlb, IP_MUX_SESSION_DEFAULT);
	if (ret < 0) {
		dev_err(dev, "Unable to register WWAN ops, %d\n", ret);
		return ret;
	}

	ctlb->wwan_is_registered = true;
	return 0;
}
static int t7xx_ccmni_md_state_callback(enum md_state state, void *para)
{
	struct t7xx_ccmni_ctrl *ctlb = para;
	struct device *dev;
	int ret = 0;

	dev = ctlb->hif_ctrl->dev;
	ctlb->md_sta = state;

	switch (state) {
	case MD_STATE_READY:
		ret = t7xx_ccmni_register_wwan(ctlb);
		if (!ret)
			t7xx_ccmni_start(ctlb);
		break;

	case MD_STATE_EXCEPTION:
	case MD_STATE_STOPPED:
		t7xx_ccmni_pre_stop(ctlb);

		ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
		if (ret < 0)
			dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);

		t7xx_ccmni_post_stop(ctlb);
		break;

	case MD_STATE_WAITING_FOR_HS1:
	case MD_STATE_WAITING_TO_STOP:
		ret = t7xx_dpmaif_md_state_callback(ctlb->hif_ctrl, state);
		if (ret < 0)
			dev_err(dev, "DPMAIF md state callback err, state=%d\n", state);
		break;

	default:
		break;
	}

	return ret;
}
static void init_md_status_notifier(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;
	struct t7xx_fsm_notifier *md_status_notifier;

	md_status_notifier = &ctlb->md_status_notify;
	INIT_LIST_HEAD(&md_status_notifier->entry);
	md_status_notifier->notifier_fn = t7xx_ccmni_md_state_callback;
	md_status_notifier->data = ctlb;

	t7xx_fsm_notifier_register(t7xx_dev->md, md_status_notifier);
}
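
/* RX callback invoked from the DPMAIF NAPI poll: route the skb to the netdev
 * recorded in its control block, set the L3 protocol and pass it to GRO.
 */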
static void t7xx_ccmni_recv_skb(struct t7xx_ccmni_ctrl *ccmni_ctlb, struct sk_buff *skb,
				struct napi_struct *napi)
{
	struct t7xx_skb_cb *skb_cb;
	struct net_device *net_dev;
	struct t7xx_ccmni *ccmni;
	int pkt_type, skb_len;
	u8 netif_id;

	skb_cb = T7XX_SKB_CB(skb);
	netif_id = skb_cb->netif_idx;
	ccmni = ccmni_ctlb->ccmni_inst[netif_id];
	if (!ccmni) {
		dev_kfree_skb(skb);
		return;
	}

	net_dev = ccmni->dev;
	pkt_type = skb_cb->rx_pkt_type;
	skb->dev = net_dev;
	if (pkt_type == PKT_TYPE_IP6)
		skb->protocol = htons(ETH_P_IPV6);
	else
		skb->protocol = htons(ETH_P_IP);

	skb_len = skb->len;
	napi_gro_receive(napi, skb);
	net_dev->stats.rx_packets++;
	net_dev->stats.rx_bytes += skb_len;
}
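
/* TX flow control: DPMAIF reports per-queue FULL/IRQ events and the
 * corresponding netdev TX queue of the default CCMNI instance is stopped or
 * woken accordingly.
 */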
static void t7xx_ccmni_queue_tx_irq_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
{
	struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
	struct netdev_queue *net_queue;

	if (netif_running(ccmni->dev) && atomic_read(&ccmni->usage) > 0) {
		net_queue = netdev_get_tx_queue(ccmni->dev, qno);
		if (netif_tx_queue_stopped(net_queue))
			netif_tx_wake_queue(net_queue);
	}
}
static void t7xx_ccmni_queue_tx_full_notify(struct t7xx_ccmni_ctrl *ctlb, int qno)
{
	struct t7xx_ccmni *ccmni = ctlb->ccmni_inst[0];
	struct netdev_queue *net_queue;

	if (atomic_read(&ccmni->usage) > 0) {
		netdev_err(ccmni->dev, "TX queue %d is full\n", qno);
		net_queue = netdev_get_tx_queue(ccmni->dev, qno);
		netif_tx_stop_queue(net_queue);
	}
}
static void t7xx_ccmni_queue_state_notify(struct t7xx_pci_dev *t7xx_dev,
					  enum dpmaif_txq_state state, int qno)
{
	struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;

	if (ctlb->md_sta != MD_STATE_READY)
		return;

	if (!ctlb->ccmni_inst[0]) {
		dev_warn(&t7xx_dev->pdev->dev, "No netdev registered yet\n");
		return;
	}

	if (state == DMPAIF_TXQ_STATE_IRQ)
		t7xx_ccmni_queue_tx_irq_notify(ctlb, qno);
	else if (state == DMPAIF_TXQ_STATE_FULL)
		t7xx_ccmni_queue_tx_full_notify(ctlb, qno);
}
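
/* Entry points for the rest of the t7xx driver: allocate the CCMNI control
 * block, bring up the DPMAIF HIF and register for modem state notifications.
 */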
int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	struct t7xx_ccmni_ctrl *ctlb;

	ctlb = devm_kzalloc(dev, sizeof(*ctlb), GFP_KERNEL);
	if (!ctlb)
		return -ENOMEM;

	t7xx_dev->ccmni_ctlb = ctlb;
	ctlb->t7xx_dev = t7xx_dev;
	ctlb->callbacks.state_notify = t7xx_ccmni_queue_state_notify;
	ctlb->callbacks.recv_skb = t7xx_ccmni_recv_skb;
	ctlb->nic_dev_num = NIC_DEV_DEFAULT;

	ctlb->hif_ctrl = t7xx_dpmaif_hif_init(t7xx_dev, &ctlb->callbacks);
	if (!ctlb->hif_ctrl)
		return -ENOMEM;

	t7xx_init_netdev_napi(ctlb);
	init_md_status_notifier(t7xx_dev);
	return 0;
}
void t7xx_ccmni_exit(struct t7xx_pci_dev *t7xx_dev)
{
	struct t7xx_ccmni_ctrl *ctlb = t7xx_dev->ccmni_ctlb;

	t7xx_fsm_notifier_unregister(t7xx_dev->md, &ctlb->md_status_notify);

	if (ctlb->wwan_is_registered) {
		wwan_unregister_ops(&t7xx_dev->pdev->dev);
		ctlb->wwan_is_registered = false;
	}

	t7xx_uninit_netdev_napi(ctlb);
	t7xx_dpmaif_hif_exit(ctlb->hif_ctrl);
}