// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2022 Linaro Ltd.
 */
7 #include <linux/errno.h>
8 #include <linux/if_arp.h>
9 #include <linux/netdevice.h>
10 #include <linux/skbuff.h>
11 #include <linux/if_rmnet.h>
12 #include <linux/etherdevice.h>
13 #include <net/pkt_sched.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/remoteproc/qcom_rproc.h>
19 #include "ipa_endpoint.h"
20 #include "ipa_table.h"
22 #include "ipa_modem.h"
23 #include "ipa_smp2p.h"
26 #include "ipa_power.h"
28 #define IPA_NETDEV_NAME "rmnet_ipa%d"
29 #define IPA_NETDEV_TAILROOM 0 /* for padding by mux layer */
30 #define IPA_NETDEV_TIMEOUT 10 /* seconds */
32 enum ipa_modem_state {
33 IPA_MODEM_STATE_STOPPED = 0,
34 IPA_MODEM_STATE_STARTING,
35 IPA_MODEM_STATE_RUNNING,
36 IPA_MODEM_STATE_STOPPING,
40 * struct ipa_priv - IPA network device private data
42 * @tx: Transmit endpoint pointer
43 * @rx: Receive endpoint pointer
44 * @work: Work structure used to wake the modem netdev TX queue
48 struct ipa_endpoint *tx;
49 struct ipa_endpoint *rx;
50 struct work_struct work;
53 /** ipa_open() - Opens the modem network interface */
54 static int ipa_open(struct net_device *netdev)
56 struct ipa_priv *priv = netdev_priv(netdev);
57 struct ipa *ipa = priv->ipa;
62 ret = pm_runtime_get_sync(dev);
66 ret = ipa_endpoint_enable_one(priv->tx);
70 ret = ipa_endpoint_enable_one(priv->rx);
74 netif_start_queue(netdev);
76 pm_runtime_mark_last_busy(dev);
77 (void)pm_runtime_put_autosuspend(dev);
82 ipa_endpoint_disable_one(priv->tx);
84 pm_runtime_put_noidle(dev);
89 /** ipa_stop() - Stops the modem network interface. */
90 static int ipa_stop(struct net_device *netdev)
92 struct ipa_priv *priv = netdev_priv(netdev);
93 struct ipa *ipa = priv->ipa;
98 ret = pm_runtime_get_sync(dev);
102 netif_stop_queue(netdev);
104 ipa_endpoint_disable_one(priv->rx);
105 ipa_endpoint_disable_one(priv->tx);
107 pm_runtime_mark_last_busy(dev);
108 (void)pm_runtime_put_autosuspend(dev);
113 /** ipa_start_xmit() - Transmit an skb
114 * @skb: Socket buffer to be transmitted
115 * @netdev: Network device
117 * Return: NETDEV_TX_OK if successful (or dropped), NETDEV_TX_BUSY otherwise
119 * Normally NETDEV_TX_OK indicates the buffer was successfully transmitted.
120 * If the buffer has an unexpected protocol or its size is out of range it
121 * is quietly dropped, returning NETDEV_TX_OK. NETDEV_TX_BUSY indicates
122 * the buffer cannot be sent at this time and should retried later.
125 ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
127 struct net_device_stats *stats = &netdev->stats;
128 struct ipa_priv *priv = netdev_priv(netdev);
129 struct ipa_endpoint *endpoint;
130 struct ipa *ipa = priv->ipa;
131 u32 skb_len = skb->len;
138 endpoint = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX];
139 if (endpoint->config.qmap && skb->protocol != htons(ETH_P_MAP))
142 /* The hardware must be powered for us to transmit, so if we're not
143 * ready we want the network stack to stop queueing until power is
144 * ACTIVE. Once runtime resume has completed, we inform the network
145 * stack it's OK to try transmitting again.
147 * We learn from pm_runtime_get() whether the hardware is powered.
148 * If it was not, powering up is either started or already underway.
149 * And in that case we want to disable queueing, expecting it to be
150 * re-enabled once power is ACTIVE. But runtime PM and network
151 * transmit run concurrently, and if we're not careful the requests
152 * to stop and start queueing could occur in the wrong order.
154 * For that reason we *always* stop queueing here, *before* the call
155 * to pm_runtime_get(). If we determine here that power is ACTIVE,
156 * we restart queueing before transmitting the SKB. Otherwise
157 * queueing will eventually be enabled after resume completes.
159 netif_stop_queue(netdev);
162 ret = pm_runtime_get(dev);
164 /* If a resume won't happen, just drop the packet */
165 if (ret < 0 && ret != -EINPROGRESS) {
166 netif_wake_queue(netdev);
167 pm_runtime_put_noidle(dev);
171 pm_runtime_put_noidle(dev);
173 return NETDEV_TX_BUSY;
176 netif_wake_queue(netdev);
178 ret = ipa_endpoint_skb_tx(endpoint, skb);
180 pm_runtime_mark_last_busy(dev);
181 (void)pm_runtime_put_autosuspend(dev);
185 return NETDEV_TX_BUSY;
190 stats->tx_bytes += skb_len;
195 dev_kfree_skb_any(skb);
201 void ipa_modem_skb_rx(struct net_device *netdev, struct sk_buff *skb)
203 struct net_device_stats *stats = &netdev->stats;
207 skb->protocol = htons(ETH_P_MAP);
209 stats->rx_bytes += skb->len;
211 (void)netif_receive_skb(skb);
217 static const struct net_device_ops ipa_modem_ops = {
218 .ndo_open = ipa_open,
219 .ndo_stop = ipa_stop,
220 .ndo_start_xmit = ipa_start_xmit,
223 /** ipa_modem_netdev_setup() - netdev setup function for the modem */
224 static void ipa_modem_netdev_setup(struct net_device *netdev)
226 netdev->netdev_ops = &ipa_modem_ops;
228 netdev->header_ops = NULL;
229 netdev->type = ARPHRD_RAWIP;
230 netdev->hard_header_len = 0;
231 netdev->min_header_len = ETH_HLEN;
232 netdev->min_mtu = ETH_MIN_MTU;
233 netdev->max_mtu = IPA_MTU;
234 netdev->mtu = netdev->max_mtu;
235 netdev->addr_len = 0;
236 netdev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
237 netdev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
238 netdev->priv_flags |= IFF_TX_SKB_SHARING;
239 eth_broadcast_addr(netdev->broadcast);
241 /* The endpoint is configured for QMAP */
242 netdev->needed_headroom = sizeof(struct rmnet_map_header);
243 netdev->needed_tailroom = IPA_NETDEV_TAILROOM;
244 netdev->watchdog_timeo = IPA_NETDEV_TIMEOUT * HZ;
245 netdev->hw_features = NETIF_F_SG;
248 /** ipa_modem_suspend() - suspend callback
249 * @netdev: Network device
251 * Suspend the modem's endpoints.
253 void ipa_modem_suspend(struct net_device *netdev)
255 struct ipa_priv *priv;
257 if (!(netdev->flags & IFF_UP))
260 priv = netdev_priv(netdev);
261 ipa_endpoint_suspend_one(priv->rx);
262 ipa_endpoint_suspend_one(priv->tx);
266 * ipa_modem_wake_queue_work() - enable modem netdev queue
267 * @work: Work structure
269 * Re-enable transmit on the modem network device. This is called
270 * in (power management) work queue context, scheduled when resuming
271 * the modem. We can't enable the queue directly in ipa_modem_resume()
272 * because transmits restart the instant the queue is awakened; but the
273 * device power state won't be ACTIVE until *after* ipa_modem_resume()
276 static void ipa_modem_wake_queue_work(struct work_struct *work)
278 struct ipa_priv *priv = container_of(work, struct ipa_priv, work);
280 netif_wake_queue(priv->tx->netdev);
283 /** ipa_modem_resume() - resume callback for runtime_pm
284 * @dev: pointer to device
286 * Resume the modem's endpoints.
288 void ipa_modem_resume(struct net_device *netdev)
290 struct ipa_priv *priv;
292 if (!(netdev->flags & IFF_UP))
295 priv = netdev_priv(netdev);
296 ipa_endpoint_resume_one(priv->tx);
297 ipa_endpoint_resume_one(priv->rx);
299 /* Arrange for the TX queue to be restarted */
300 (void)queue_pm_work(&priv->work);
303 int ipa_modem_start(struct ipa *ipa)
305 enum ipa_modem_state state;
306 struct net_device *netdev;
307 struct ipa_priv *priv;
310 /* Only attempt to start the modem if it's stopped */
311 state = atomic_cmpxchg(&ipa->modem_state, IPA_MODEM_STATE_STOPPED,
312 IPA_MODEM_STATE_STARTING);
314 /* Silently ignore attempts when running, or when changing state */
315 if (state != IPA_MODEM_STATE_STOPPED)
318 netdev = alloc_netdev(sizeof(struct ipa_priv), IPA_NETDEV_NAME,
319 NET_NAME_UNKNOWN, ipa_modem_netdev_setup);
325 SET_NETDEV_DEV(netdev, ipa->dev);
326 priv = netdev_priv(netdev);
328 priv->tx = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX];
329 priv->rx = ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX];
330 INIT_WORK(&priv->work, ipa_modem_wake_queue_work);
332 priv->tx->netdev = netdev;
333 priv->rx->netdev = netdev;
335 ipa->modem_netdev = netdev;
337 ret = register_netdev(netdev);
339 ipa->modem_netdev = NULL;
340 priv->rx->netdev = NULL;
341 priv->tx->netdev = NULL;
348 atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED);
350 atomic_set(&ipa->modem_state, IPA_MODEM_STATE_RUNNING);
351 smp_mb__after_atomic();
356 int ipa_modem_stop(struct ipa *ipa)
358 struct net_device *netdev = ipa->modem_netdev;
359 enum ipa_modem_state state;
361 /* Only attempt to stop the modem if it's running */
362 state = atomic_cmpxchg(&ipa->modem_state, IPA_MODEM_STATE_RUNNING,
363 IPA_MODEM_STATE_STOPPING);
365 /* Silently ignore attempts when already stopped */
366 if (state == IPA_MODEM_STATE_STOPPED)
369 /* If we're somewhere between stopped and starting, we're busy */
370 if (state != IPA_MODEM_STATE_RUNNING)
373 /* Clean up the netdev and endpoints if it was started */
375 struct ipa_priv *priv = netdev_priv(netdev);
377 cancel_work_sync(&priv->work);
378 /* If it was opened, stop it first */
379 if (netdev->flags & IFF_UP)
380 (void)ipa_stop(netdev);
381 unregister_netdev(netdev);
383 ipa->modem_netdev = NULL;
384 priv->rx->netdev = NULL;
385 priv->tx->netdev = NULL;
390 atomic_set(&ipa->modem_state, IPA_MODEM_STATE_STOPPED);
391 smp_mb__after_atomic();
396 /* Treat a "clean" modem stop the same as a crash */
397 static void ipa_modem_crashed(struct ipa *ipa)
399 struct device *dev = ipa->dev;
402 /* Prevent the modem from triggering a call to ipa_setup() */
403 ipa_smp2p_irq_disable_setup(ipa);
405 ret = pm_runtime_get_sync(dev);
407 dev_err(dev, "error %d getting power to handle crash\n", ret);
411 ipa_endpoint_modem_pause_all(ipa, true);
413 ipa_endpoint_modem_hol_block_clear_all(ipa);
415 ipa_table_reset(ipa, true);
417 ret = ipa_table_hash_flush(ipa);
419 dev_err(dev, "error %d flushing hash caches\n", ret);
421 ret = ipa_endpoint_modem_exception_reset_all(ipa);
423 dev_err(dev, "error %d resetting exception endpoint\n", ret);
425 ipa_endpoint_modem_pause_all(ipa, false);
427 ret = ipa_modem_stop(ipa);
429 dev_err(dev, "error %d stopping modem\n", ret);
431 /* Now prepare for the next modem boot */
432 ret = ipa_mem_zero_modem(ipa);
434 dev_err(dev, "error %d zeroing modem memory regions\n", ret);
437 pm_runtime_mark_last_busy(dev);
438 (void)pm_runtime_put_autosuspend(dev);
441 static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
444 struct ipa *ipa = container_of(nb, struct ipa, nb);
445 struct qcom_ssr_notify_data *notify_data = data;
446 struct device *dev = ipa->dev;
449 case QCOM_SSR_BEFORE_POWERUP:
450 dev_info(dev, "received modem starting event\n");
452 ipa_smp2p_notify_reset(ipa);
455 case QCOM_SSR_AFTER_POWERUP:
456 dev_info(dev, "received modem running event\n");
459 case QCOM_SSR_BEFORE_SHUTDOWN:
460 dev_info(dev, "received modem %s event\n",
461 notify_data->crashed ? "crashed" : "stopping");
462 if (ipa->setup_complete)
463 ipa_modem_crashed(ipa);
466 case QCOM_SSR_AFTER_SHUTDOWN:
467 dev_info(dev, "received modem offline event\n");
471 dev_err(dev, "received unrecognized event %lu\n", action);
478 int ipa_modem_config(struct ipa *ipa)
482 ipa->nb.notifier_call = ipa_modem_notify;
484 notifier = qcom_register_ssr_notifier("mpss", &ipa->nb);
485 if (IS_ERR(notifier))
486 return PTR_ERR(notifier);
488 ipa->notifier = notifier;
493 void ipa_modem_deconfig(struct ipa *ipa)
495 struct device *dev = ipa->dev;
498 ret = qcom_unregister_ssr_notifier(ipa->notifier, &ipa->nb);
500 dev_err(dev, "error %d unregistering notifier", ret);
502 ipa->notifier = NULL;
503 memset(&ipa->nb, 0, sizeof(ipa->nb));