drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
 *
 * RMNET Data ingress/egress handler
 */

#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/if_arp.h>
#include <net/sock.h>
#include "rmnet_private.h"
#include "rmnet_config.h"
#include "rmnet_vnd.h"
#include "rmnet_map.h"
#include "rmnet_handlers.h"

#define RMNET_IP_VERSION_4 0x40
#define RMNET_IP_VERSION_6 0x60

/* Helper Functions */

static void rmnet_set_skb_proto(struct sk_buff *skb)
{
        switch (skb->data[0] & 0xF0) {
        case RMNET_IP_VERSION_4:
                skb->protocol = htons(ETH_P_IP);
                break;
        case RMNET_IP_VERSION_6:
                skb->protocol = htons(ETH_P_IPV6);
                break;
        default:
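                /* Not IPv4 or IPv6: keep the frame tagged with the MAP
                 * ethertype.
                 */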
                skb->protocol = htons(ETH_P_MAP);
                break;
        }
}

/* Generic handler */

static void
rmnet_deliver_skb(struct sk_buff *skb)
{
        struct rmnet_priv *priv = netdev_priv(skb->dev);

        skb_reset_transport_header(skb);
        skb_reset_network_header(skb);
        rmnet_vnd_rx_fixup(skb, skb->dev);

        skb->pkt_type = PACKET_HOST;
        skb_set_mac_header(skb, 0);
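        /* Hand off to the per-CPU GRO cell so receives on the virtual
         * device still get NAPI/GRO batching.
         */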
        gro_cells_receive(&priv->gro_cells, skb);
}

/* MAP handler */

static void
__rmnet_map_ingress_handler(struct sk_buff *skb,
                            struct rmnet_port *port)
{
        struct rmnet_map_header *map_header = (void *)skb->data;
        struct rmnet_endpoint *ep;
        u16 len, pad;
        u8 mux_id;

        if (map_header->flags & MAP_CMD_FLAG) {
                /* Packet contains a MAP command (not data) */
                if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
                        return rmnet_map_command(skb, port);

                goto free_skb;
        }

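        /* Data packet: the MAP header carries the mux_id of the target
         * logical endpoint and a big-endian pkt_len that covers the payload
         * plus any trailing pad bytes (the pad length sits in the low bits
         * of the flags field).
         */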
        mux_id = map_header->mux_id;
        pad = map_header->flags & MAP_PAD_LEN_MASK;
        len = ntohs(map_header->pkt_len) - pad;

        if (mux_id >= RMNET_MAX_LOGICAL_EP)
                goto free_skb;

        ep = rmnet_get_endpoint(port, mux_id);
        if (!ep)
                goto free_skb;

        skb->dev = ep->egress_dev;

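        /* Checksum offload metadata: MAPv5 carries it in a header that
         * follows the MAP header, MAPv4 in a trailer after the payload.
         * When it validates, the skb is marked CHECKSUM_UNNECESSARY so the
         * stack does not verify the checksum again.
         */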
        if ((port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) &&
            (map_header->flags & MAP_NEXT_HEADER_FLAG)) {
                if (rmnet_map_process_next_hdr_packet(skb, len))
                        goto free_skb;
                skb_pull(skb, sizeof(*map_header));
                rmnet_set_skb_proto(skb);
        } else {
                /* Subtract MAP header */
                skb_pull(skb, sizeof(*map_header));
                rmnet_set_skb_proto(skb);
                if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4 &&
                    !rmnet_map_checksum_downlink_packet(skb, len + pad))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

        skb_trim(skb, len);
        rmnet_deliver_skb(skb);
        return;

free_skb:
        kfree_skb(skb);
}

static void
rmnet_map_ingress_handler(struct sk_buff *skb,
                          struct rmnet_port *port)
{
        struct sk_buff *skbn;

        if (skb->dev->type == ARPHRD_ETHER) {
                if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
                        kfree_skb(skb);
                        return;
                }

                skb_push(skb, ETH_HLEN);
        }

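        /* One lower-device frame may aggregate several MAP packets. Split it
         * and feed each resulting packet through the ingress handler, then
         * release the parent skb.
         */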
        if (port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION) {
                while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
                        __rmnet_map_ingress_handler(skbn, port);

                consume_skb(skb);
        } else {
                __rmnet_map_ingress_handler(skb, port);
        }
}

static int rmnet_map_egress_handler(struct sk_buff *skb,
                                    struct rmnet_port *port, u8 mux_id,
                                    struct net_device *orig_dev)
{
        int required_headroom, additional_header_len, csum_type = 0;
        struct rmnet_map_header *map_header;

        additional_header_len = 0;
        required_headroom = sizeof(struct rmnet_map_header);

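        /* Uplink checksum offload adds its own header in front of the
         * payload, so include it in the headroom we need to reserve.
         */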
        if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
                additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
                csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
        } else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) {
                additional_header_len = sizeof(struct rmnet_map_v5_csum_header);
                csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
        }

        required_headroom += additional_header_len;

        if (skb_cow_head(skb, required_headroom) < 0)
                return -ENOMEM;

        if (csum_type)
                rmnet_map_checksum_uplink_packet(skb, port, orig_dev,
                                                 csum_type);

        map_header = rmnet_map_add_map_header(skb, additional_header_len,
                                              port, 0);
        if (!map_header)
                return -ENOMEM;

        map_header->mux_id = mux_id;

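        /* TX aggregation is enabled: queue the packet into the current
         * aggregation buffer rather than sending it now. The caller sees
         * -EINPROGRESS and must not transmit the skb itself.
         */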
        if (READ_ONCE(port->egress_agg_params.count) > 1) {
                unsigned int len;

                len = rmnet_map_tx_aggregate(skb, port, orig_dev);
                if (likely(len)) {
                        rmnet_vnd_tx_fixup_len(len, orig_dev);
                        return -EINPROGRESS;
                }
                return -ENOMEM;
        }

        skb->protocol = htons(ETH_P_MAP);
        return 0;
}

static void
rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
{
        if (skb_mac_header_was_set(skb))
                skb_push(skb, skb->mac_len);

        if (bridge_dev) {
                skb->dev = bridge_dev;
                dev_queue_xmit(skb);
        }
}

/* Ingress / Egress Entry Points */

/* Processes packet as per ingress data format for receiving device. Logical
 * endpoint is determined from packet inspection. Packet is then sent to the
 * egress device listed in the logical endpoint configuration.
 */
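/* Registered on the real device via netdev_rx_handler_register() (see
 * rmnet_config.c), so this runs in the netif receive path under
 * rcu_read_lock().
 */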
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct rmnet_port *port;
        struct net_device *dev;

        if (!skb)
                goto done;

        if (skb_linearize(skb)) {
                kfree_skb(skb);
                goto done;
        }

        if (skb->pkt_type == PACKET_LOOPBACK)
                return RX_HANDLER_PASS;

        dev = skb->dev;
        port = rmnet_get_port_rcu(dev);
        if (unlikely(!port)) {
                dev_core_stats_rx_nohandler_inc(skb->dev);
                kfree_skb(skb);
                goto done;
        }

        switch (port->rmnet_mode) {
        case RMNET_EPMODE_VND:
                rmnet_map_ingress_handler(skb, port);
                break;
        case RMNET_EPMODE_BRIDGE:
                rmnet_bridge_handler(skb, port->bridge_ep);
                break;
        }

done:
        return RX_HANDLER_CONSUMED;
}

/* Modifies packet as per logical endpoint configuration and egress data format
 * for egress device configured in logical endpoint. Packet is then transmitted
 * on the egress device.
 */
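/* Called from the rmnet virtual device's ndo_start_xmit (rmnet_vnd.c). */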
void rmnet_egress_handler(struct sk_buff *skb)
{
        struct net_device *orig_dev;
        struct rmnet_port *port;
        struct rmnet_priv *priv;
        u8 mux_id;
        int err;

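        /* Relax TCP small-queues pacing (shift 8 instead of the default 10)
         * so each socket may keep more data in flight and the device
         * pipeline stays full.
         */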
        sk_pacing_shift_update(skb->sk, 8);

        orig_dev = skb->dev;
        priv = netdev_priv(orig_dev);
        skb->dev = priv->real_dev;
        mux_id = priv->mux_id;

        port = rmnet_get_port_rcu(skb->dev);
        if (!port)
                goto drop;

        err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev);
        if (err == -ENOMEM)
                goto drop;
        else if (err == -EINPROGRESS)
                return;

        rmnet_vnd_tx_fixup(skb, orig_dev);

        dev_queue_xmit(skb);
        return;

drop:
        this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
        kfree_skb(skb);
}