/* -*- linux-c -*-
 * INET		802.1Q VLAN
 *		Ethernet-type device handling.
 *
 * Authors:	Ben Greear <[email protected]>
 *              Please send support related email to: [email protected]
 *              VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
 *
 * Fixes:       Mar 22 2001: Martin Bokaemper <[email protected]>
 *                - reset skb->pkt_type on incoming packets when MAC was changed
 *                - see that changed MAC is saddr for outgoing packets
 *              Oct 20, 2001:  Ard van Breeman:
 *                - Fix MC-list, finally.
 *                - Flush MC-list on VLAN destroy.
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/in.h>
#include <linux/init.h>
#include <asm/uaccess.h> /* for copy_from_user */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/datalink.h>
#include <net/p8022.h>
#include <net/arp.h>

#include "vlan.h"
#include "vlanproc.h"
#include <linux/if_vlan.h>
#include <net/ip.h>

/*
 *	Rebuild the Ethernet MAC header. This is called after an ARP
 *	(or in future other address resolution) has completed on this
 *	sk_buff. We now let ARP fill in the other fields.
 *
 *	This routine CANNOT use cached dst->neigh!
 *	Really, it is used only when dst->neigh is wrong.
 *
 * TODO:  This needs a checkup, I'm ignorant here. --BLG
 */
static int vlan_dev_rebuild_header(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);

	switch (veth->h_vlan_encapsulated_proto) {
#ifdef CONFIG_INET
	case __constant_htons(ETH_P_IP):

		/* TODO:  Confirm this will work with VLAN headers... */
		return arp_find(veth->h_dest, skb);
#endif
	default:
		pr_debug("%s: unable to resolve type %X addresses.\n",
			 dev->name, ntohs(veth->h_vlan_encapsulated_proto));

		memcpy(veth->h_source, dev->dev_addr, ETH_ALEN);
		break;
	}

	return 0;
}

static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
{
	if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) {
		if (skb_shared(skb) || skb_cloned(skb)) {
			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
			kfree_skb(skb);
			skb = nskb;
		}
		if (skb) {
			/* Lifted from Gleb's VLAN code... */
			memmove(skb->data - ETH_HLEN,
				skb->data - VLAN_ETH_HLEN, 12);
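			/* The memmove above shifts the destination and source
			 * MAC addresses (2 * ETH_ALEN = 12 bytes) forward by
			 * VLAN_HLEN, overwriting the 802.1Q tag so the frame
			 * looks like an ordinary untagged Ethernet frame;
			 * mac_header is then adjusted to match.
			 */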
			skb->mac_header += VLAN_HLEN;
		}
	}

	return skb;
}

static inline void vlan_set_encap_proto(struct sk_buff *skb,
		struct vlan_hdr *vhdr)
{
	__be16 proto;
	unsigned char *rawp;

	/*
	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
	 * three protocols care about.
	 */

	proto = vhdr->h_vlan_encapsulated_proto;
	if (ntohs(proto) >= 1536) {
		skb->protocol = proto;
		return;
	}
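
	/* Values below 1536 (0x0600) are 802.3 length fields rather than
	 * EtherTypes, so what follows must be either raw IPX (the 0xFFFF
	 * hack below) or 802.2 LLC.
	 */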
	rawp = skb->data;
	if (*(unsigned short *)rawp == 0xFFFF)
		/*
		 * This is a magic hack to spot IPX packets. Older Novell
		 * breaks the protocol design and runs IPX over 802.3 without
		 * an 802.2 LLC layer. We look for FFFF which isn't a used
		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
		 * but does for the rest.
		 */
		skb->protocol = htons(ETH_P_802_3);
	else
		/*
		 * Real 802.2 LLC
		 */
		skb->protocol = htons(ETH_P_802_2);
}

/*
 *	Determine the packet's protocol ID. The rule here is that we
 *	assume 802.3 if the type field is short enough to be a length.
 *	This is normal practice and works for any 'now in use' protocol.
 *
 *  Also, at this point we assume that we ARE dealing exclusively with
 *  VLAN packets, or packets that should be made into VLAN packets based
 *  on a default VLAN ID.
 *
 *  NOTE: Should be similar to ethernet/eth.c.
 *
 *  SANITY NOTE:  This method is called when a packet is moving up the stack
 *                towards userland.  To get here, it would have already passed
 *                through the ethernet/eth.c eth_type_trans() method.
 *  SANITY NOTE 2: We are referring to the VLAN_HDR fields, which MAY be
 *                 stored UNALIGNED in the memory.  RISC systems don't like
 *                 such cases very much...
 *  SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be
 *                  aligned, so there doesn't need to be any of the unaligned
 *                  stuff.  It has been commented out now...  --Ben
 *
 */
int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *ptype, struct net_device *orig_dev)
{
	struct vlan_hdr *vhdr;
	unsigned short vid;
	struct net_device_stats *stats;
	unsigned short vlan_TCI;

	if (dev->nd_net != &init_net)
		goto err_free;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *)skb->data;
	vlan_TCI = ntohs(vhdr->h_vlan_TCI);
	vid = (vlan_TCI & VLAN_VID_MASK);

	rcu_read_lock();
	skb->dev = __find_vlan_dev(dev, vid);
	if (!skb->dev) {
		pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
			 __FUNCTION__, (unsigned int)vid, dev->name);
		goto err_unlock;
	}

	skb->dev->last_rx = jiffies;

	stats = &skb->dev->stats;
	stats->rx_packets++;
	stats->rx_bytes += skb->len;

	skb_pull_rcsum(skb, VLAN_HLEN);
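	/* The pull above removed the 4-byte VLAN header from the data;
	 * skb_pull_rcsum() also keeps a CHECKSUM_COMPLETE checksum
	 * consistent with the shorter packet.
	 */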

	skb->priority = vlan_get_ingress_priority(skb->dev,
						  ntohs(vhdr->h_vlan_TCI));

	pr_debug("%s: priority: %u for TCI: %hu\n",
		 __FUNCTION__, skb->priority, ntohs(vhdr->h_vlan_TCI));

	switch (skb->pkt_type) {
	case PACKET_BROADCAST: /* Yeah, stats collect these together.. */
		/* stats->broadcast ++; // no such counter :-( */
		break;

	case PACKET_MULTICAST:
		stats->multicast++;
		break;

	case PACKET_OTHERHOST:
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly.
		 */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					skb->dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
		break;
	default:
		break;
	}

	vlan_set_encap_proto(skb, vhdr);

	skb = vlan_check_reorder_header(skb);
	if (!skb) {
		stats->rx_errors++;
		goto err_unlock;
	}

	netif_rx(skb);
	rcu_read_unlock();
	return NET_RX_SUCCESS;

err_unlock:
	rcu_read_unlock();
err_free:
	kfree_skb(skb);
	return NET_RX_DROP;
}

static inline unsigned short
vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
{
	struct vlan_priority_tci_mapping *mp;

	mp = vlan_dev_info(dev)->egress_priority_map[(skb->priority & 0xF)];
	while (mp) {
		if (mp->priority == skb->priority) {
			return mp->vlan_qos; /* This should already be shifted
					      * to mask correctly with the
					      * VLAN's TCI */
		}
		mp = mp->next;
	}
	return 0;
}

/*
 *	Create the VLAN header for an arbitrary protocol layer
 *
 *	saddr=NULL	means use device source address
 *	daddr=NULL	means leave destination address (eg unresolved arp)
 *
 *  This is called when the SKB is moving down the stack towards the
 *  physical devices.
 */
static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				unsigned short type,
				const void *daddr, const void *saddr,
				unsigned int len)
{
	struct vlan_hdr *vhdr;
	unsigned short veth_TCI = 0;
	int rc = 0;
	int build_vlan_header = 0;
	struct net_device *vdev = dev;

	pr_debug("%s: skb: %p type: %hx len: %u vlan_id: %hx, daddr: %p\n",
		 __FUNCTION__, skb, type, len, vlan_dev_info(dev)->vlan_id,
		 daddr);

	/* build vlan header only if re_order_header flag is NOT set.  This
	 * fixes some programs that get confused when they see a VLAN device
	 * sending a frame that is VLAN encoded (the consensus is that the VLAN
	 * device should look completely like an Ethernet device when the
	 * REORDER_HEADER flag is set).  The drawback to this is some extra
	 * header shuffling in the hard_start_xmit.  Users can turn off this
	 * REORDER behaviour with the vconfig tool.
	 */
	if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR))
		build_vlan_header = 1;

	if (build_vlan_header) {
		vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);

		/* build the four bytes that make this a VLAN header. */

		/* Now, construct the second two bytes. This field looks
		 * something like:
		 * usr_priority: 3 bits (high bits)
		 * CFI		 1 bit
		 * VLAN ID	 12 bits (low bits)
		 *
		 */
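		/* For example, a device on VLAN ID 5 whose egress priority
		 * map gives this skb priority 3 produces
		 * TCI = (3 << 13) | 5 = 0x6005.
		 */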
		veth_TCI = vlan_dev_info(dev)->vlan_id;
		veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb);

		vhdr->h_vlan_TCI = htons(veth_TCI);

		/*
		 * Set the protocol type. For a packet of type ETH_P_802_3 we
		 * put the length in here instead. It is up to the 802.2
		 * layer to carry protocol information.
		 */

		if (type != ETH_P_802_3)
			vhdr->h_vlan_encapsulated_proto = htons(type);
		else
			vhdr->h_vlan_encapsulated_proto = htons(len);

		skb->protocol = htons(ETH_P_8021Q);
		skb_reset_network_header(skb);
	}

	/* Before delegating work to the lower layer, enter our MAC-address */
	if (saddr == NULL)
		saddr = dev->dev_addr;

	dev = vlan_dev_info(dev)->real_dev;

	/* MPLS can send us skbuffs w/out enough space.  This check will grow
	 * the skb if it doesn't have enough headroom.  Not a beautiful
	 * solution, so I'll tick a counter so that users can know it's
	 * happening... If they care...
	 */

	/* NOTE: This may still break if the underlying device is not the final
	 * device (and thus there are more headers to add...)  It should work
	 * for good-ole-ethernet though.
	 */
	if (skb_headroom(skb) < dev->hard_header_len) {
		struct sk_buff *sk_tmp = skb;
		skb = skb_realloc_headroom(sk_tmp, dev->hard_header_len);
		kfree_skb(sk_tmp);
		if (skb == NULL) {
			struct net_device_stats *stats = &vdev->stats;
			stats->tx_dropped++;
			return -ENOMEM;
		}
		vlan_dev_info(vdev)->cnt_inc_headroom_on_tx++;
		pr_debug("%s: %s: had to grow skb\n", __FUNCTION__, vdev->name);
	}

	if (build_vlan_header) {
		/* Now make the underlying real hard header */
		rc = dev_hard_header(skb, dev, ETH_P_8021Q, daddr, saddr,
				     len + VLAN_HLEN);
		if (rc > 0)
			rc += VLAN_HLEN;
		else if (rc < 0)
			rc -= VLAN_HLEN;
	} else
		/* If here, then we'll just make a normal looking ethernet
		 * frame, but, the hard_start_xmit method will insert the tag
		 * (it has to be able to do this for bridged and other skbs
		 * that don't come down the protocol stack in an orderly
		 * manner).
		 */
		rc = dev_hard_header(skb, dev, type, daddr, saddr, len);

	return rc;
}

static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);

	/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
	 *
	 * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
	 * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
	 */

	if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
	    vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) {
		int orig_headroom = skb_headroom(skb);
		unsigned short veth_TCI;

		/* This is not a VLAN frame...but we can fix that! */
		vlan_dev_info(dev)->cnt_encap_on_xmit++;

		pr_debug("%s: proto to encap: 0x%hx\n",
			 __FUNCTION__, ntohs(veth->h_vlan_proto));
		/* Construct the second two bytes. This field looks something
		 * like:
		 * usr_priority: 3 bits (high bits)
		 * CFI		 1 bit
		 * VLAN ID	 12 bits (low bits)
		 */
		veth_TCI = vlan_dev_info(dev)->vlan_id;
		veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb);

		skb = __vlan_put_tag(skb, veth_TCI);
		if (!skb) {
			stats->tx_dropped++;
			return 0;
		}

		if (orig_headroom < VLAN_HLEN)
			vlan_dev_info(dev)->cnt_inc_headroom_on_tx++;
	}

	pr_debug("%s: about to send skb: %p to dev: %s\n",
		 __FUNCTION__, skb, skb->dev->name);
	pr_debug("  " MAC_FMT " " MAC_FMT " %4hx %4hx %4hx\n",
		 veth->h_dest[0], veth->h_dest[1], veth->h_dest[2],
		 veth->h_dest[3], veth->h_dest[4], veth->h_dest[5],
		 veth->h_source[0], veth->h_source[1], veth->h_source[2],
		 veth->h_source[3], veth->h_source[4], veth->h_source[5],
		 veth->h_vlan_proto, veth->h_vlan_TCI,
		 veth->h_vlan_encapsulated_proto);

	stats->tx_packets++; /* for statistics only */
	stats->tx_bytes += skb->len;

	skb->dev = vlan_dev_info(dev)->real_dev;
	dev_queue_xmit(skb);

	return 0;
}

static int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	unsigned short veth_TCI;

	/* Construct the second two bytes. This field looks something
	 * like:
	 * usr_priority: 3 bits (high bits)
	 * CFI		 1 bit
	 * VLAN ID	 12 bits (low bits)
	 */
	veth_TCI = vlan_dev_info(dev)->vlan_id;
	veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb);
	skb = __vlan_hwaccel_put_tag(skb, veth_TCI);

	stats->tx_packets++;
	stats->tx_bytes += skb->len;

	skb->dev = vlan_dev_info(dev)->real_dev;
	dev_queue_xmit(skb);

	return 0;
}

static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
{
	/* TODO: gotta make sure the underlying layer can handle it,
	 * maybe an IFF_VLAN_CAPABLE flag for devices?
	 */
	if (vlan_dev_info(dev)->real_dev->mtu < new_mtu)
		return -ERANGE;

	dev->mtu = new_mtu;

	return 0;
}

void vlan_dev_set_ingress_priority(const struct net_device *dev,
				   u32 skb_prio, short vlan_prio)
{
	struct vlan_dev_info *vlan = vlan_dev_info(dev);

	if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio)
		vlan->nr_ingress_mappings--;
	else if (!vlan->ingress_priority_map[vlan_prio & 0x7] && skb_prio)
		vlan->nr_ingress_mappings++;

	vlan->ingress_priority_map[vlan_prio & 0x7] = skb_prio;
}

int vlan_dev_set_egress_priority(const struct net_device *dev,
				 u32 skb_prio, short vlan_prio)
{
	struct vlan_dev_info *vlan = vlan_dev_info(dev);
	struct vlan_priority_tci_mapping *mp = NULL;
	struct vlan_priority_tci_mapping *np;
	u32 vlan_qos = (vlan_prio << 13) & 0xE000;
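	/* vlan_qos keeps only the three 802.1p priority bits of the TCI,
	 * e.g. vlan_prio 5 gives vlan_qos 0xA000.
	 */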

	/* See if a priority mapping exists.. */
	mp = vlan->egress_priority_map[skb_prio & 0xF];
	while (mp) {
		if (mp->priority == skb_prio) {
			if (mp->vlan_qos && !vlan_qos)
				vlan->nr_egress_mappings--;
			else if (!mp->vlan_qos && vlan_qos)
				vlan->nr_egress_mappings++;
			mp->vlan_qos = vlan_qos;
			return 0;
		}
		mp = mp->next;
	}

	/* Create a new mapping then. */
	mp = vlan->egress_priority_map[skb_prio & 0xF];
	np = kmalloc(sizeof(struct vlan_priority_tci_mapping), GFP_KERNEL);
	if (!np)
		return -ENOBUFS;

	np->next = mp;
	np->priority = skb_prio;
	np->vlan_qos = vlan_qos;
	vlan->egress_priority_map[skb_prio & 0xF] = np;
	if (vlan_qos)
		vlan->nr_egress_mappings++;
	return 0;
}

/* Flags are defined in the vlan_flags enum in include/linux/if_vlan.h file. */
int vlan_dev_set_vlan_flag(const struct net_device *dev,
			   u32 flag, short flag_val)
{
	/* verify flag is supported */
	if (flag == VLAN_FLAG_REORDER_HDR) {
		if (flag_val)
			vlan_dev_info(dev)->flags |= VLAN_FLAG_REORDER_HDR;
		else
			vlan_dev_info(dev)->flags &= ~VLAN_FLAG_REORDER_HDR;
		return 0;
	}
	return -EINVAL;
}

void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
{
	strncpy(result, vlan_dev_info(dev)->real_dev->name, 23);
}

void vlan_dev_get_vid(const struct net_device *dev, unsigned short *result)
{
	*result = vlan_dev_info(dev)->vlan_id;
}

static int vlan_dev_open(struct net_device *dev)
{
	struct vlan_dev_info *vlan = vlan_dev_info(dev);
	struct net_device *real_dev = vlan->real_dev;
	int err;

	if (!(real_dev->flags & IFF_UP))
		return -ENETDOWN;

	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
		err = dev_unicast_add(real_dev, dev->dev_addr, ETH_ALEN);
		if (err < 0)
			return err;
	}
	memcpy(vlan->real_dev_addr, real_dev->dev_addr, ETH_ALEN);

	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, 1);
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, 1);

	return 0;
}

static int vlan_dev_stop(struct net_device *dev)
{
	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;

	dev_mc_unsync(real_dev, dev);
	dev_unicast_unsync(real_dev, dev);
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, -1);
	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(real_dev, -1);

	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
		dev_unicast_delete(real_dev, dev->dev_addr, dev->addr_len);

	return 0;
}

static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
{
	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (!(dev->flags & IFF_UP))
		goto out;

	if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) {
		err = dev_unicast_add(real_dev, addr->sa_data, ETH_ALEN);
		if (err < 0)
			return err;
	}

	if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
		dev_unicast_delete(real_dev, dev->dev_addr, ETH_ALEN);

out:
	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
	return 0;
}

static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
	struct ifreq ifrr;
	int err = -EOPNOTSUPP;

	strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
	ifrr.ifr_ifru = ifr->ifr_ifru;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (real_dev->do_ioctl && netif_device_present(real_dev))
			err = real_dev->do_ioctl(real_dev, &ifrr, cmd);
		break;
	}

	if (!err)
		ifr->ifr_ifru = ifrr.ifr_ifru;

	return err;
}

static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
{
	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
	if (change & IFF_PROMISC)
		dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1);
}

static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
{
	dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
	dev_unicast_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
}

/*
 * vlan network devices have devices nesting below them, and are a special
 * "super class" of normal network devices; split their locks off into a
 * separate class since they always nest.
 */
static struct lock_class_key vlan_netdev_xmit_lock_key;

static const struct header_ops vlan_header_ops = {
	.create	 = vlan_dev_hard_header,
	.rebuild = vlan_dev_rebuild_header,
	.parse	 = eth_header_parse,
};

static int vlan_dev_init(struct net_device *dev)
{
	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
	int subclass = 0;

	/* IFF_BROADCAST|IFF_MULTICAST; ??? */
	dev->flags  = real_dev->flags & ~IFF_UP;
	dev->iflink = real_dev->ifindex;
	dev->state  = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
					  (1<<__LINK_STATE_DORMANT))) |
		      (1<<__LINK_STATE_PRESENT);

	/* ipv6 shared card related stuff */
	dev->dev_id = real_dev->dev_id;

	if (is_zero_ether_addr(dev->dev_addr))
		memcpy(dev->dev_addr, real_dev->dev_addr, dev->addr_len);
	if (is_zero_ether_addr(dev->broadcast))
		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);

	if (real_dev->features & NETIF_F_HW_VLAN_TX) {
		dev->header_ops      = real_dev->header_ops;
		dev->hard_header_len = real_dev->hard_header_len;
		dev->hard_start_xmit = vlan_dev_hwaccel_hard_start_xmit;
	} else {
		dev->header_ops      = &vlan_header_ops;
		dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
		dev->hard_start_xmit = vlan_dev_hard_start_xmit;
	}
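	/* The branch above: when the real device inserts the tag in hardware
	 * (NETIF_F_HW_VLAN_TX), the VLAN device reuses its header_ops and
	 * header length; the software path instead builds the tag itself and
	 * needs VLAN_HLEN extra bytes of hard header room.
	 */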

	if (real_dev->priv_flags & IFF_802_1Q_VLAN)
		subclass = 1;

	lockdep_set_class_and_subclass(&dev->_xmit_lock,
				       &vlan_netdev_xmit_lock_key, subclass);
	return 0;
}

void vlan_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags		|= IFF_802_1Q_VLAN;
	dev->tx_queue_len	= 0;

	dev->change_mtu		= vlan_dev_change_mtu;
	dev->init		= vlan_dev_init;
	dev->open		= vlan_dev_open;
	dev->stop		= vlan_dev_stop;
	dev->set_mac_address	= vlan_dev_set_mac_address;
	dev->set_rx_mode	= vlan_dev_set_rx_mode;
	dev->set_multicast_list	= vlan_dev_set_rx_mode;
	dev->change_rx_flags	= vlan_dev_change_rx_flags;
	dev->do_ioctl		= vlan_dev_ioctl;
	dev->destructor		= free_netdev;

	memset(dev->broadcast, 0, ETH_ALEN);
}