/*
 * Generic HDLC support routines for Linux
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

/*
			Theory of PVC state

 DCE mode:

 (exist,new) -> 0,0 when "PVC create" or if "link unreliable"
	 0,x -> 1,1 if "link reliable" when sending FULL STATUS
	 1,1 -> 1,0 if received FULL STATUS ACK

 (active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
	  -> 1 when "PVC up" and (exist,new) = 1,0

 DTE mode:
 (exist,new,active) = FULL STATUS if "link reliable"
		    = 0, 0, 0 if "link unreliable"

 No LMI:
 active = open and "link reliable"
 exist = new = not used

 CCITT LMI: ITU-T Q.933 Annex A
 ANSI LMI: ANSI T1.617 Annex D
 CISCO LMI: the original, aka "Gang of Four" LMI
*/
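/*
 * Illustrative sketch only (not part of the driver build): one way the
 * DCE-side (exist, new, active) transitions described above could be
 * written out in plain C. The demo_pvc struct and helper names are
 * hypothetical; the real transitions are spread across fr_lmi_send(),
 * fr_lmi_recv() and fr_set_link_state() below.
 *
 *	struct demo_pvc { unsigned int exist:1, new:1, active:1; };
 *
 *	static void demo_pvc_create_or_link_unreliable(struct demo_pvc *p)
 *	{
 *		p->exist = p->new = p->active = 0;
 *	}
 *
 *	static void demo_sending_full_status(struct demo_pvc *p, int reliable)
 *	{
 *		if (reliable && !p->exist)
 *			p->exist = p->new = 1;	// 0,x -> 1,1
 *	}
 *
 *	static void demo_full_status_acked(struct demo_pvc *p)
 *	{
 *		if (p->exist && p->new)
 *			p->new = 0;		// 1,1 -> 1,0
 *	}
 *
 *	static void demo_pvc_up(struct demo_pvc *p)
 *	{
 *		if (p->exist && !p->new)
 *			p->active = 1;		// needs (exist,new) = 1,0
 *	}
 */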
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#define NLPID_IPV6		0x8E
#define NLPID_SNAP		0x80
#define NLPID_PAD		0x00
#define NLPID_CCITT_ANSI_LMI	0x08
#define NLPID_CISCO_LMI		0x09

#define LMI_CCITT_ANSI_DLCI	   0	/* LMI DLCI */
#define LMI_CISCO_DLCI		1023

#define LMI_CALLREF		0x00	/* Call Reference */
#define LMI_ANSI_LOCKSHIFT	0x95	/* ANSI locking shift */
#define LMI_ANSI_CISCO_REPTYPE	0x01	/* report type */
#define LMI_CCITT_REPTYPE	0x51
#define LMI_ANSI_CISCO_ALIVE	0x03	/* keep alive */
#define LMI_CCITT_ALIVE		0x53
#define LMI_ANSI_CISCO_PVCSTAT	0x07	/* PVC status */
#define LMI_CCITT_PVCSTAT	0x57

#define LMI_FULLREP		0x00	/* full report */
#define LMI_INTEGRITY		0x01	/* link integrity report */
#define LMI_SINGLE		0x02	/* single PVC report */

#define LMI_STATUS_ENQUIRY	0x75
#define LMI_STATUS		0x7D	/* reply */

#define LMI_REPT_LEN		   1	/* report type element length */
#define LMI_INTEG_LEN		   2	/* link integrity element length */

#define LMI_CCITT_CISCO_LENGTH	  13	/* LMI frame lengths */
#define LMI_ANSI_LENGTH		  14
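/*
 * For reference, the fixed part of an LMI frame as assembled by
 * fr_lmi_send() and parsed by fr_lmi_recv() below (offsets are octets
 * from the start of the frame; the ANSI locking shift accounts for the
 * 14- vs. 13-octet minimum length above):
 *
 *	[0-1]	Q.922 address of the LMI DLCI (0, or 1023 for Cisco)
 *	[2]	UI control field
 *	[3]	NLPID (0x08 CCITT/ANSI, 0x09 Cisco)
 *	[4]	call reference (0x00)
 *	[5]	message type: STATUS (0x7D) or STATUS ENQUIRY (0x75)
 *	[6]	locking shift 0x95 (ANSI only)
 *	then	report type IE: id, length = 1, FULLREP/INTEGRITY/SINGLE
 *	then	link integrity IE: id, length = 2, TX seq, RX seq
 *	then	zero or more PVC status IEs in a DCE full status report
 */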
#if defined(__LITTLE_ENDIAN_BITFIELD)

	struct net_device *frad;
	struct net_device *main;
	struct net_device *ether;	/* bridged Ethernet interface */
	struct pvc_device *next;	/* Sorted in ascending DLCI order */

		unsigned int active: 1;
		unsigned int exist: 1;
		unsigned int deleted: 1;
		unsigned int fecn: 1;
		unsigned int becn: 1;
		unsigned int bandwidth;	/* Cisco LMI reporting only */

	struct pvc_device *first_pvc;
	struct timer_list timer;
	struct net_device *dev;
	unsigned long last_poll;
	u32 last_errors;		/* last errors bit list */
	u8 txseq;			/* TX sequence number */
	u8 rxseq;			/* RX sequence number */
static int fr_ioctl(struct net_device *dev, struct ifreq *ifr);

static inline u16 q922_to_dlci(u8 *hdr)
{
	return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
}

static inline void dlci_to_q922(u8 *hdr, u16 dlci)
{
	hdr[0] = (dlci >> 2) & 0xFC;
	hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
}
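/*
 * Worked example: DLCI 16 encodes as hdr[0] = 0x04, hdr[1] = 0x01.
 * The upper six DLCI bits land in bits 7-2 of the first address octet
 * (C/R and EA0 stay clear), the lower four bits in bits 7-4 of the
 * second octet, whose low bit (EA1) is set to terminate the address.
 * q922_to_dlci() above is the exact inverse:
 * ((0x04 & 0xFC) << 2) | ((0x01 & 0xF0) >> 4) == 16.
 */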
static inline struct frad_state *state(hdlc_device *hdlc)
{
	return (struct frad_state *)(hdlc->state);
}

static inline struct pvc_device *find_pvc(hdlc_device *hdlc, u16 dlci)
	struct pvc_device *pvc = state(hdlc)->first_pvc;

		if (pvc->dlci == dlci)
		if (pvc->dlci > dlci)
			return NULL;	/* the list is sorted */

static struct pvc_device *add_pvc(struct net_device *dev, u16 dlci)
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc;

		if ((*pvc_p)->dlci == dlci)
		if ((*pvc_p)->dlci > dlci)
			break;	/* the list is sorted */
		pvc_p = &(*pvc_p)->next;

	pvc = kzalloc(sizeof(*pvc), GFP_ATOMIC);
	printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev);

	pvc->next = *pvc_p;	/* Put it in the chain */

static inline int pvc_is_used(struct pvc_device *pvc)
{
	return pvc->main || pvc->ether;
}
static inline void pvc_carrier(int on, struct pvc_device *pvc)
			if (!netif_carrier_ok(pvc->main))
				netif_carrier_on(pvc->main);
			if (!netif_carrier_ok(pvc->ether))
				netif_carrier_on(pvc->ether);
			if (netif_carrier_ok(pvc->main))
				netif_carrier_off(pvc->main);
			if (netif_carrier_ok(pvc->ether))
				netif_carrier_off(pvc->ether);

static inline void delete_unused_pvcs(hdlc_device *hdlc)
	struct pvc_device **pvc_p = &state(hdlc)->first_pvc;

		if (!pvc_is_used(*pvc_p)) {
			struct pvc_device *pvc = *pvc_p;

			printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc);
		pvc_p = &(*pvc_p)->next;

static inline struct net_device **get_dev_p(struct pvc_device *pvc,
					    int type)
	if (type == ARPHRD_ETHER)
static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
	struct sk_buff *skb = *skb_p;

	switch (skb->protocol) {
	case cpu_to_be16(NLPID_CCITT_ANSI_LMI):
		skb_push(skb, head_len);
		skb->data[3] = NLPID_CCITT_ANSI_LMI;

	case cpu_to_be16(NLPID_CISCO_LMI):
		skb_push(skb, head_len);
		skb->data[3] = NLPID_CISCO_LMI;

	case cpu_to_be16(ETH_P_IP):
		skb_push(skb, head_len);
		skb->data[3] = NLPID_IP;

	case cpu_to_be16(ETH_P_IPV6):
		skb_push(skb, head_len);
		skb->data[3] = NLPID_IPV6;

	case cpu_to_be16(ETH_P_802_3):
		if (skb_headroom(skb) < head_len) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb,
								    head_len);
		skb_push(skb, head_len);
		skb->data[3] = FR_PAD;
		skb->data[4] = NLPID_SNAP;
		skb->data[5] = FR_PAD;
		skb->data[9] = 0x07;	/* bridged Ethernet frame w/out FCS */

		skb_push(skb, head_len);
		skb->data[3] = FR_PAD;
		skb->data[4] = NLPID_SNAP;
		skb->data[5] = FR_PAD;
		skb->data[6] = FR_PAD;
		skb->data[7] = FR_PAD;
		*(__be16 *)(skb->data + 8) = skb->protocol;

	dlci_to_q922(skb->data, dlci);
	skb->data[2] = FR_UI;
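/*
 * The resulting link-layer headers, assuming the usual values of the
 * FR_UI, FR_PAD and NLPID_IP constants (0x03, 0x00 and 0xCC per RFC 2427),
 * where "addr" stands for the two Q.922 address octets written above:
 *
 *	routed IPv4:	  addr 0x03 0xCC			(4 octets)
 *	routed IPv6:	  addr 0x03 0x8E			(4 octets)
 *	bridged Ethernet: addr 0x03 0x00 0x80 0x00 0x80 0xC2 0x00 0x07
 *	other protocols:  addr 0x03 0x00 0x80 0x00 0x00 0x00 <ethertype>
 */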
static int pvc_open(struct net_device *dev)
	struct pvc_device *pvc = dev->ml_priv;

	if ((pvc->frad->flags & IFF_UP) == 0)
		return -EIO;	/* Frad must be UP in order to activate PVC */

	if (pvc->open_count++ == 0) {
		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
		if (state(hdlc)->settings.lmi == LMI_NONE)
			pvc->state.active = netif_carrier_ok(pvc->frad);

		pvc_carrier(pvc->state.active, pvc);
		state(hdlc)->dce_changed = 1;

static int pvc_close(struct net_device *dev)
	struct pvc_device *pvc = dev->ml_priv;

	if (--pvc->open_count == 0) {
		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
		if (state(hdlc)->settings.lmi == LMI_NONE)
			pvc->state.active = 0;

		if (state(hdlc)->settings.dce) {
			state(hdlc)->dce_changed = 1;
			pvc->state.active = 0;
static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
	struct pvc_device *pvc = dev->ml_priv;
	fr_proto_pvc_info info;

	if (ifr->ifr_settings.type == IF_GET_PROTO) {
		if (dev->type == ARPHRD_ETHER)
			ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
		else
			ifr->ifr_settings.type = IF_PROTO_FR_PVC;

		if (ifr->ifr_settings.size < sizeof(info)) {
			/* data size wanted */
			ifr->ifr_settings.size = sizeof(info);

		info.dlci = pvc->dlci;
		memcpy(info.master, pvc->frad->name, IFNAMSIZ);
		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
				 &info, sizeof(info)))
static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
	struct pvc_device *pvc = dev->ml_priv;

	if (pvc->state.active) {
		if (dev->type == ARPHRD_ETHER) {
			int pad = ETH_ZLEN - skb->len;

			if (pad > 0) {	/* Pad the frame with zeros */
				if (skb_tailroom(skb) < pad)
					if (pskb_expand_head(skb, 0, pad,
							     GFP_ATOMIC)) {
						dev->stats.tx_dropped++;

				memset(skb->data + len, 0, pad);

			skb->protocol = cpu_to_be16(ETH_P_802_3);

		if (!fr_hard_header(&skb, pvc->dlci)) {
			dev->stats.tx_bytes += skb->len;
			dev->stats.tx_packets++;
			if (pvc->state.fecn)	/* TX Congestion counter */
				dev->stats.tx_compressed++;
			skb->dev = pvc->frad;

	dev->stats.tx_dropped++;
static inline void fr_log_dlci_active(struct pvc_device *pvc)
	netdev_info(pvc->frad, "DLCI %d [%s%s%s]%s %s\n",
		    pvc->dlci,
		    pvc->main ? pvc->main->name : "",
		    pvc->main && pvc->ether ? " " : "",
		    pvc->ether ? pvc->ether->name : "",
		    pvc->state.new ? " new" : "",
		    !pvc->state.exist ? "deleted" :
		    pvc->state.active ? "active" : "inactive");

static inline u8 fr_lmi_nextseq(u8 x)
static void fr_lmi_send(struct net_device *dev, int fullrep)
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc = state(hdlc)->first_pvc;
	int lmi = state(hdlc)->settings.lmi;
	int dce = state(hdlc)->settings.dce;
	int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH;
	int stat_len = (lmi == LMI_CISCO) ? 6 : 3;

	if (dce && fullrep) {
		len += state(hdlc)->dce_pvc_count * (2 + stat_len);
		if (len > HDLC_MAX_MRU) {
			netdev_warn(dev, "Too many PVCs while sending LMI full report\n");

	skb = dev_alloc_skb(len);
		netdev_warn(dev, "Memory squeeze on fr_lmi_send()\n");
	memset(skb->data, 0, len);

	if (lmi == LMI_CISCO) {
		skb->protocol = cpu_to_be16(NLPID_CISCO_LMI);
		fr_hard_header(&skb, LMI_CISCO_DLCI);
		skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI);
		fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);

	data = skb_tail_pointer(skb);
	data[i++] = LMI_CALLREF;
	data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
		data[i++] = LMI_ANSI_LOCKSHIFT;
	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
		LMI_ANSI_CISCO_REPTYPE;
	data[i++] = LMI_REPT_LEN;
	data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
	data[i++] = LMI_INTEG_LEN;
	data[i++] = state(hdlc)->txseq =
		fr_lmi_nextseq(state(hdlc)->txseq);
	data[i++] = state(hdlc)->rxseq;

	if (dce && fullrep) {
			data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
				LMI_ANSI_CISCO_PVCSTAT;
			data[i++] = stat_len;

			/* LMI start/restart */
			if (state(hdlc)->reliable && !pvc->state.exist) {
				pvc->state.exist = pvc->state.new = 1;
				fr_log_dlci_active(pvc);

			/* ifconfig PVC up */
			if (pvc->open_count && !pvc->state.active &&
			    pvc->state.exist && !pvc->state.new) {
				pvc->state.active = 1;
				fr_log_dlci_active(pvc);

			if (lmi == LMI_CISCO) {
				data[i] = pvc->dlci >> 8;
				data[i + 1] = pvc->dlci & 0xFF;
				data[i] = (pvc->dlci >> 4) & 0x3F;
				data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80;

			else if (pvc->state.active)

	skb->priority = TC_PRIO_CONTROL;
	skb_reset_network_header(skb);
static void fr_set_link_state(int reliable, struct net_device *dev)
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc = state(hdlc)->first_pvc;

	state(hdlc)->reliable = reliable;
		netif_dormant_off(dev);
		state(hdlc)->n391cnt = 0;	/* Request full status */
		state(hdlc)->dce_changed = 1;

		if (state(hdlc)->settings.lmi == LMI_NONE) {
			while (pvc) {		/* Activate all PVCs */
				pvc->state.exist = pvc->state.active = 1;

		netif_dormant_on(dev);
		while (pvc) {			/* Deactivate all PVCs */
			pvc->state.exist = pvc->state.active = 0;
			if (!state(hdlc)->settings.dce)
				pvc->state.bandwidth = 0;
static void fr_timer(struct timer_list *t)
	struct frad_state *st = from_timer(st, t, timer);
	struct net_device *dev = st->dev;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	int i, cnt = 0, reliable;

	if (state(hdlc)->settings.dce) {
		reliable = state(hdlc)->request &&
			time_before(jiffies, state(hdlc)->last_poll +
				    state(hdlc)->settings.t392 * HZ);
		state(hdlc)->request = 0;

		state(hdlc)->last_errors <<= 1;	/* Shift the list */
		if (state(hdlc)->request) {
			if (state(hdlc)->reliable)
				netdev_info(dev, "No LMI status reply received\n");
			state(hdlc)->last_errors |= 1;

		list = state(hdlc)->last_errors;
		for (i = 0; i < state(hdlc)->settings.n393; i++, list >>= 1)
			cnt += (list & 1);	/* errors count */

		reliable = (cnt < state(hdlc)->settings.n392);

	if (state(hdlc)->reliable != reliable) {
		netdev_info(dev, "Link %sreliable\n", reliable ? "" : "un");
		fr_set_link_state(reliable, dev);

	if (state(hdlc)->settings.dce)
		state(hdlc)->timer.expires = jiffies +
			state(hdlc)->settings.t392 * HZ;

		if (state(hdlc)->n391cnt)
			state(hdlc)->n391cnt--;

		fr_lmi_send(dev, state(hdlc)->n391cnt == 0);

		state(hdlc)->last_poll = jiffies;
		state(hdlc)->request = 1;
		state(hdlc)->timer.expires = jiffies +
			state(hdlc)->settings.t391 * HZ;

	add_timer(&state(hdlc)->timer);
static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc;
	int lmi = state(hdlc)->settings.lmi;
	int dce = state(hdlc)->settings.dce;
	int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i;

	if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
			LMI_CCITT_CISCO_LENGTH)) {
		netdev_info(dev, "Short LMI frame\n");

	if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
			     NLPID_CCITT_ANSI_LMI)) {
		netdev_info(dev, "Received non-LMI frame with LMI DLCI\n");

	if (skb->data[4] != LMI_CALLREF) {
		netdev_info(dev, "Invalid LMI Call reference (0x%02X)\n",

	if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
		netdev_info(dev, "Invalid LMI Message type (0x%02X)\n",

	if (lmi == LMI_ANSI) {
		if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
			netdev_info(dev, "Not ANSI locking shift in LMI message (0x%02X)\n",

	if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
			     LMI_ANSI_CISCO_REPTYPE)) {
		netdev_info(dev, "Not an LMI Report type IE (0x%02X)\n",

	if (skb->data[++i] != LMI_REPT_LEN) {
		netdev_info(dev, "Invalid LMI Report type IE length (%u)\n",

	reptype = skb->data[++i];
	if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
		netdev_info(dev, "Unsupported LMI Report type (0x%02X)\n",

	if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
			       LMI_ANSI_CISCO_ALIVE)) {
		netdev_info(dev, "Not an LMI Link integrity verification IE (0x%02X)\n",

	if (skb->data[++i] != LMI_INTEG_LEN) {
		netdev_info(dev, "Invalid LMI Link integrity verification IE length (%u)\n",

	state(hdlc)->rxseq = skb->data[i++];	/* TX sequence from peer */
	rxseq = skb->data[i++];			/* Should confirm our sequence */

	txseq = state(hdlc)->txseq;

		state(hdlc)->last_poll = jiffies;

	if (!state(hdlc)->reliable)

	if (rxseq == 0 || rxseq != txseq) {	/* Ask for full report next time */
		state(hdlc)->n391cnt = 0;

		if (state(hdlc)->fullrep_sent && !error) {
			/* Stop sending full report - the last one has been confirmed by DTE */
			state(hdlc)->fullrep_sent = 0;
			pvc = state(hdlc)->first_pvc;
				if (pvc->state.new) {
					/* Tell DTE that new PVC is now active */
					state(hdlc)->dce_changed = 1;

		if (state(hdlc)->dce_changed) {
			reptype = LMI_FULLREP;
			state(hdlc)->fullrep_sent = 1;
			state(hdlc)->dce_changed = 0;

		state(hdlc)->request = 1;	/* got request */
		fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);

	state(hdlc)->request = 0;	/* got response, no request pending */

	if (reptype != LMI_FULLREP)

	pvc = state(hdlc)->first_pvc;
		pvc->state.deleted = 1;

	while (skb->len >= i + 2 + stat_len) {
		unsigned int active, new;

		if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
				     LMI_ANSI_CISCO_PVCSTAT)) {
			netdev_info(dev, "Not an LMI PVC status IE (0x%02X)\n",

		if (skb->data[++i] != stat_len) {
			netdev_info(dev, "Invalid LMI PVC status IE length (%u)\n",

		new = !!(skb->data[i + 2] & 0x08);
		active = !!(skb->data[i + 2] & 0x02);
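		/*
		 * The 0x08 and 0x02 masks pick the "new" and "active" bits
		 * out of the PVC status information octet; fr_lmi_send()
		 * above sets the same bits when acting as a DCE.
		 */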
		if (lmi == LMI_CISCO) {
			dlci = (skb->data[i] << 8) | skb->data[i + 1];
			bw = (skb->data[i + 3] << 16) |
				(skb->data[i + 4] << 8) |
			dlci = ((skb->data[i] & 0x3F) << 4) |
				((skb->data[i + 1] & 0x78) >> 3);

		pvc = add_pvc(dev, dlci);

		if (!pvc && !no_ram) {
			netdev_warn(dev, "Memory squeeze on fr_lmi_recv()\n");

			pvc->state.exist = 1;
			pvc->state.deleted = 0;
			if (active != pvc->state.active ||
			    new != pvc->state.new ||
			    bw != pvc->state.bandwidth ||
				pvc->state.new = new;
				pvc->state.active = active;
				pvc->state.bandwidth = bw;
				pvc_carrier(active, pvc);
				fr_log_dlci_active(pvc);

	pvc = state(hdlc)->first_pvc;
		if (pvc->state.deleted && pvc->state.exist) {
			pvc->state.active = pvc->state.new = 0;
			pvc->state.exist = 0;
			pvc->state.bandwidth = 0;
			fr_log_dlci_active(pvc);

	/* Next full report after N391 polls */
	state(hdlc)->n391cnt = state(hdlc)->settings.n391;
static int fr_rx(struct sk_buff *skb)
	struct net_device *frad = skb->dev;
	hdlc_device *hdlc = dev_to_hdlc(frad);
	struct fr_hdr *fh = (struct fr_hdr *)skb->data;
	u8 *data = skb->data;
	struct pvc_device *pvc;
	struct net_device *dev = NULL;

	if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)

	dlci = q922_to_dlci(skb->data);

	if ((dlci == LMI_CCITT_ANSI_DLCI &&
	     (state(hdlc)->settings.lmi == LMI_ANSI ||
	      state(hdlc)->settings.lmi == LMI_CCITT)) ||
	    (dlci == LMI_CISCO_DLCI &&
	     state(hdlc)->settings.lmi == LMI_CISCO)) {
		if (fr_lmi_recv(frad, skb))
		dev_kfree_skb_any(skb);
		return NET_RX_SUCCESS;

	pvc = find_pvc(hdlc, dlci);
		netdev_info(frad, "No PVC for received frame's DLCI %d\n",
		dev_kfree_skb_any(skb);

	if (pvc->state.fecn != fh->fecn) {
		printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", frad->name,
		       dlci, fh->fecn ? "N" : "FF");
		pvc->state.fecn ^= 1;

	if (pvc->state.becn != fh->becn) {
		printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", frad->name,
		       dlci, fh->becn ? "N" : "FF");
		pvc->state.becn ^= 1;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
		frad->stats.rx_dropped++;

	if (data[3] == NLPID_IP) {
		skb_pull(skb, 4);	/* Remove 4-byte header (hdr, UI, NLPID) */
		skb->protocol = htons(ETH_P_IP);

	} else if (data[3] == NLPID_IPV6) {
		skb_pull(skb, 4);	/* Remove 4-byte header (hdr, UI, NLPID) */
		skb->protocol = htons(ETH_P_IPV6);

	} else if (skb->len > 10 && data[3] == FR_PAD &&
		   data[4] == NLPID_SNAP && data[5] == FR_PAD) {
		u16 oui = ntohs(*(__be16 *)(data + 6));
		u16 pid = ntohs(*(__be16 *)(data + 8));

		switch ((((u32)oui) << 16) | pid) {
		case ETH_P_ARP:	/* routed frame with SNAP */
		case ETH_P_IP:	/* a long variant */
			skb->protocol = htons(pid);

		case 0x80C20007: /* bridged Ethernet frame */
			if ((dev = pvc->ether) != NULL)
				skb->protocol = eth_type_trans(skb, dev);
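			/*
			 * 0x80C20007 is the IEEE 802.1 OUI 00-80-C2 followed
			 * by PID 0x0007, i.e. a bridged 802.3 frame without
			 * preserved FCS, the counterpart of the 0x07 octet
			 * written by fr_hard_header() above.
			 */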
			netdev_info(frad, "Unsupported protocol, OUI=%x PID=%x\n",
			dev_kfree_skb_any(skb);

		netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n",
		dev_kfree_skb_any(skb);

		dev->stats.rx_packets++;	/* PVC traffic */
		dev->stats.rx_bytes += skb->len;
			dev->stats.rx_compressed++;
		return NET_RX_SUCCESS;

	dev_kfree_skb_any(skb);

	frad->stats.rx_errors++;	/* Mark error */
	dev_kfree_skb_any(skb);
static void fr_start(struct net_device *dev)
	hdlc_device *hdlc = dev_to_hdlc(dev);

	printk(KERN_DEBUG "fr_start\n");

	if (state(hdlc)->settings.lmi != LMI_NONE) {
		state(hdlc)->reliable = 0;
		state(hdlc)->dce_changed = 1;
		state(hdlc)->request = 0;
		state(hdlc)->fullrep_sent = 0;
		state(hdlc)->last_errors = 0xFFFFFFFF;
		state(hdlc)->n391cnt = 0;
		state(hdlc)->txseq = state(hdlc)->rxseq = 0;

		state(hdlc)->dev = dev;
		timer_setup(&state(hdlc)->timer, fr_timer, 0);
		/* First poll after 1 s */
		state(hdlc)->timer.expires = jiffies + HZ;
		add_timer(&state(hdlc)->timer);

		fr_set_link_state(1, dev);

static void fr_stop(struct net_device *dev)
	hdlc_device *hdlc = dev_to_hdlc(dev);

	printk(KERN_DEBUG "fr_stop\n");

	if (state(hdlc)->settings.lmi != LMI_NONE)
		del_timer_sync(&state(hdlc)->timer);

	fr_set_link_state(0, dev);
static void fr_close(struct net_device *dev)
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct pvc_device *pvc = state(hdlc)->first_pvc;

	while (pvc) {		/* Shutdown all PVCs for this FRAD */
			dev_close(pvc->main);
			dev_close(pvc->ether);

static void pvc_setup(struct net_device *dev)
	dev->type = ARPHRD_DLCI;
	dev->flags = IFF_POINTOPOINT;
	dev->hard_header_len = 10;
	netif_keep_dst(dev);

static const struct net_device_ops pvc_ops = {
	.ndo_open	= pvc_open,
	.ndo_stop	= pvc_close,
	.ndo_start_xmit	= pvc_xmit,
	.ndo_do_ioctl	= pvc_ioctl,
};
static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
	hdlc_device *hdlc = dev_to_hdlc(frad);
	struct pvc_device *pvc;
	struct net_device *dev;

	if ((pvc = add_pvc(frad, dlci)) == NULL) {
		netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n");

	if (*get_dev_p(pvc, type))

	used = pvc_is_used(pvc);

	if (type == ARPHRD_ETHER)
		dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN,
				   ether_setup);
		dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup);

		netdev_warn(frad, "Memory squeeze on fr_pvc()\n");
		delete_unused_pvcs(hdlc);

	if (type == ARPHRD_ETHER) {
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		eth_hw_addr_random(dev);
		*(__be16 *)dev->dev_addr = htons(dlci);
		dlci_to_q922(dev->broadcast, dlci);

	dev->netdev_ops = &pvc_ops;
	dev->mtu = HDLC_MAX_MTU;
	dev->max_mtu = HDLC_MAX_MTU;
	dev->priv_flags |= IFF_NO_QUEUE;

	if (register_netdevice(dev) != 0) {
		delete_unused_pvcs(hdlc);

	dev->needs_free_netdev = true;
	*get_dev_p(pvc, type) = dev;

		state(hdlc)->dce_changed = 1;
		state(hdlc)->dce_pvc_count++;
static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
	struct pvc_device *pvc;
	struct net_device *dev;

	if ((pvc = find_pvc(hdlc, dlci)) == NULL)

	if ((dev = *get_dev_p(pvc, type)) == NULL)

	if (dev->flags & IFF_UP)
		return -EBUSY;		/* PVC in use */

	unregister_netdevice(dev);	/* the destructor will free_netdev(dev) */
	*get_dev_p(pvc, type) = NULL;

	if (!pvc_is_used(pvc)) {
		state(hdlc)->dce_pvc_count--;
		state(hdlc)->dce_changed = 1;

	delete_unused_pvcs(hdlc);
static void fr_destroy(struct net_device *frad)
	hdlc_device *hdlc = dev_to_hdlc(frad);
	struct pvc_device *pvc = state(hdlc)->first_pvc;
	state(hdlc)->first_pvc = NULL;	/* All PVCs destroyed */
	state(hdlc)->dce_pvc_count = 0;
	state(hdlc)->dce_changed = 1;

		struct pvc_device *next = pvc->next;
		/* destructors will free_netdev() main and ether */
			unregister_netdevice(pvc->main);
			unregister_netdevice(pvc->ether);

static struct hdlc_proto proto = {
	.detach		= fr_destroy,
	.module		= THIS_MODULE,
};
static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
	fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
	const size_t size = sizeof(fr_proto);
	fr_proto new_settings;
	hdlc_device *hdlc = dev_to_hdlc(dev);

	switch (ifr->ifr_settings.type) {
		if (dev_to_hdlc(dev)->proto != &proto)	/* Different proto */
		ifr->ifr_settings.type = IF_PROTO_FR;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size;	/* data size wanted */
		if (copy_to_user(fr_s, &state(hdlc)->settings, size))

		if (!capable(CAP_NET_ADMIN))

		if (dev->flags & IFF_UP)

		if (copy_from_user(&new_settings, fr_s, size))

		if (new_settings.lmi == LMI_DEFAULT)
			new_settings.lmi = LMI_ANSI;
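		/*
		 * Sanity-check the standard LMI parameters: T391 (DTE poll
		 * interval) and T392 (DCE poll verification timeout) are in
		 * seconds, N391 is the full-status polling cycle, and
		 * N392/N393 form the "N392 errors in the last N393 events"
		 * threshold that fr_timer() uses to declare the link
		 * unreliable.
		 */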
		if ((new_settings.lmi != LMI_NONE &&
		     new_settings.lmi != LMI_ANSI &&
		     new_settings.lmi != LMI_CCITT &&
		     new_settings.lmi != LMI_CISCO) ||
		    new_settings.t391 < 1 ||
		    new_settings.t392 < 2 ||
		    new_settings.n391 < 1 ||
		    new_settings.n392 < 1 ||
		    new_settings.n393 < new_settings.n392 ||
		    new_settings.n393 > 32 ||
		    (new_settings.dce != 0 &&
		     new_settings.dce != 1))

		result = hdlc->attach(dev, ENCODING_NRZ,
				      PARITY_CRC16_PR1_CCITT);

		if (dev_to_hdlc(dev)->proto != &proto) {	/* Different proto */
			result = attach_hdlc_protocol(dev, &proto,
						      sizeof(struct frad_state));
			state(hdlc)->first_pvc = NULL;
			state(hdlc)->dce_pvc_count = 0;

		memcpy(&state(hdlc)->settings, &new_settings, size);
		dev->type = ARPHRD_FRAD;
		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);

	case IF_PROTO_FR_ADD_PVC:
	case IF_PROTO_FR_DEL_PVC:
	case IF_PROTO_FR_ADD_ETH_PVC:
	case IF_PROTO_FR_DEL_ETH_PVC:
		if (dev_to_hdlc(dev)->proto != &proto)	/* Different proto */

		if (!capable(CAP_NET_ADMIN))

		if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
				   sizeof(fr_proto_pvc)))

		if (pvc.dlci <= 0 || pvc.dlci >= 1024)
			return -EINVAL;	/* Only 10 bits, DLCI 0 reserved */

		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
		    ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
			result = ARPHRD_ETHER;	/* bridged Ethernet device */
			result = ARPHRD_DLCI;

		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
		    ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
			return fr_add_pvc(dev, pvc.dlci, result);
			return fr_del_pvc(hdlc, pvc.dlci, result);
static int __init mod_init(void)
	register_hdlc_protocol(&proto);

static void __exit mod_exit(void)
	unregister_hdlc_protocol(&proto);

module_init(mod_init);
module_exit(mod_exit);

MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");