/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland
 * License terms: GNU General Public License (GPL) version 2
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
 *  and Sakari Ailus <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_dev.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfserl.h>

MODULE_LICENSE("GPL");

/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
	struct cflayer layer;
	struct list_head list;
	struct net_device *netdev;
	int __percpu *pcpu_refcnt;
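	/* Protects the xoff state below; taken in BH context by
	 * transmit(), caif_flow_cb() and the NETDEV_DOWN handler. */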
	spinlock_t flow_lock;
	struct sk_buff *xoff_skb;
	void (*xoff_skb_dtor)(struct sk_buff *skb);
	bool xoff;
};

struct caif_device_entry_list {
	struct list_head list;
	/* Protects simultaneous deletes in list */
	struct mutex lock;
};

struct caif_net {
	struct cfcnfg *cfg;
	struct caif_device_entry_list caifdevs;
};

static int caif_net_id;
static int q_high = 50; /* Percent */

struct cfcnfg *get_cfcnfg(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return caifn->cfg;
}
EXPORT_SYMBOL(get_cfcnfg);

static struct caif_device_entry_list *caif_device_list(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return &caifn->caifdevs;
}

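/*
 * Per-CPU reference counting: caifd_hold()/caifd_put() touch only the
 * local CPU's counter, so the RX/TX fast paths avoid contending on a
 * shared cacheline. The trade-off is that reading the true count must
 * sum over all possible CPUs, so caifd_refcnt_read() is reserved for
 * the slow teardown paths (NETDEV_UNREGISTER and namespace exit).
 */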
static void caifd_put(struct caif_device_entry *e)
{
	this_cpu_dec(*e->pcpu_refcnt);
}

static void caifd_hold(struct caif_device_entry *e)
{
	this_cpu_inc(*e->pcpu_refcnt);
}

static int caifd_refcnt_read(struct caif_device_entry *e)
{
	int i, refcnt = 0;
	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
	return refcnt;
}

/* Allocate new CAIF device. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
	struct caif_device_entry *caifd;

	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
	if (!caifd)
		return NULL;
	caifd->pcpu_refcnt = alloc_percpu(int);
	if (!caifd->pcpu_refcnt) {
		kfree(caifd);
		return NULL;
	}
	caifd->netdev = dev;
	dev_hold(dev);
	return caifd;
}

static struct caif_device_entry *caif_get(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(dev_net(dev));
	struct caif_device_entry *caifd;

	list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
		if (caifd->netdev == dev)
			return caifd;
	}
	return NULL;
}

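/*
 * skb destructor installed by transmit() while flow is off. It runs
 * once the driver consumes the stalled skb: the original destructor is
 * restored and invoked, the xoff state is cleared under flow_lock, and
 * FLOW_ON is signalled back up the CAIF stack.
 */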
static void caif_flow_cb(struct sk_buff *skb)
{
	struct caif_device_entry *caifd;
	void (*dtor)(struct sk_buff *skb) = NULL;
	bool send_xoff;

	WARN_ON(skb->dev == NULL);

	rcu_read_lock();
	caifd = caif_get(skb->dev);

	WARN_ON(caifd == NULL);
	if (caifd == NULL) {
		/* Drop the RCU read lock before bailing out */
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	spin_lock_bh(&caifd->flow_lock);
	send_xoff = caifd->xoff;
	caifd->xoff = 0;
	dtor = caifd->xoff_skb_dtor;

	if (WARN_ON(caifd->xoff_skb != skb))
		skb = NULL;

	caifd->xoff_skb = NULL;
	caifd->xoff_skb_dtor = NULL;

	spin_unlock_bh(&caifd->flow_lock);

	if (dtor && skb)
		dtor(skb);

	if (send_xoff)
		caifd->layer.up->
			ctrlcmd(caifd->layer.up,
				_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
				caifd->layer.id);
	caifd_put(caifd);
}

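/*
 * Transmit a CAIF packet down to the underlying net device. For
 * devices with a TX queue, flow is turned off once the qdisc backlog
 * exceeds q_high percent of tx_queue_len; the skb's destructor is
 * hijacked so caif_flow_cb() can turn flow back on when the driver
 * frees the skb.
 */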
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
	int err, high = 0, qlen = 0;
	struct caif_device_entry *caifd =
	    container_of(layer, struct caif_device_entry, layer);
	struct sk_buff *skb;
	struct netdev_queue *txq;

	rcu_read_lock_bh();

	skb = cfpkt_tonative(pkt);
	skb->dev = caifd->netdev;
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_CAIF);

	/* Check if we need to handle xoff */
	if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
		goto noxoff;

	if (unlikely(caifd->xoff))
		goto noxoff;

	if (likely(!netif_queue_stopped(caifd->netdev))) {
		/* If we run with a TX queue, check if the queue is too long */
		txq = netdev_get_tx_queue(skb->dev, 0);
		qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc));

		if (likely(qlen == 0))
			goto noxoff;

		high = (caifd->netdev->tx_queue_len * q_high) / 100;
		if (likely(qlen < high))
			goto noxoff;
	}

	/* Hold lock while accessing xoff */
	spin_lock_bh(&caifd->flow_lock);
	if (caifd->xoff) {
		spin_unlock_bh(&caifd->flow_lock);
		goto noxoff;
	}

	/*
	 * Handle flow-off: temporarily hijack this skb's destructor
	 * function and replace it with our own flow-on callback. The
	 * callback will set flow-on and call the original destructor.
	 */

	pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
			netif_queue_stopped(caifd->netdev),
			qlen, high);
	caifd->xoff = 1;
	caifd->xoff_skb = skb;
	caifd->xoff_skb_dtor = skb->destructor;
	skb->destructor = caif_flow_cb;
	spin_unlock_bh(&caifd->flow_lock);

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
noxoff:
	rcu_read_unlock_bh();

	err = dev_queue_xmit(skb);
	if (err > 0)
		err = -EIO;

	return err;
}

/*
 * Stuff received packets into the CAIF stack.
 * On error, returns non-zero and releases the skb.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pkttype, struct net_device *orig_dev)
{
	struct cfpkt *pkt;
	struct caif_device_entry *caifd;
	int err;

	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

	rcu_read_lock();
	caifd = caif_get(dev);

	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
			!netif_oper_up(caifd->netdev)) {
		rcu_read_unlock();
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Hold reference to netdevice while using CAIF stack */
	caifd_hold(caifd);
	rcu_read_unlock();

	err = caifd->layer.up->receive(caifd->layer.up, pkt);

	/* For -EILSEQ the packet is not freed, so free it now */
	if (err == -EILSEQ)
		cfpkt_destroy(pkt);

	/* Release reference to stack upwards */
	caifd_put(caifd);

	if (err != 0)
		err = NET_RX_DROP;
	return err;
}

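/*
 * Hooks receive() into the kernel's RX dispatch for ETH_P_CAIF frames;
 * registered with dev_add_pack() in caif_device_init().
 */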
static struct packet_type caif_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAIF),
	.func = receive,
};

static void dev_flowctrl(struct net_device *dev, int on)
{
	struct caif_device_entry *caifd;

	rcu_read_lock();

	caifd = caif_get(dev);
	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 on ?
				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
	caifd_put(caifd);
}

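/*
 * Allocate a device entry for a net device and register it as a CAIF
 * physical layer. Called from the NETDEV_REGISTER handler below, and
 * exported so CAIF link drivers can enroll devices themselves.
 */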
void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
		     struct cflayer *link_support, int head_room,
		     struct cflayer **layer,
		     int (**rcv_func)(struct sk_buff *, struct net_device *,
				      struct packet_type *,
				      struct net_device *))
{
	struct caif_device_entry *caifd;
	enum cfcnfg_phy_preference pref;
	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
	struct caif_device_entry_list *caifdevs;

	caifdevs = caif_device_list(dev_net(dev));
	caifd = caif_device_alloc(dev);
	if (!caifd)
		return;
	*layer = &caifd->layer;
	spin_lock_init(&caifd->flow_lock);

	switch (caifdev->link_select) {
	case CAIF_LINK_HIGH_BANDW:
		pref = CFPHYPREF_HIGH_BW;
		break;
	case CAIF_LINK_LOW_LATENCY:
		pref = CFPHYPREF_LOW_LAT;
		break;
	default:
		pref = CFPHYPREF_HIGH_BW;
		break;
	}
	mutex_lock(&caifdevs->lock);
	list_add_rcu(&caifd->list, &caifdevs->list);

	strncpy(caifd->layer.name, dev->name,
		sizeof(caifd->layer.name) - 1);
	caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
	caifd->layer.transmit = transmit;
	cfcnfg_add_phy_layer(cfg,
			     dev,
			     &caifd->layer,
			     pref,
			     link_support,
			     caifdev->use_fcs,
			     head_room);
	mutex_unlock(&caifdevs->lock);
	if (rcv_func)
		*rcv_func = receive;
}
EXPORT_SYMBOL(caif_enroll_dev);

/* notify Caif of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
			      void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct caif_device_entry *caifd = NULL;
	struct caif_dev_common *caifdev;
	struct cfcnfg *cfg;
	struct cflayer *layer, *link_support;
	int head_room = 0;
	struct caif_device_entry_list *caifdevs;

	cfg = get_cfcnfg(dev_net(dev));
	caifdevs = caif_device_list(dev_net(dev));

	caifd = caif_get(dev);
	if (caifd == NULL && dev->type != ARPHRD_CAIF)
		return 0;

	switch (what) {
	case NETDEV_REGISTER:
		if (caifd != NULL)
			break;

		caifdev = netdev_priv(dev);

		link_support = NULL;
		if (caifdev->use_frag) {
			head_room = 1;
			link_support = cfserl_create(dev->ifindex,
						     caifdev->use_stx);
			if (!link_support) {
				pr_warn("Out of memory\n");
				break;
			}
		}
		caif_enroll_dev(dev, caifdev, link_support, head_room,
				&layer, NULL);
		caifdev->flowctrl = dev_flowctrl;
		break;

	case NETDEV_UP:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (caifd == NULL) {
			rcu_read_unlock();
			break;
		}

		caifd->xoff = 0;
		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
		rcu_read_unlock();

		break;

	case NETDEV_DOWN:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
			rcu_read_unlock();
			return -EINVAL;
		}

		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
		caifd_hold(caifd);
		rcu_read_unlock();

		caifd->layer.up->ctrlcmd(caifd->layer.up,
					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
					 caifd->layer.id);

		spin_lock_bh(&caifd->flow_lock);

		/*
		 * Replace our xoff-destructor with the original destructor.
		 * We trust that skb->destructor *always* is called before
		 * the skb reference is invalid. The hijacked SKB destructor
		 * takes the flow_lock, so manipulating skb->destructor here
		 * should be safe.
		 */
		if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
			caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;

		caifd->xoff = 0;
		caifd->xoff_skb_dtor = NULL;
		caifd->xoff_skb = NULL;

		spin_unlock_bh(&caifd->flow_lock);
		caifd_put(caifd);
		break;

	case NETDEV_UNREGISTER:
		mutex_lock(&caifdevs->lock);

		caifd = caif_get(dev);
		if (caifd == NULL) {
			mutex_unlock(&caifdevs->lock);
			break;
		}
		list_del_rcu(&caifd->list);

		/*
		 * NETDEV_UNREGISTER is called repeatedly until all reference
		 * counts for the net-device are released. If references to
		 * caifd are still held, simply ignore NETDEV_UNREGISTER and
		 * wait for the next call to NETDEV_UNREGISTER.
		 *
		 * If any packets are in flight down the CAIF Stack,
		 * cfcnfg_del_phy_layer will return nonzero.
		 * If no packets are in flight, the CAIF Stack associated
		 * with the net-device un-registering is freed.
		 */

		if (caifd_refcnt_read(caifd) != 0 ||
		    cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {

			pr_info("Wait for device inuse\n");
			/* Re-enroll device if CAIF Stack is still in use */
			list_add_rcu(&caifd->list, &caifdevs->list);
			mutex_unlock(&caifdevs->lock);
			break;
		}

		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);

		mutex_unlock(&caifdevs->lock);
		break;
	}
	return 0;
}

static struct notifier_block caif_device_notifier = {
	.notifier_call = caif_device_notify,
	.priority = 0,
};

/* Per-namespace Caif devices handling */
static int caif_init_net(struct net *net)
{
	struct caif_net *caifn = net_generic(net, caif_net_id);
	INIT_LIST_HEAD(&caifn->caifdevs.list);
	mutex_init(&caifn->caifdevs.lock);

	caifn->cfg = cfcnfg_create();
	if (!caifn->cfg)
		return -ENOMEM;

	return 0;
}

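/*
 * Namespace teardown: unlink every enrolled device, then poll up to
 * ten times (250 ms apart) for in-flight packets and references to
 * drain before freeing each entry and removing the configuration.
 */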
static void caif_exit_net(struct net *net)
{
	struct caif_device_entry *caifd, *tmp;
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(net);
	struct cfcnfg *cfg = get_cfcnfg(net);

	rtnl_lock();
	mutex_lock(&caifdevs->lock);

	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
		int i = 0;
		list_del_rcu(&caifd->list);
		cfcnfg_set_phy_state(cfg, &caifd->layer, false);

		while (i < 10 &&
		       (caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {

			pr_info("Wait for device inuse\n");
			msleep(250);
			i++;
		}
		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);
	}
	cfcnfg_remove(cfg);

	mutex_unlock(&caifdevs->lock);
	rtnl_unlock();
}

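/*
 * With .size set, the pernet core allocates a struct caif_net for each
 * namespace and stores its slot index in caif_net_id; net_generic()
 * maps a namespace back to that storage in get_cfcnfg() and
 * caif_device_list().
 */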
static struct pernet_operations caif_net_ops = {
	.init = caif_init_net,
	.exit = caif_exit_net,
	.id = &caif_net_id,
	.size = sizeof(struct caif_net),
};

/* Initialize Caif devices list */
static int __init caif_device_init(void)
{
	int result;

	result = register_pernet_subsys(&caif_net_ops);

	if (result)
		return result;

	register_netdevice_notifier(&caif_device_notifier);
	dev_add_pack(&caif_packet_type);

	return result;
}

static void __exit caif_device_exit(void)
{
	unregister_netdevice_notifier(&caif_device_notifier);
	dev_remove_pack(&caif_packet_type);
	unregister_pernet_subsys(&caif_net_ops);
}

module_init(caif_device_init);
module_exit(caif_device_exit);