/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "port.h"
#include "bcast.h"

#define MAX_PKT_DEFAULT_MCAST 1500	/* bcast link max packet size (fixed) */

#define BCLINK_WIN_DEFAULT 20		/* bcast link window size (default) */

/*
 * Loss rate for incoming broadcast frames; used to test retransmission code.
 * Set to N to cause every N'th frame to be discarded; 0 => don't discard any.
 */

#define TIPC_BCAST_LOSS_RATE 0

/**
 * struct bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */

struct bcbearer_pair {
	struct tipc_bearer *primary;
	struct tipc_bearer *secondary;
};

/**
 * struct bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
 * @media: (non-standard) broadcast media structure
 * @bpairs: array of bearer pairs
 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 * @remains: temporary node map used by tipc_bcbearer_send()
 * @remains_new: temporary node map used by tipc_bcbearer_send()
 *
 * Note: The fields labelled "temporary" are incorporated into the bearer
 * to avoid consuming potentially limited stack space through the use of
 * large local variables within multicast routines.  Concurrent access is
 * prevented through use of the spinlock "bc_lock".
 */

struct bcbearer {
	struct tipc_bearer bearer;
	struct media media;
	struct bcbearer_pair bpairs[MAX_BEARERS];
	struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
	struct tipc_node_map remains;
	struct tipc_node_map remains_new;
};

/**
 * struct bclink - link used for broadcast messages
 * @link: (non-standard) broadcast link structure
 * @node: (non-standard) node structure representing b'cast link's peer node
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */

struct bclink {
	struct link link;
	struct tipc_node node;
};

static struct bcbearer *bcbearer;
static struct bclink *bclink;
static struct link *bcl;
static DEFINE_SPINLOCK(bc_lock);

/* broadcast-capable node map */
struct tipc_node_map tipc_bcast_nmap;

const char tipc_bclink_name[] = "broadcast-link";

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);

static u32 buf_seqno(struct sk_buff *buf)
{
	return msg_seqno(buf_msg(buf));
}

static u32 bcbuf_acks(struct sk_buff *buf)
{
	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}

static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
	TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

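/*
 * Each buffer on the broadcast link's transmit queue carries an "ack count" -
 * the number of nodes that still have to acknowledge it - stored in the
 * 'handle' field of its TIPC control block via the helpers above.  The count
 * is seeded from tipc_bcast_nmap.count when the buffer is first broadcast and
 * decremented as acknowledgements arrive; the buffer is freed once it drops
 * to zero (see tipc_bclink_acknowledge()).
 */
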
static void bclink_set_last_sent(void)
{
	if (bcl->next_out)
		bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
	else
		bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
}

u32 tipc_bclink_get_last_sent(void)
{
	return bcl->fsm_msg_cnt;
}

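/*
 * The broadcast link reuses the 'fsm_msg_cnt' field of its link structure to
 * remember the sequence number of the last broadcast packet actually handed
 * to the bearers: one before the congestion backlog head when bcl->next_out
 * is set, otherwise one before the next sequence number to be assigned.
 */
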
/**
 * bclink_set_gap - set gap according to contents of current deferred pkt queue
 *
 * Called with 'node' locked, bc_lock unlocked
 */

static void bclink_set_gap(struct tipc_node *n_ptr)
{
	struct sk_buff *buf = n_ptr->bclink.deferred_head;

	n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
		mod(n_ptr->bclink.last_in);
	if (unlikely(buf != NULL))
		n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
}

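/*
 * The recorded "gap" is the range (gap_after, gap_to] of broadcast sequence
 * numbers this node has not yet received: gap_after is the last packet
 * delivered in sequence, and gap_to is the packet just before the head of
 * the deferred queue (or equal to gap_after when nothing is missing).
 * A NACK is only worth sending while gap_after is less than gap_to.
 */
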
/**
 * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
 *
 * This mechanism endeavours to prevent all nodes in network from trying
 * to ACK or NACK at the same time.
 *
 * Note: TIPC uses a different trigger to distribute ACKs than it does to
 * distribute NACKs, but tries to use the same spacing (divide by 16).
 */

static int bclink_ack_allowed(u32 n)
{
	return (n % TIPC_MIN_LINK_WIN) == tipc_own_tag;
}

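/*
 * Example: assuming TIPC_MIN_LINK_WIN is 16 (as the "divide by 16" note above
 * implies), a node whose tag is 3 only answers when the triggering count n
 * satisfies n % 16 == 3, i.e. for n = 3, 19, 35, ...  Each node thus responds
 * at most once per sixteen opportunities, and nodes with different tags
 * respond in different slots.
 */
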
/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bc_lock locked
 */

static void bclink_retransmit_pkt(u32 after, u32 to)
{
	struct sk_buff *buf;

	buf = bcl->first_out;
	while (buf && less_eq(buf_seqno(buf), after))
		buf = buf->next;
	tipc_link_retransmit(bcl, buf, mod(to - after));
}

/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bc_lock unlocked.
 */

void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *crs;
	struct sk_buff *next;
	unsigned int released = 0;

	if (less_eq(acked, n_ptr->bclink.acked))
		return;

	spin_lock_bh(&bc_lock);

	/* Skip over packets that node has previously acknowledged */

	crs = bcl->first_out;
	while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
		crs = crs->next;

	/* Update packets that node is now acknowledging */

	while (crs && less_eq(buf_seqno(crs), acked)) {
		next = crs->next;
		bcbuf_decr_acks(crs);
		if (bcbuf_acks(crs) == 0) {
			bcl->first_out = next;
			bcl->out_queue_size--;
			buf_discard(crs);
			released = 1;
		}
		crs = next;
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */

	if (unlikely(bcl->next_out)) {
		tipc_link_push_queue(bcl);
		bclink_set_last_sent();
	}
	if (unlikely(released && !list_empty(&bcl->waiting_ports)))
		tipc_link_wakeup_ports(bcl, 0);
	spin_unlock_bh(&bc_lock);
}

/**
 * bclink_send_ack - unicast an ACK msg
 *
 * tipc_net_lock and node lock set
 */

static void bclink_send_ack(struct tipc_node *n_ptr)
{
	struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];

	if (l_ptr != NULL)
		tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
}

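/*
 * Note: broadcast acknowledgements are not broadcast themselves;
 * bclink_send_ack() simply prompts a STATE protocol message on one of the
 * (at most two) active unicast links to the peer, chosen deterministically
 * from the low bit of the peer's address, and the broadcast acknowledgement
 * state is presumably carried inside that protocol message (built in link.c).
 */
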
/**
 * bclink_send_nack - broadcast a NACK msg
 *
 * tipc_net_lock and node lock set
 */

static void bclink_send_nack(struct tipc_node *n_ptr)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
		return;

	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		msg = buf_msg(buf);
		tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_mc_netid(msg, tipc_net_id);
		msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
		msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
		msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
		msg_set_bcast_tag(msg, tipc_own_tag);

		if (tipc_bearer_send(&bcbearer->bearer, buf, NULL)) {
			bcl->stats.sent_nacks++;
			buf_discard(buf);
		} else {
			tipc_bearer_schedule(bcl->b_ptr, bcl);
			bcl->proto_msg_queue = buf;
			bcl->stats.bearer_congs++;
		}

		/*
		 * Ensure we don't send another NACK msg to the node
		 * until 16 more deferred messages arrive from it
		 * (i.e. helps prevent all nodes from NACK'ing at same time)
		 */

		n_ptr->bclink.nack_sync = tipc_own_tag;
	}
}

/**
 * tipc_bclink_check_gap - send a NACK if a sequence gap exists
 *
 * tipc_net_lock and node lock set
 */

void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 last_sent)
{
	if (!n_ptr->bclink.supported ||
	    less_eq(last_sent, mod(n_ptr->bclink.last_in)))
		return;

	bclink_set_gap(n_ptr);
	if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
		n_ptr->bclink.gap_to = last_sent;
	bclink_send_nack(n_ptr);
}

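/*
 * NACK suppression: tipc_bclink_peek_nack() below lets a node eavesdrop on a
 * NACK addressed to another node and trim its own recorded gap so that it
 * does not immediately request the same range again.  At most it sends a
 * "complementary" NACK for whatever part of its gap the overheard NACK did
 * not cover, and only when bclink_ack_allowed() permits it in the slot
 * following the original sender's tag.
 */
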
/**
 * tipc_bclink_peek_nack - process a NACK msg meant for another node
 *
 * Only tipc_net_lock set.
 */

static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
{
	struct tipc_node *n_ptr = tipc_node_find(dest);
	u32 my_after, my_to;

	if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr)))
		return;
	tipc_node_lock(n_ptr);
	/*
	 * Modify gap to suppress unnecessary NACKs from this node
	 */
	my_after = n_ptr->bclink.gap_after;
	my_to = n_ptr->bclink.gap_to;

	if (less_eq(gap_after, my_after)) {
		if (less(my_after, gap_to) && less(gap_to, my_to))
			n_ptr->bclink.gap_after = gap_to;
		else if (less_eq(my_to, gap_to))
			n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
	} else if (less_eq(gap_after, my_to)) {
		if (less_eq(my_to, gap_to))
			n_ptr->bclink.gap_to = gap_after;
	} else {
		/*
		 * Expand gap if missing bufs not in deferred queue:
		 */
		struct sk_buff *buf = n_ptr->bclink.deferred_head;
		u32 prev = n_ptr->bclink.gap_to;

		for (; buf; buf = buf->next) {
			u32 seqno = buf_seqno(buf);

			if (mod(seqno - prev) != 1) {
				buf = NULL;
				break;
			}
			if (seqno == gap_after)
				break;
			prev = seqno;
		}
		if (buf == NULL)
			n_ptr->bclink.gap_to = gap_after;
	}
	/*
	 * Some nodes may send a complementary NACK now:
	 */
	if (bclink_ack_allowed(sender_tag + 1)) {
		if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
			bclink_send_nack(n_ptr);
			bclink_set_gap(n_ptr);
		}
	}
	tipc_node_unlock(n_ptr);
}

/**
 * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
 */

int tipc_bclink_send_msg(struct sk_buff *buf)
{
	int res;

	spin_lock_bh(&bc_lock);

	res = tipc_link_send_buf(bcl, buf);
	if (unlikely(res == -ELINKCONG))
		buf_discard(buf);
	else
		bclink_set_last_sent();

	if (bcl->out_queue_size > bcl->stats.max_queue_sz)
		bcl->stats.max_queue_sz = bcl->out_queue_size;
	bcl->stats.queue_sz_counts++;
	bcl->stats.accu_queue_sz += bcl->out_queue_size;

	spin_unlock_bh(&bc_lock);
	return res;
}

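/*
 * Reception outline (see tipc_bclink_recv_pkt() below): an in-sequence packet
 * is delivered at once, and any deferred packet that has now become
 * in-sequence is fed back in through the 'receive:' label; a packet arriving
 * ahead of sequence is placed on the deferred queue and widens the recorded
 * gap, eventually triggering a NACK; anything older than expected is counted
 * as a duplicate and dropped.
 */
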
/**
 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
 *
 * tipc_net_lock is read_locked, no other locks set
 */

void tipc_bclink_recv_pkt(struct sk_buff *buf)
{
#if (TIPC_BCAST_LOSS_RATE)
	static int rx_count;
#endif
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node = tipc_node_find(msg_prevnode(msg));
	u32 next_in;
	u32 seqno;
	struct sk_buff *deferred;

	if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported ||
		     (msg_mc_netid(msg) != tipc_net_id))) {
		buf_discard(buf);
		return;
	}

	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_destnode(msg) == tipc_own_addr) {
			tipc_node_lock(node);
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_node_unlock(node);
			spin_lock_bh(&bc_lock);
			bcl->stats.recv_nacks++;
			bcl->owner->next = node;	/* remember requestor */
			bclink_retransmit_pkt(msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			bcl->owner->next = NULL;
			spin_unlock_bh(&bc_lock);
		} else {
			tipc_bclink_peek_nack(msg_destnode(msg),
					      msg_bcast_tag(msg),
					      msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
		}
		buf_discard(buf);
		return;
	}

#if (TIPC_BCAST_LOSS_RATE)
	if (++rx_count == TIPC_BCAST_LOSS_RATE) {
		rx_count = 0;
		buf_discard(buf);
		return;
	}
#endif

	tipc_node_lock(node);
receive:
	deferred = node->bclink.deferred_head;
	next_in = mod(node->bclink.last_in + 1);
	seqno = msg_seqno(msg);

	if (likely(seqno == next_in)) {
		bcl->stats.recv_info++;
		node->bclink.last_in++;
		bclink_set_gap(node);
		if (unlikely(bclink_ack_allowed(seqno))) {
			bclink_send_ack(node);
			bcl->stats.sent_acks++;
		}
		if (likely(msg_isdata(msg))) {
			tipc_node_unlock(node);
			tipc_port_recv_mcast(buf, NULL);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			tipc_node_unlock(node);
			tipc_link_recv_bundle(buf);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			bcl->stats.recv_fragments++;
			if (tipc_link_recv_fragment(&node->bclink.defragm,
						    &buf, &msg))
				bcl->stats.recv_fragmented++;
			tipc_node_unlock(node);
			tipc_net_route_msg(buf);
		} else {
			tipc_node_unlock(node);
			tipc_net_route_msg(buf);
		}
		if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
			tipc_node_lock(node);
			buf = deferred;
			msg = buf_msg(buf);
			node->bclink.deferred_head = deferred->next;
			goto receive;
		}
		return;
	} else if (less(next_in, seqno)) {
		u32 gap_after = node->bclink.gap_after;
		u32 gap_to = node->bclink.gap_to;

		if (tipc_link_defer_pkt(&node->bclink.deferred_head,
					&node->bclink.deferred_tail,
					buf)) {
			node->bclink.nack_sync++;
			bcl->stats.deferred_recv++;
			if (seqno == mod(gap_after + 1))
				node->bclink.gap_after = seqno;
			else if (less(gap_after, seqno) && less(seqno, gap_to))
				node->bclink.gap_to = seqno;
		}
		if (bclink_ack_allowed(node->bclink.nack_sync)) {
			if (gap_to != gap_after)
				bclink_send_nack(node);
			bclink_set_gap(node);
		}
	} else {
		bcl->stats.duplicates++;
		buf_discard(buf);
	}
	tipc_node_unlock(node);
}

u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.supported &&
		(tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
}

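/*
 * Note: the broadcast pseudo-bearer fans each packet out over the real
 * bearers.  tipc_bcbearer_sort() groups bearers of equal priority into
 * primary/secondary pairs; tipc_bcbearer_send() walks the pairs in priority
 * order, skips any pair that reaches no additional nodes (tracked via the
 * 'remains'/'remains_new' node maps), and swaps primary and secondary after a
 * send so that successive broadcasts share the load between paired bearers.
 */
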
/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send through as many bearers as necessary to reach all nodes
 * that support TIPC multicasting.
 *
 * Returns 0 if packet sent successfully, non-zero if not
 */

static int tipc_bcbearer_send(struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;

	/* Prepare buffer for broadcasting (if first time trying to send it) */

	if (likely(!msg_non_seq(buf_msg(buf)))) {
		struct tipc_msg *msg;

		assert(tipc_bcast_nmap.count != 0);
		bcbuf_set_acks(buf, tipc_bcast_nmap.count);
		msg = buf_msg(buf);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tipc_net_id);
		bcl->stats.sent_info++;
	}

	/* Send buffer over bearers until all targets reached */

	bcbearer->remains = tipc_bcast_nmap;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;

		if (!p)
			break;	/* no more bearers to try */

		tipc_nmap_diff(&bcbearer->remains, &p->nodes, &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue;	/* bearer pair doesn't add anything */

		if (p->blocked ||
		    p->media->send_msg(buf, p, &p->media->bcast_addr)) {
			/* unable to send on primary bearer */
			if (!s || s->blocked ||
			    s->media->send_msg(buf, s,
					       &s->media->bcast_addr)) {
				/* unable to send on either bearer */
				continue;
			}
		}

		if (s) {
			bcbearer->bpairs[bp_index].primary = s;
			bcbearer->bpairs[bp_index].secondary = p;
		}

		if (bcbearer->remains_new.count == 0)
			return 0;

		bcbearer->remains = bcbearer->remains_new;
	}

	/*
	 * Unable to reach all targets (indicate success, since currently
	 * there isn't code in place to properly block & unblock the
	 * pseudo-bearer used by the broadcast link)
	 */

	return TIPC_OK;
}

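/*
 * Note: tipc_bcbearer_sort() below assumes at most two enabled bearers per
 * priority level.  Bearers of equal priority form one primary/secondary pair
 * when they reach the same set of nodes; otherwise each becomes the primary
 * of its own pair.  The resulting array is ordered by decreasing priority,
 * which is the order tipc_bcbearer_send() tries the pairs in.
 */
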
/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */

void tipc_bcbearer_sort(void)
{
	struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct bcbearer_pair *bp_curr;
	int b_index;
	int pri;

	spin_lock_bh(&bc_lock);

	/* Group bearers by priority (can assume max of two per priority) */

	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		struct tipc_bearer *b = &tipc_bearers[b_index];

		if (!b->active || !b->nodes.count)
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}

	/* Create array of bearer pairs for broadcasting */

	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

	spin_unlock_bh(&bc_lock);
}

/**
 * tipc_bcbearer_push - resolve bearer congestion
 *
 * Forces bclink to push out any unsent packets, until all packets are gone
 * or congestion reoccurs.
 * No locks set when function called
 */

void tipc_bcbearer_push(void)
{
	struct tipc_bearer *b_ptr;

	spin_lock_bh(&bc_lock);
	b_ptr = &bcbearer->bearer;
	if (b_ptr->blocked) {
		b_ptr->blocked = 0;
		tipc_bearer_lock_push(b_ptr);
	}
	spin_unlock_bh(&bc_lock);
}

int tipc_bclink_stats(char *buf, const u32 buf_size)
{
	struct print_buf pb;

	if (!bcl)
		return 0;

	tipc_printbuf_init(&pb, buf, buf_size);

	spin_lock_bh(&bc_lock);

	tipc_printf(&pb, "Link <%s>\n"
			 "  Window:%u packets\n",
		    bcl->name, bcl->queue_limit[0]);
	tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
		    bcl->stats.recv_info,
		    bcl->stats.recv_fragments,
		    bcl->stats.recv_fragmented,
		    bcl->stats.recv_bundles,
		    bcl->stats.recv_bundled);
	tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
		    bcl->stats.sent_info,
		    bcl->stats.sent_fragments,
		    bcl->stats.sent_fragmented,
		    bcl->stats.sent_bundles,
		    bcl->stats.sent_bundled);
	tipc_printf(&pb, "  RX naks:%u defs:%u dups:%u\n",
		    bcl->stats.recv_nacks,
		    bcl->stats.deferred_recv,
		    bcl->stats.duplicates);
	tipc_printf(&pb, "  TX naks:%u acks:%u dups:%u\n",
		    bcl->stats.sent_nacks,
		    bcl->stats.sent_acks,
		    bcl->stats.retransmitted);
	tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
		    bcl->stats.bearer_congs,
		    bcl->stats.link_congs,
		    bcl->stats.max_queue_sz,
		    bcl->stats.queue_sz_counts
		    ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
		    : 0);

	spin_unlock_bh(&bc_lock);
	return tipc_printbuf_validate(&pb);
}

int tipc_bclink_reset_stats(void)
{
	if (!bcl)
		return -ENOPROTOOPT;

	spin_lock_bh(&bc_lock);
	memset(&bcl->stats, 0, sizeof(bcl->stats));
	spin_unlock_bh(&bc_lock);
	return 0;
}

int tipc_bclink_set_queue_limits(u32 limit)
{
	if (!bcl)
		return -ENOPROTOOPT;
	if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
		return -EINVAL;

	spin_lock_bh(&bc_lock);
	tipc_link_set_queue_limits(bcl, limit);
	spin_unlock_bh(&bc_lock);
	return 0;
}

int tipc_bclink_init(void)
{
	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
	if (!bcbearer || !bclink) {
		warn("Multicast link creation failed, no memory\n");
		kfree(bcbearer);
		bcbearer = NULL;
		kfree(bclink);
		bclink = NULL;
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
	bcbearer->bearer.media = &bcbearer->media;
	bcbearer->media.send_msg = tipc_bcbearer_send;
	sprintf(bcbearer->media.name, "tipc-multicast");

	bcl = &bclink->link;
	INIT_LIST_HEAD(&bcl->waiting_ports);
	bcl->next_out_no = 1;
	spin_lock_init(&bclink->node.lock);
	bcl->owner = &bclink->node;
	bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
	bcl->b_ptr = &bcbearer->bearer;
	bcl->state = WORKING_WORKING;
	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);

	return 0;
}

void tipc_bclink_stop(void)
{
	spin_lock_bh(&bc_lock);
	if (bcbearer) {
		tipc_link_stop(bcl);
		bcl = NULL;
		kfree(bclink);
		bclink = NULL;
		kfree(bcbearer);
		bcbearer = NULL;
	}
	spin_unlock_bh(&bc_lock);
}

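/*
 * Node map helpers: a tipc_node_map is a bitmap of WSIZE-bit words plus a
 * running count of the bits that are set, so additions and removals are
 * constant-time bit operations and the number of member nodes is always
 * available without rescanning the map.
 */
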
/**
 * tipc_nmap_add - add a node to a node map
 */

void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */

void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}

/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0 ; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}
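
/*
 * Port list helpers: a port_list holds up to PLSIZE port identities per item;
 * when an item fills up, tipc_port_list_add() extends the chain with a
 * kmalloc'd item.  Only the head item's 'count' is maintained, covering the
 * whole chain, and tipc_port_list_free() releases every chained item except
 * the head, which the caller owns.
 */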

/**
 * tipc_port_list_add - add a port to a port list, ensuring no duplicates
 */

void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
{
	struct port_list *item = pl_ptr;
	int i;
	int item_sz = PLSIZE;
	int cnt = pl_ptr->count;

	for (; ; cnt -= item_sz, item = item->next) {
		if (cnt < PLSIZE)
			item_sz = cnt;
		for (i = 0; i < item_sz; i++)
			if (item->ports[i] == port)
				return;
		if (i < PLSIZE) {
			item->ports[i] = port;
			pl_ptr->count++;
			return;
		}
		if (!item->next) {
			item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
			if (!item->next) {
				warn("Incomplete multicast delivery, no memory\n");
				return;
			}
			item->next->next = NULL;
		}
	}
}

/**
 * tipc_port_list_free - free dynamically created entries in port_list chain
 *
 */

void tipc_port_list_free(struct port_list *pl_ptr)
{
	struct port_list *item;
	struct port_list *next;

	for (item = pl_ptr->next; item; item = next) {
		next = item->next;
		kfree(item);
	}
}
923 |