Commit | Line | Data |
---|---|---|
5c115590 | 1 | /* |
a43cced9 | 2 | * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. |
5c115590 AG |
3 | * |
4 | * This software is available to you under a choice of one of two | |
5 | * licenses. You may choose to be licensed under the terms of the GNU | |
6 | * General Public License (GPL) Version 2, available from the file | |
7 | * COPYING in the main directory of this source tree, or the | |
8 | * OpenIB.org BSD license below: | |
9 | * | |
10 | * Redistribution and use in source and binary forms, with or | |
11 | * without modification, are permitted provided that the following | |
12 | * conditions are met: | |
13 | * | |
14 | * - Redistributions of source code must retain the above | |
15 | * copyright notice, this list of conditions and the following | |
16 | * disclaimer. | |
17 | * | |
18 | * - Redistributions in binary form must reproduce the above | |
19 | * copyright notice, this list of conditions and the following | |
20 | * disclaimer in the documentation and/or other materials | |
21 | * provided with the distribution. | |
22 | * | |
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
30 | * SOFTWARE. | |
31 | * | |
32 | */ | |
33 | #include <linux/kernel.h> | |
d9b93842 | 34 | #include <linux/moduleparam.h> |
5a0e3ad6 | 35 | #include <linux/gfp.h> |
5c115590 AG |
36 | #include <net/sock.h> |
37 | #include <linux/in.h> | |
38 | #include <linux/list.h> | |
cb0a6056 | 39 | #include <linux/ratelimit.h> |
bc3b2d7f | 40 | #include <linux/export.h> |
4bebdd7a | 41 | #include <linux/sizes.h> |
5c115590 AG |
42 | |
43 | #include "rds.h" | |
5c115590 AG |
44 | |
45 | /* When transmitting messages in rds_send_xmit, we need to emerge from | |
46 | * time to time and briefly release the CPU. Otherwise the softlockup watchdog | |
47 | * will kick our shin. | |
48 | * Also, it seems fairer to not let one busy connection stall all the | |
49 | * others. | |
50 | * | |
51 | * send_batch_count is the number of times we'll loop in send_xmit. Setting | |
52 | * it to 0 will restore the old behavior (where we looped until we had | |
53 | * drained the queue). | |
54 | */ | |
4bebdd7a | 55 | static int send_batch_count = SZ_1K; |
5c115590 AG |
56 | module_param(send_batch_count, int, 0444); |
57 | MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue"); | |
58 | ||
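The comment above explains why rds_send_xmit() bounds its work per pass; purely as an illustration (hypothetical helper name `xmit_one_message`, assuming the kernel list/workqueue APIs used elsewhere in this file), the pattern that send_batch_count controls looks roughly like this:

```c
/* Sketch only: bounded drain, then hand leftover work back to the workqueue. */
static void drain_bounded(struct rds_conn_path *cp, int batch_limit)
{
	int sent = 0;

	/* batch_limit == 0 means "drain completely", the old behaviour. */
	while (!list_empty(&cp->cp_send_queue)) {
		xmit_one_message(cp);		/* hypothetical helper */
		if (batch_limit && ++sent >= batch_limit)
			break;
	}

	/* Leftover work is picked up later instead of hogging this CPU. */
	if (!list_empty(&cp->cp_send_queue))
		queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
}
```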
ff51bf84 | 59 | static void rds_send_remove_from_sock(struct list_head *messages, int status); |
60 | ||
5c115590 | 61 | /* |
0f4b1c7e ZB |
62 | * Reset the send state. Callers must ensure that this doesn't race with |
63 | * rds_send_xmit(). | |
5c115590 | 64 | */ |
d769ef81 | 65 | void rds_send_path_reset(struct rds_conn_path *cp) |
5c115590 AG |
66 | { |
67 | struct rds_message *rm, *tmp; | |
68 | unsigned long flags; | |
69 | ||
4e9b551c SV |
70 | if (cp->cp_xmit_rm) { |
71 | rm = cp->cp_xmit_rm; | |
72 | cp->cp_xmit_rm = NULL; | |
5c115590 AG |
73 | /* Tell the user the RDMA op is no longer mapped by the |
74 | * transport. This isn't entirely true (it's flushed out | |
75 | * independently) but as the connection is down, there's | |
76 | * no ongoing RDMA to/from that memory */ | |
7e3f2952 | 77 | rds_message_unmapped(rm); |
7e3f2952 | 78 | rds_message_put(rm); |
5c115590 | 79 | } |
7e3f2952 | 80 | |
4e9b551c SV |
81 | cp->cp_xmit_sg = 0; |
82 | cp->cp_xmit_hdr_off = 0; | |
83 | cp->cp_xmit_data_off = 0; | |
84 | cp->cp_xmit_atomic_sent = 0; | |
85 | cp->cp_xmit_rdma_sent = 0; | |
86 | cp->cp_xmit_data_sent = 0; | |
5c115590 | 87 | |
4e9b551c | 88 | cp->cp_conn->c_map_queued = 0; |
5c115590 | 89 | |
4e9b551c SV |
90 | cp->cp_unacked_packets = rds_sysctl_max_unacked_packets; |
91 | cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes; | |
5c115590 AG |
92 | |
93 | /* Mark messages as retransmissions, and move them to the send q */ | |
4e9b551c SV |
94 | spin_lock_irqsave(&cp->cp_lock, flags); |
95 | list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) { | |
5c115590 AG |
96 | set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags); |
97 | set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags); | |
98 | } | |
4e9b551c SV |
99 | list_splice_init(&cp->cp_retrans, &cp->cp_send_queue); |
100 | spin_unlock_irqrestore(&cp->cp_lock, flags); | |
101 | } | |
d769ef81 | 102 | EXPORT_SYMBOL_GPL(rds_send_path_reset); |
5c115590 | 103 | |
1f9ecd7e | 104 | static int acquire_in_xmit(struct rds_conn_path *cp) |
0f4b1c7e | 105 | { |
1f9ecd7e | 106 | return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0; |
0f4b1c7e ZB |
107 | } |
108 | ||
1f9ecd7e | 109 | static void release_in_xmit(struct rds_conn_path *cp) |
0f4b1c7e | 110 | { |
1f9ecd7e | 111 | clear_bit(RDS_IN_XMIT, &cp->cp_flags); |
4e857c58 | 112 | smp_mb__after_atomic(); |
0f4b1c7e ZB |
113 | /* |
114 | * We don't use wait_on_bit()/wake_up_bit() because our waking is in a | |
115 | * hot path and finding waiters is very rare. We don't want to walk | |
116 | * the system-wide hashed waitqueue buckets in the fast path only to | |
117 | * almost never find waiters. | |
118 | */ | |
1f9ecd7e SV |
119 | if (waitqueue_active(&cp->cp_waitq)) |
120 | wake_up_all(&cp->cp_waitq); | |
0f4b1c7e ZB |
121 | } |
122 | ||
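For context, the wake_up_all() in release_in_xmit() pairs with a waiter of roughly this shape (a simplified sketch of how the connection-shutdown path waits for RDS_IN_XMIT to drop, not a verbatim copy of that code):

```c
/* Simplified sketch of the waiter side that pairs with release_in_xmit(). */
static void wait_for_in_xmit(struct rds_conn_path *cp)
{
	/* Sleeps on cp_waitq until the RDS_IN_XMIT bit-lock is released. */
	wait_event(cp->cp_waitq, !test_bit(RDS_IN_XMIT, &cp->cp_flags));
}
```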
5c115590 | 123 | /* |
25985edc | 124 | * We're making the conscious trade-off here to only send one message |
5c115590 AG |
125 | * down the connection at a time. |
126 | * Pro: | |
127 | * - tx queueing is a simple fifo list | |
128 | * - reassembly is optional and easily done by transports per conn | |
129 | * - no per flow rx lookup at all, straight to the socket | |
130 | * - less per-frag memory and wire overhead | |
131 | * Con: | |
132 | * - queued acks can be delayed behind large messages | |
133 | * Depends: | |
134 | * - small message latency is higher behind queued large messages | |
135 | * - large message latency isn't starved by intervening small sends | |
136 | */ | |
1f9ecd7e | 137 | int rds_send_xmit(struct rds_conn_path *cp) |
5c115590 | 138 | { |
1f9ecd7e | 139 | struct rds_connection *conn = cp->cp_conn; |
5c115590 AG |
140 | struct rds_message *rm; |
141 | unsigned long flags; | |
142 | unsigned int tmp; | |
5c115590 AG |
143 | struct scatterlist *sg; |
144 | int ret = 0; | |
5c115590 | 145 | LIST_HEAD(to_be_dropped); |
443be0e5 SV |
146 | int batch_count; |
147 | unsigned long send_gen = 0; | |
11740ef4 | 148 | int same_rm = 0; |
5c115590 | 149 | |
fcc5450c | 150 | restart: |
443be0e5 | 151 | batch_count = 0; |
049ee3f5 | 152 | |
5c115590 AG |
153 | /* |
154 | * sendmsg calls here after having queued its message on the send | |
155 | * queue. We only have one task feeding the connection at a time. If | |
156 | * another thread is already feeding the queue then we back off. This | |
157 | * avoids blocking the caller and trading per-connection data between | |
158 | * caches per message. | |
5c115590 | 159 | */ |
1f9ecd7e | 160 | if (!acquire_in_xmit(cp)) { |
049ee3f5 | 161 | rds_stats_inc(s_send_lock_contention); |
5c115590 AG |
162 | ret = -ENOMEM; |
163 | goto out; | |
164 | } | |
0f4b1c7e | 165 | |
ebeeb1ad | 166 | if (rds_destroy_pending(cp->cp_conn)) { |
3db6e0d1 SV |
167 | release_in_xmit(cp); |
168 | ret = -ENETUNREACH; /* don't requeue send work */
169 | goto out; | |
170 | } | |
171 | ||
443be0e5 SV |
172 | /* |
173 | * we record the send generation after doing the xmit acquire. | |
174 | * if someone else manages to jump in and do some work, we'll use | |
175 | * this to avoid a goto restart farther down. | |
176 | * | |
177 | * The acquire_in_xmit() check above ensures that only one | |
178 | * caller can increment c_send_gen at any time. | |
179 | */ | |
e623a48e HB |
180 | send_gen = READ_ONCE(cp->cp_send_gen) + 1; |
181 | WRITE_ONCE(cp->cp_send_gen, send_gen); | |
443be0e5 | 182 | |
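```c
/*
 * Illustrative interleaving that this counter disambiguates (checked in
 * the ret == 0 path near the end of this function):
 *
 *   CPU0                                  CPU1
 *   drains queue, finds it empty
 *   release_in_xmit()
 *                                         queues a message
 *                                         acquire_in_xmit() succeeds,
 *                                         bumps cp_send_gen, sends it
 *   re-checks queue
 *
 * If CPU0's saved send_gen no longer matches cp_send_gen, CPU1 got in and
 * made progress, so CPU0 can skip its restart/requeue.
 */
```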
0f4b1c7e ZB |
183 | /* |
184 | * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT, | |
185 | * we do the opposite to avoid races. | |
186 | */ | |
1f9ecd7e SV |
187 | if (!rds_conn_path_up(cp)) { |
188 | release_in_xmit(cp); | |
0f4b1c7e ZB |
189 | ret = 0; |
190 | goto out; | |
191 | } | |
5c115590 | 192 | |
226f7a7d SV |
193 | if (conn->c_trans->xmit_path_prepare) |
194 | conn->c_trans->xmit_path_prepare(cp); | |
5c115590 AG |
195 | |
196 | /* | |
197 | * spin trying to push headers and data down the connection until | |
5b2366bd | 198 | * the connection doesn't make forward progress. |
5c115590 | 199 | */ |
fcc5450c | 200 | while (1) { |
5c115590 | 201 | |
1f9ecd7e | 202 | rm = cp->cp_xmit_rm; |
5c115590 | 203 | |
11740ef4 AG |
204 | if (!rm) { |
205 | same_rm = 0; | |
206 | } else { | |
207 | same_rm++; | |
208 | if (same_rm >= 4096) { | |
209 | rds_stats_inc(s_send_stuck_rm); | |
210 | ret = -EAGAIN; | |
211 | break; | |
212 | } | |
213 | } | |
214 | ||
5b2366bd AG |
215 | /* |
216 | * If between sending messages, we can send a pending congestion | |
217 | * map update. | |
5c115590 | 218 | */ |
8690bfa1 | 219 | if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) { |
77dd550e AG |
220 | rm = rds_cong_update_alloc(conn); |
221 | if (IS_ERR(rm)) { | |
222 | ret = PTR_ERR(rm); | |
223 | break; | |
5b2366bd | 224 | } |
77dd550e | 225 | rm->data.op_active = 1; |
1f9ecd7e SV |
226 | rm->m_inc.i_conn_path = cp; |
227 | rm->m_inc.i_conn = cp->cp_conn; | |
77dd550e | 228 | |
1f9ecd7e | 229 | cp->cp_xmit_rm = rm; |
5c115590 AG |
230 | } |
231 | ||
232 | /* | |
5b2366bd | 233 | * If not already working on one, grab the next message. |
5c115590 | 234 | * |
1f9ecd7e | 235 | * cp_xmit_rm holds a ref while we're sending this message down |
5c115590 AG |
236 | * the connection. We can use this ref while holding the
237 | * send_sem; rds_send_path_reset() is serialized with it.
238 | */ | |
8690bfa1 | 239 | if (!rm) { |
5c115590 AG |
240 | unsigned int len; |
241 | ||
443be0e5 SV |
242 | batch_count++; |
243 | ||
244 | /* we want to process as big a batch as we can, but | |
245 | * we also want to avoid softlockups. If we've been | |
246 | * through a lot of messages, let's back off and see
247 | * if anyone else jumps in | |
248 | */ | |
4bebdd7a | 249 | if (batch_count >= send_batch_count) |
443be0e5 SV |
250 | goto over_batch; |
251 | ||
1f9ecd7e | 252 | spin_lock_irqsave(&cp->cp_lock, flags); |
5c115590 | 253 | |
1f9ecd7e SV |
254 | if (!list_empty(&cp->cp_send_queue)) { |
255 | rm = list_entry(cp->cp_send_queue.next, | |
5c115590 AG |
256 | struct rds_message, |
257 | m_conn_item); | |
258 | rds_message_addref(rm); | |
259 | ||
260 | /* | |
261 | * Move the message from the send queue to the retransmit | |
262 | * list right away. | |
263 | */ | |
1f9ecd7e SV |
264 | list_move_tail(&rm->m_conn_item, |
265 | &cp->cp_retrans); | |
5c115590 AG |
266 | } |
267 | ||
1f9ecd7e | 268 | spin_unlock_irqrestore(&cp->cp_lock, flags); |
5c115590 | 269 | |
fcc5450c | 270 | if (!rm) |
5c115590 | 271 | break; |
5c115590 AG |
272 | |
273 | /* Unfortunately, the way Infiniband deals with | |
274 | * RDMA to a bad MR key is by moving the entire | |
275 | * queue pair to error state. We could possibly
276 | * recover from that, but right now we drop the | |
277 | * connection. | |
278 | * Therefore, we never retransmit messages with RDMA ops. | |
279 | */ | |
905dd418 SV |
280 | if (test_bit(RDS_MSG_FLUSH, &rm->m_flags) || |
281 | (rm->rdma.op_active && | |
282 | test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))) { | |
1f9ecd7e | 283 | spin_lock_irqsave(&cp->cp_lock, flags); |
5c115590 AG |
284 | if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) |
285 | list_move(&rm->m_conn_item, &to_be_dropped); | |
1f9ecd7e | 286 | spin_unlock_irqrestore(&cp->cp_lock, flags); |
5c115590 AG |
287 | continue; |
288 | } | |
289 | ||
290 | /* Require an ACK every once in a while */ | |
291 | len = ntohl(rm->m_inc.i_hdr.h_len); | |
1f9ecd7e SV |
292 | if (cp->cp_unacked_packets == 0 || |
293 | cp->cp_unacked_bytes < len) { | |
f530f39f | 294 | set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags); |
5c115590 | 295 | |
1f9ecd7e SV |
296 | cp->cp_unacked_packets = |
297 | rds_sysctl_max_unacked_packets; | |
298 | cp->cp_unacked_bytes = | |
299 | rds_sysctl_max_unacked_bytes; | |
5c115590 AG |
300 | rds_stats_inc(s_send_ack_required); |
301 | } else { | |
1f9ecd7e SV |
302 | cp->cp_unacked_bytes -= len; |
303 | cp->cp_unacked_packets--; | |
5c115590 AG |
304 | } |
305 | ||
1f9ecd7e | 306 | cp->cp_xmit_rm = rm; |
5c115590 AG |
307 | } |
308 | ||
2c3a5f9a | 309 | /* The transport either sends the whole rdma or none of it */ |
1f9ecd7e | 310 | if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) { |
ff3d7d36 | 311 | rm->m_final_op = &rm->rdma; |
4f73113c | 312 | /* The transport owns the mapped memory for now. |
313 | * You can't unmap it while it's on the send queue | |
314 | */ | |
315 | set_bit(RDS_MSG_MAPPED, &rm->m_flags); | |
2c3a5f9a | 316 | ret = conn->c_trans->xmit_rdma(conn, &rm->rdma); |
4f73113c | 317 | if (ret) { |
318 | clear_bit(RDS_MSG_MAPPED, &rm->m_flags); | |
319 | wake_up_interruptible(&rm->m_flush_wait); | |
15133f6e | 320 | break; |
4f73113c | 321 | } |
1f9ecd7e | 322 | cp->cp_xmit_rdma_sent = 1; |
2c3a5f9a | 323 | |
15133f6e AG |
324 | } |
325 | ||
1f9ecd7e | 326 | if (rm->atomic.op_active && !cp->cp_xmit_atomic_sent) { |
ff3d7d36 | 327 | rm->m_final_op = &rm->atomic; |
4f73113c | 328 | /* The transport owns the mapped memory for now. |
329 | * You can't unmap it while it's on the send queue | |
330 | */ | |
331 | set_bit(RDS_MSG_MAPPED, &rm->m_flags); | |
ff3d7d36 | 332 | ret = conn->c_trans->xmit_atomic(conn, &rm->atomic); |
4f73113c | 333 | if (ret) { |
334 | clear_bit(RDS_MSG_MAPPED, &rm->m_flags); | |
335 | wake_up_interruptible(&rm->m_flush_wait); | |
5c115590 | 336 | break; |
4f73113c | 337 | } |
1f9ecd7e | 338 | cp->cp_xmit_atomic_sent = 1; |
ff3d7d36 | 339 | |
5c115590 AG |
340 | } |
341 | ||
2c3a5f9a AG |
342 | /* |
343 | * A number of cases require an RDS header to be sent | |
344 | * even if there is no data. | |
345 | * We permit 0-byte sends; rds-ping depends on this. | |
346 | * However, if there are exclusively attached silent ops, | |
347 | * we skip the hdr/data send, to enable silent operation. | |
348 | */ | |
349 | if (rm->data.op_nents == 0) { | |
350 | int ops_present; | |
351 | int all_ops_are_silent = 1; | |
352 | ||
353 | ops_present = (rm->atomic.op_active || rm->rdma.op_active); | |
354 | if (rm->atomic.op_active && !rm->atomic.op_silent) | |
355 | all_ops_are_silent = 0; | |
356 | if (rm->rdma.op_active && !rm->rdma.op_silent) | |
357 | all_ops_are_silent = 0; | |
358 | ||
359 | if (ops_present && all_ops_are_silent | |
360 | && !rm->m_rdma_cookie) | |
361 | rm->data.op_active = 0; | |
362 | } | |
363 | ||
1f9ecd7e | 364 | if (rm->data.op_active && !cp->cp_xmit_data_sent) { |
ff3d7d36 | 365 | rm->m_final_op = &rm->data; |
1f9ecd7e | 366 | |
5c115590 | 367 | ret = conn->c_trans->xmit(conn, rm, |
1f9ecd7e SV |
368 | cp->cp_xmit_hdr_off, |
369 | cp->cp_xmit_sg, | |
370 | cp->cp_xmit_data_off); | |
5c115590 AG |
371 | if (ret <= 0) |
372 | break; | |
373 | ||
1f9ecd7e | 374 | if (cp->cp_xmit_hdr_off < sizeof(struct rds_header)) { |
5c115590 AG |
375 | tmp = min_t(int, ret, |
376 | sizeof(struct rds_header) - | |
1f9ecd7e SV |
377 | cp->cp_xmit_hdr_off); |
378 | cp->cp_xmit_hdr_off += tmp; | |
5c115590 AG |
379 | ret -= tmp; |
380 | } | |
381 | ||
1f9ecd7e | 382 | sg = &rm->data.op_sg[cp->cp_xmit_sg]; |
5c115590 AG |
383 | while (ret) { |
384 | tmp = min_t(int, ret, sg->length - | |
1f9ecd7e SV |
385 | cp->cp_xmit_data_off); |
386 | cp->cp_xmit_data_off += tmp; | |
5c115590 | 387 | ret -= tmp; |
1f9ecd7e SV |
388 | if (cp->cp_xmit_data_off == sg->length) { |
389 | cp->cp_xmit_data_off = 0; | |
5c115590 | 390 | sg++; |
1f9ecd7e SV |
391 | cp->cp_xmit_sg++; |
392 | BUG_ON(ret != 0 && cp->cp_xmit_sg == | |
393 | rm->data.op_nents); | |
5c115590 AG |
394 | } |
395 | } | |
5b2366bd | 396 | |
1f9ecd7e SV |
397 | if (cp->cp_xmit_hdr_off == sizeof(struct rds_header) && |
398 | (cp->cp_xmit_sg == rm->data.op_nents)) | |
399 | cp->cp_xmit_data_sent = 1; | |
5b2366bd AG |
400 | } |
401 | ||
402 | /* | |
403 | * An rm will only take multiple trips through this loop
404 | * if there is a data op. Thus, if the data is sent (or there was | |
405 | * none), then we're done with the rm. | |
406 | */ | |
1f9ecd7e SV |
407 | if (!rm->data.op_active || cp->cp_xmit_data_sent) { |
408 | cp->cp_xmit_rm = NULL; | |
409 | cp->cp_xmit_sg = 0; | |
410 | cp->cp_xmit_hdr_off = 0; | |
411 | cp->cp_xmit_data_off = 0; | |
412 | cp->cp_xmit_rdma_sent = 0; | |
413 | cp->cp_xmit_atomic_sent = 0; | |
414 | cp->cp_xmit_data_sent = 0; | |
5b2366bd AG |
415 | |
416 | rds_message_put(rm); | |
5c115590 AG |
417 | } |
418 | } | |
419 | ||
443be0e5 | 420 | over_batch: |
226f7a7d SV |
421 | if (conn->c_trans->xmit_path_complete) |
422 | conn->c_trans->xmit_path_complete(cp); | |
1f9ecd7e | 423 | release_in_xmit(cp); |
5c115590 | 424 | |
2ad8099b AG |
425 | /* Nuke any messages we decided not to retransmit. */ |
426 | if (!list_empty(&to_be_dropped)) { | |
427 | /* irqs on here, so we can put(), unlike above */ | |
428 | list_for_each_entry(rm, &to_be_dropped, m_conn_item) | |
429 | rds_message_put(rm); | |
430 | rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED); | |
431 | } | |
432 | ||
fcc5450c | 433 | /* |
0f4b1c7e ZB |
434 | * Other senders can queue a message after we last test the send queue |
435 | * but before we clear RDS_IN_XMIT. In that case they'd back off and | |
436 | * not try to send their newly queued message. We need to check the
437 | * send queue after having cleared RDS_IN_XMIT so that their message | |
438 | * doesn't get stuck on the send queue. | |
fcc5450c AG |
439 | * |
440 | * If the transport cannot continue (i.e ret != 0), then it must | |
441 | * call us when more room is available, such as from the tx | |
442 | * completion handler. | |
443be0e5 SV |
443 | * |
444 | * We have an extra generation check here so that if someone manages | |
445 | * to jump in after our release_in_xmit, we'll see that they have done | |
446 | * some work and we will skip our goto | |
fcc5450c AG |
447 | */ |
448 | if (ret == 0) { | |
126f760c HB |
449 | bool raced; |
450 | ||
9e29db0e | 451 | smp_mb(); |
126f760c HB |
452 | raced = send_gen != READ_ONCE(cp->cp_send_gen); |
453 | ||
0c484240 | 454 | if ((test_bit(0, &conn->c_map_queued) || |
126f760c | 455 | !list_empty(&cp->cp_send_queue)) && !raced) { |
4bebdd7a SS |
456 | if (batch_count < send_batch_count) |
457 | goto restart; | |
3db6e0d1 | 458 | rcu_read_lock(); |
ebeeb1ad | 459 | if (rds_destroy_pending(cp->cp_conn)) |
3db6e0d1 SV |
460 | ret = -ENETUNREACH; |
461 | else | |
462 | queue_delayed_work(rds_wq, &cp->cp_send_w, 1); | |
463 | rcu_read_unlock(); | |
126f760c HB |
464 | } else if (raced) { |
465 | rds_stats_inc(s_send_lock_queue_raced); | |
5c115590 | 466 | } |
5c115590 AG |
467 | } |
468 | out: | |
469 | return ret; | |
470 | } | |
0c28c045 | 471 | EXPORT_SYMBOL_GPL(rds_send_xmit); |
5c115590 AG |
472 | |
473 | static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm) | |
474 | { | |
475 | u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len); | |
476 | ||
477 | assert_spin_locked(&rs->rs_lock); | |
478 | ||
479 | BUG_ON(rs->rs_snd_bytes < len); | |
480 | rs->rs_snd_bytes -= len; | |
481 | ||
482 | if (rs->rs_snd_bytes == 0) | |
483 | rds_stats_inc(s_send_queue_empty); | |
484 | } | |
485 | ||
486 | static inline int rds_send_is_acked(struct rds_message *rm, u64 ack, | |
487 | is_acked_func is_acked) | |
488 | { | |
489 | if (is_acked) | |
490 | return is_acked(rm, ack); | |
491 | return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack; | |
492 | } | |
493 | ||
5c115590 AG |
494 | /* |
495 | * This is pretty similar to what happens below in the ACK | |
496 | * handling code - except that we call here as soon as we get | |
497 | * the IB send completion on the RDMA op and the accompanying | |
498 | * message. | |
499 | */ | |
500 | void rds_rdma_send_complete(struct rds_message *rm, int status) | |
501 | { | |
502 | struct rds_sock *rs = NULL; | |
f8b3aaf2 | 503 | struct rm_rdma_op *ro; |
5c115590 | 504 | struct rds_notifier *notifier; |
9de0864c | 505 | unsigned long flags; |
5c115590 | 506 | |
9de0864c | 507 | spin_lock_irqsave(&rm->m_rs_lock, flags); |
5c115590 | 508 | |
f8b3aaf2 | 509 | ro = &rm->rdma; |
f64f9e71 | 510 | if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) && |
616d37a0 | 511 | ro->op_active && ro->op_notify && ro->op_notifier) { |
f8b3aaf2 | 512 | notifier = ro->op_notifier; |
5c115590 AG |
513 | rs = rm->m_rs; |
514 | sock_hold(rds_rs_to_sk(rs)); | |
515 | ||
516 | notifier->n_status = status; | |
517 | spin_lock(&rs->rs_lock); | |
518 | list_add_tail(¬ifier->n_list, &rs->rs_notify_queue); | |
519 | spin_unlock(&rs->rs_lock); | |
520 | ||
f8b3aaf2 | 521 | ro->op_notifier = NULL; |
5c115590 AG |
522 | } |
523 | ||
9de0864c | 524 | spin_unlock_irqrestore(&rm->m_rs_lock, flags); |
5c115590 AG |
525 | |
526 | if (rs) { | |
527 | rds_wake_sk_sleep(rs); | |
528 | sock_put(rds_rs_to_sk(rs)); | |
529 | } | |
530 | } | |
616b757a | 531 | EXPORT_SYMBOL_GPL(rds_rdma_send_complete); |
5c115590 | 532 | |
15133f6e AG |
533 | /* |
534 | * Just like above, except looks at atomic op | |
535 | */ | |
536 | void rds_atomic_send_complete(struct rds_message *rm, int status) | |
537 | { | |
538 | struct rds_sock *rs = NULL; | |
539 | struct rm_atomic_op *ao; | |
540 | struct rds_notifier *notifier; | |
cf4b7389 | 541 | unsigned long flags; |
15133f6e | 542 | |
cf4b7389 | 543 | spin_lock_irqsave(&rm->m_rs_lock, flags); |
15133f6e AG |
544 | |
545 | ao = &rm->atomic; | |
546 | if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) | |
547 | && ao->op_active && ao->op_notify && ao->op_notifier) { | |
548 | notifier = ao->op_notifier; | |
549 | rs = rm->m_rs; | |
550 | sock_hold(rds_rs_to_sk(rs)); | |
551 | ||
552 | notifier->n_status = status; | |
553 | spin_lock(&rs->rs_lock); | |
554 | list_add_tail(¬ifier->n_list, &rs->rs_notify_queue); | |
555 | spin_unlock(&rs->rs_lock); | |
556 | ||
557 | ao->op_notifier = NULL; | |
558 | } | |
559 | ||
cf4b7389 | 560 | spin_unlock_irqrestore(&rm->m_rs_lock, flags); |
15133f6e AG |
561 | |
562 | if (rs) { | |
563 | rds_wake_sk_sleep(rs); | |
564 | sock_put(rds_rs_to_sk(rs)); | |
565 | } | |
566 | } | |
567 | EXPORT_SYMBOL_GPL(rds_atomic_send_complete); | |
568 | ||
5c115590 AG |
569 | /* |
570 | * This is the same as rds_rdma_send_complete except we | |
571 | * don't do any locking - we have all the ingredients (message, | |
572 | * socket, socket lock) and can just move the notifier. | |
573 | */ | |
574 | static inline void | |
940786eb | 575 | __rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status) |
5c115590 | 576 | { |
f8b3aaf2 | 577 | struct rm_rdma_op *ro; |
940786eb | 578 | struct rm_atomic_op *ao; |
5c115590 | 579 | |
f8b3aaf2 AG |
580 | ro = &rm->rdma; |
581 | if (ro->op_active && ro->op_notify && ro->op_notifier) { | |
582 | ro->op_notifier->n_status = status; | |
583 | list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue); | |
584 | ro->op_notifier = NULL; | |
5c115590 AG |
585 | } |
586 | ||
940786eb AG |
587 | ao = &rm->atomic; |
588 | if (ao->op_active && ao->op_notify && ao->op_notifier) { | |
589 | ao->op_notifier->n_status = status; | |
590 | list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue); | |
591 | ao->op_notifier = NULL; | |
592 | } | |
593 | ||
5c115590 AG |
594 | /* No need to wake the app - caller does this */ |
595 | } | |
596 | ||
5c115590 AG |
597 | /* |
598 | * This removes messages from the socket's list if they're on it. The list | |
599 | * argument must be private to the caller; we must be able to modify it
600 | * without locks. The messages must have a reference held for their | |
601 | * position on the list. This function will drop that reference after | |
602 | * removing the messages from the 'messages' list regardless of if it found | |
603 | * the messages on the socket list or not. | |
604 | */ | |
ff51bf84 | 605 | static void rds_send_remove_from_sock(struct list_head *messages, int status) |
5c115590 | 606 | { |
561c7df6 | 607 | unsigned long flags; |
5c115590 AG |
608 | struct rds_sock *rs = NULL; |
609 | struct rds_message *rm; | |
610 | ||
5c115590 | 611 | while (!list_empty(messages)) { |
561c7df6 AG |
612 | int was_on_sock = 0; |
613 | ||
5c115590 AG |
614 | rm = list_entry(messages->next, struct rds_message, |
615 | m_conn_item); | |
616 | list_del_init(&rm->m_conn_item); | |
617 | ||
618 | /* | |
619 | * If we see this flag cleared then we're *sure* that someone | |
620 | * else beat us to removing it from the sock. If we race | |
621 | * with their flag update we'll get the lock and then really | |
622 | * see that the flag has been cleared. | |
623 | * | |
624 | * The message spinlock makes sure nobody clears rm->m_rs | |
625 | * while we're messing with it. It does not prevent the | |
626 | * message from being removed from the socket, though. | |
627 | */ | |
561c7df6 | 628 | spin_lock_irqsave(&rm->m_rs_lock, flags); |
5c115590 AG |
629 | if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) |
630 | goto unlock_and_drop; | |
631 | ||
632 | if (rs != rm->m_rs) { | |
633 | if (rs) { | |
5c115590 AG |
634 | rds_wake_sk_sleep(rs); |
635 | sock_put(rds_rs_to_sk(rs)); | |
636 | } | |
637 | rs = rm->m_rs; | |
593cbb3e HK |
638 | if (rs) |
639 | sock_hold(rds_rs_to_sk(rs)); | |
5c115590 | 640 | } |
593cbb3e HK |
641 | if (!rs) |
642 | goto unlock_and_drop; | |
048c15e6 | 643 | spin_lock(&rs->rs_lock); |
5c115590 AG |
644 | |
645 | if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) { | |
f8b3aaf2 | 646 | struct rm_rdma_op *ro = &rm->rdma; |
5c115590 AG |
647 | struct rds_notifier *notifier; |
648 | ||
649 | list_del_init(&rm->m_sock_item); | |
650 | rds_send_sndbuf_remove(rs, rm); | |
651 | ||
f8b3aaf2 AG |
652 | if (ro->op_active && ro->op_notifier && |
653 | (ro->op_notify || (ro->op_recverr && status))) { | |
654 | notifier = ro->op_notifier; | |
5c115590 AG |
655 | list_add_tail(¬ifier->n_list, |
656 | &rs->rs_notify_queue); | |
657 | if (!notifier->n_status) | |
658 | notifier->n_status = status; | |
f8b3aaf2 | 659 | rm->rdma.op_notifier = NULL; |
5c115590 | 660 | } |
561c7df6 | 661 | was_on_sock = 1; |
5c115590 | 662 | } |
048c15e6 | 663 | spin_unlock(&rs->rs_lock); |
5c115590 AG |
664 | |
665 | unlock_and_drop: | |
561c7df6 | 666 | spin_unlock_irqrestore(&rm->m_rs_lock, flags); |
5c115590 | 667 | rds_message_put(rm); |
561c7df6 AG |
668 | if (was_on_sock) |
669 | rds_message_put(rm); | |
5c115590 AG |
670 | } |
671 | ||
672 | if (rs) { | |
5c115590 AG |
673 | rds_wake_sk_sleep(rs); |
674 | sock_put(rds_rs_to_sk(rs)); | |
675 | } | |
5c115590 AG |
676 | } |
677 | ||
678 | /* | |
679 | * Transports call here when they've determined that the receiver queued | |
680 | * messages up to, and including, the given sequence number. Messages are | |
681 | * moved to the retrans queue when rds_send_xmit picks them off the send | |
682 | * queue. This means that in the TCP case, the message may not have been | |
683 | * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked | |
684 | * checks the RDS_MSG_HAS_ACK_SEQ bit. | |
5c115590 | 685 | */ |
5c3d274c SV |
686 | void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack, |
687 | is_acked_func is_acked) | |
5c115590 AG |
688 | { |
689 | struct rds_message *rm, *tmp; | |
690 | unsigned long flags; | |
691 | LIST_HEAD(list); | |
692 | ||
5c3d274c | 693 | spin_lock_irqsave(&cp->cp_lock, flags); |
5c115590 | 694 | |
5c3d274c | 695 | list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) { |
5c115590 AG |
696 | if (!rds_send_is_acked(rm, ack, is_acked)) |
697 | break; | |
698 | ||
699 | list_move(&rm->m_conn_item, &list); | |
700 | clear_bit(RDS_MSG_ON_CONN, &rm->m_flags); | |
701 | } | |
702 | ||
703 | /* order flag updates with spin locks */ | |
704 | if (!list_empty(&list)) | |
4e857c58 | 705 | smp_mb__after_atomic(); |
5c115590 | 706 | |
5c3d274c | 707 | spin_unlock_irqrestore(&cp->cp_lock, flags); |
5c115590 AG |
708 | |
709 | /* now remove the messages from the sock list as needed */ | |
710 | rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS); | |
711 | } | |
5c3d274c SV |
712 | EXPORT_SYMBOL_GPL(rds_send_path_drop_acked); |
713 | ||
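As a usage sketch, a transport reports acked sequence numbers through this entry point with an optional is_acked callback. The callback below is hypothetical, loosely modeled on what the TCP transport does with RDS_MSG_HAS_ACK_SEQ; it is an assumption-laden illustration, not code from this file:

```c
/* Hypothetical transport callback, loosely modeled on the TCP transport. */
static int my_trans_is_acked(struct rds_message *rm, u64 ack)
{
	/* m_ack_seq is only meaningful once the transport has assigned it. */
	if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
		return 0;
	return rm->m_ack_seq <= ack;
}

static void my_trans_ack_received(struct rds_conn_path *cp, u64 acked)
{
	rds_send_path_drop_acked(cp, acked, my_trans_is_acked);
}
```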
714 | void rds_send_drop_acked(struct rds_connection *conn, u64 ack, | |
715 | is_acked_func is_acked) | |
716 | { | |
717 | WARN_ON(conn->c_trans->t_mp_capable); | |
718 | rds_send_path_drop_acked(&conn->c_path[0], ack, is_acked); | |
719 | } | |
616b757a | 720 | EXPORT_SYMBOL_GPL(rds_send_drop_acked); |
5c115590 | 721 | |
eee2fa6a | 722 | void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in6 *dest) |
5c115590 AG |
723 | { |
724 | struct rds_message *rm, *tmp; | |
725 | struct rds_connection *conn; | |
01ff34ed | 726 | struct rds_conn_path *cp; |
7c82eaf0 | 727 | unsigned long flags; |
5c115590 | 728 | LIST_HEAD(list); |
5c115590 AG |
729 | |
730 | /* get all the messages we're dropping under the rs lock */ | |
731 | spin_lock_irqsave(&rs->rs_lock, flags); | |
732 | ||
733 | list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) { | |
eee2fa6a KCP |
734 | if (dest && |
735 | (!ipv6_addr_equal(&dest->sin6_addr, &rm->m_daddr) || | |
736 | dest->sin6_port != rm->m_inc.i_hdr.h_dport)) | |
5c115590 AG |
737 | continue; |
738 | ||
5c115590 AG |
739 | list_move(&rm->m_sock_item, &list); |
740 | rds_send_sndbuf_remove(rs, rm); | |
741 | clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags); | |
5c115590 AG |
742 | } |
743 | ||
744 | /* order flag updates with the rs lock */ | |
4e857c58 | 745 | smp_mb__after_atomic(); |
5c115590 AG |
746 | |
747 | spin_unlock_irqrestore(&rs->rs_lock, flags); | |
748 | ||
7c82eaf0 AG |
749 | if (list_empty(&list)) |
750 | return; | |
5c115590 | 751 | |
7c82eaf0 | 752 | /* Remove the messages from the conn */ |
5c115590 | 753 | list_for_each_entry(rm, &list, m_sock_item) { |
7c82eaf0 AG |
754 | |
755 | conn = rm->m_inc.i_conn; | |
01ff34ed SV |
756 | if (conn->c_trans->t_mp_capable) |
757 | cp = rm->m_inc.i_conn_path; | |
758 | else | |
759 | cp = &conn->c_path[0]; | |
5c115590 | 760 | |
01ff34ed | 761 | spin_lock_irqsave(&cp->cp_lock, flags); |
5c115590 | 762 | /* |
7c82eaf0 AG |
763 | * Maybe someone else beat us to removing rm from the conn. |
764 | * If we race with their flag update we'll get the lock and | |
765 | * then really see that the flag has been cleared. | |
5c115590 | 766 | */ |
7c82eaf0 | 767 | if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) { |
01ff34ed | 768 | spin_unlock_irqrestore(&cp->cp_lock, flags); |
5c115590 | 769 | continue; |
5c115590 | 770 | } |
9de0864c | 771 | list_del_init(&rm->m_conn_item); |
01ff34ed | 772 | spin_unlock_irqrestore(&cp->cp_lock, flags); |
5c115590 | 773 | |
7c82eaf0 AG |
774 | /* |
775 | * Couldn't grab m_rs_lock in top loop (lock ordering), | |
776 | * but we can now. | |
777 | */ | |
9de0864c | 778 | spin_lock_irqsave(&rm->m_rs_lock, flags); |
5c115590 | 779 | |
7c82eaf0 | 780 | spin_lock(&rs->rs_lock); |
940786eb | 781 | __rds_send_complete(rs, rm, RDS_RDMA_CANCELED); |
7c82eaf0 AG |
782 | spin_unlock(&rs->rs_lock); |
783 | ||
9de0864c | 784 | spin_unlock_irqrestore(&rm->m_rs_lock, flags); |
7c82eaf0 | 785 | |
7c82eaf0 | 786 | rds_message_put(rm); |
7c82eaf0 | 787 | } |
5c115590 | 788 | |
7c82eaf0 | 789 | rds_wake_sk_sleep(rs); |
550a8002 | 790 | |
5c115590 AG |
791 | while (!list_empty(&list)) { |
792 | rm = list_entry(list.next, struct rds_message, m_sock_item); | |
793 | list_del_init(&rm->m_sock_item); | |
5c115590 | 794 | rds_message_wait(rm); |
dfcec251 | 795 | |
796 | /* just in case the code above skipped this message | |
797 | * because RDS_MSG_ON_CONN wasn't set, run it again here.
798 | * Taking m_rs_lock is the only thing that keeps us
799 | * from racing with ack processing. | |
800 | */ | |
801 | spin_lock_irqsave(&rm->m_rs_lock, flags); | |
802 | ||
803 | spin_lock(&rs->rs_lock); | |
804 | __rds_send_complete(rs, rm, RDS_RDMA_CANCELED); | |
805 | spin_unlock(&rs->rs_lock); | |
806 | ||
dfcec251 | 807 | spin_unlock_irqrestore(&rm->m_rs_lock, flags); |
808 | ||
5c115590 AG |
809 | rds_message_put(rm); |
810 | } | |
811 | } | |
812 | ||
813 | /* | |
814 | * we only want this to fire once so we use the caller's 'queued'. It's
815 | * possible that another thread can race with us and remove the | |
816 | * message from the flow with RDS_CANCEL_SENT_TO. | |
817 | */ | |
818 | static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn, | |
780a6d9e | 819 | struct rds_conn_path *cp, |
5c115590 AG |
820 | struct rds_message *rm, __be16 sport, |
821 | __be16 dport, int *queued) | |
822 | { | |
823 | unsigned long flags; | |
824 | u32 len; | |
825 | ||
826 | if (*queued) | |
827 | goto out; | |
828 | ||
829 | len = be32_to_cpu(rm->m_inc.i_hdr.h_len); | |
830 | ||
831 | /* this is the only place which holds both the socket's rs_lock | |
832 | * and the connection's c_lock */ | |
833 | spin_lock_irqsave(&rs->rs_lock, flags); | |
834 | ||
835 | /* | |
836 | * If there is a little space in sndbuf, we don't queue anything, | |
837 | * and userspace gets -EAGAIN. But poll() indicates there's send | |
838 | * room. This can lead to bad behavior (spinning) if snd_bytes isn't | |
839 | * freed up by incoming acks. So we check the *old* value of | |
840 | * rs_snd_bytes here to allow the last msg to exceed the buffer, | |
841 | * and poll() now knows no more data can be sent. | |
842 | */ | |
843 | if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) { | |
844 | rs->rs_snd_bytes += len; | |
845 | ||
846 | /* let recv side know we are close to send space exhaustion. | |
847 | * This is probably not the optimal way to do it, as this | |
848 | * means we set the flag on *all* messages as soon as our | |
849 | * throughput hits a certain threshold. | |
850 | */ | |
851 | if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2) | |
f530f39f | 852 | set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags); |
5c115590 AG |
853 | |
854 | list_add_tail(&rm->m_sock_item, &rs->rs_send_queue); | |
855 | set_bit(RDS_MSG_ON_SOCK, &rm->m_flags); | |
856 | rds_message_addref(rm); | |
ea8994cb | 857 | sock_hold(rds_rs_to_sk(rs)); |
5c115590 AG |
858 | rm->m_rs = rs; |
859 | ||
860 | /* The code ordering is a little weird, but we're | |
861 | trying to minimize the time we hold c_lock */ | |
862 | rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0); | |
863 | rm->m_inc.i_conn = conn; | |
780a6d9e | 864 | rm->m_inc.i_conn_path = cp; |
5c115590 AG |
865 | rds_message_addref(rm); |
866 | ||
780a6d9e SV |
867 | spin_lock(&cp->cp_lock); |
868 | rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++); | |
869 | list_add_tail(&rm->m_conn_item, &cp->cp_send_queue); | |
5c115590 | 870 | set_bit(RDS_MSG_ON_CONN, &rm->m_flags); |
780a6d9e | 871 | spin_unlock(&cp->cp_lock); |
5c115590 AG |
872 | |
873 | rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n", | |
874 | rm, len, rs, rs->rs_snd_bytes, | |
875 | (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence)); | |
876 | ||
877 | *queued = 1; | |
878 | } | |
879 | ||
880 | spin_unlock_irqrestore(&rs->rs_lock, flags); | |
881 | out: | |
882 | return *queued; | |
883 | } | |
884 | ||
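A worked example of the rs_snd_bytes check in rds_send_queue_rm() above, with illustrative numbers only:

```c
/*
 * With rds_sk_sndbuf(rs) == 64 KiB and rs_snd_bytes == 60 KiB, a 16 KiB
 * message is still queued because the *old* value (60 KiB) is below the
 * limit; rs_snd_bytes then becomes 76 KiB, so the next send sees the
 * buffer as full and gets -EAGAIN, and poll() stops reporting send space
 * until acks shrink rs_snd_bytes again.
 */
```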
fc445084 AG |
885 | /* |
886 | * rds_message is getting to be quite complicated, and we'd like to allocate | |
887 | * it all in one go. This figures out how big it needs to be up front. | |
888 | */ | |
ea010070 | 889 | static int rds_rm_size(struct msghdr *msg, int num_sgs, |
890 | struct rds_iov_vector_arr *vct) | |
fc445084 | 891 | { |
ff87e97a | 892 | struct cmsghdr *cmsg; |
fc445084 | 893 | int size = 0; |
aa0a4ef4 | 894 | int cmsg_groups = 0; |
ff87e97a | 895 | int retval; |
0cebacce | 896 | bool zcopy_cookie = false; |
ea010070 | 897 | struct rds_iov_vector *iov, *tmp_iov; |
ff87e97a | 898 | |
c75ab8a5 | 899 | if (num_sgs < 0) |
900 | return -EINVAL; | |
901 | ||
f95b414e | 902 | for_each_cmsghdr(cmsg, msg) { |
ff87e97a AG |
903 | if (!CMSG_OK(msg, cmsg)) |
904 | return -EINVAL; | |
905 | ||
906 | if (cmsg->cmsg_level != SOL_RDS) | |
907 | continue; | |
908 | ||
909 | switch (cmsg->cmsg_type) { | |
910 | case RDS_CMSG_RDMA_ARGS: | |
ea010070 | 911 | if (vct->indx >= vct->len) { |
912 | vct->len += vct->incr; | |
913 | tmp_iov = | |
914 | krealloc(vct->vec, | |
915 | vct->len * | |
916 | sizeof(struct rds_iov_vector), | |
917 | GFP_KERNEL); | |
918 | if (!tmp_iov) { | |
919 | vct->len -= vct->incr; | |
920 | return -ENOMEM; | |
921 | } | |
922 | vct->vec = tmp_iov; | |
923 | } | |
924 | iov = &vct->vec[vct->indx]; | |
925 | memset(iov, 0, sizeof(struct rds_iov_vector)); | |
926 | vct->indx++; | |
aa0a4ef4 | 927 | cmsg_groups |= 1; |
ea010070 | 928 | retval = rds_rdma_extra_size(CMSG_DATA(cmsg), iov); |
ff87e97a AG |
929 | if (retval < 0) |
930 | return retval; | |
931 | size += retval; | |
aa0a4ef4 | 932 | |
ff87e97a AG |
933 | break; |
934 | ||
0cebacce SV |
935 | case RDS_CMSG_ZCOPY_COOKIE: |
936 | zcopy_cookie = true; | |
df561f66 | 937 | fallthrough; |
f9053113 | 938 | |
ff87e97a AG |
939 | case RDS_CMSG_RDMA_DEST: |
940 | case RDS_CMSG_RDMA_MAP: | |
aa0a4ef4 | 941 | cmsg_groups |= 2; |
ff87e97a AG |
942 | /* these are valid but do not add any size */
943 | break; | |
944 | ||
15133f6e AG |
945 | case RDS_CMSG_ATOMIC_CSWP: |
946 | case RDS_CMSG_ATOMIC_FADD: | |
20c72bd5 AG |
947 | case RDS_CMSG_MASKED_ATOMIC_CSWP: |
948 | case RDS_CMSG_MASKED_ATOMIC_FADD: | |
aa0a4ef4 | 949 | cmsg_groups |= 1; |
15133f6e AG |
950 | size += sizeof(struct scatterlist); |
951 | break; | |
952 | ||
ff87e97a AG |
953 | default: |
954 | return -EINVAL; | |
955 | } | |
956 | ||
957 | } | |
fc445084 | 958 | |
0cebacce SV |
959 | if ((msg->msg_flags & MSG_ZEROCOPY) && !zcopy_cookie) |
960 | return -EINVAL; | |
961 | ||
962 | size += num_sgs * sizeof(struct scatterlist); | |
fc445084 | 963 | |
aa0a4ef4 AG |
964 | /* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */ |
965 | if (cmsg_groups == 3) | |
966 | return -EINVAL; | |
967 | ||
fc445084 AG |
968 | return size; |
969 | } | |
970 | ||
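A rough worked example of what rds_rm_size() returns, assuming 4 KiB pages:

```c
/*
 * A 12 KiB payload gives num_sgs = DIV_ROUND_UP(12288, 4096) = 3,
 * contributing 3 * sizeof(struct scatterlist).  An attached
 * RDS_CMSG_ATOMIC_* cmsg adds one more scatterlist, and
 * RDS_CMSG_RDMA_ARGS adds whatever rds_rdma_extra_size() reports for
 * the described remote vector.
 */
```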
0cebacce SV |
971 | static int rds_cmsg_zcopy(struct rds_sock *rs, struct rds_message *rm, |
972 | struct cmsghdr *cmsg) | |
973 | { | |
974 | u32 *cookie; | |
975 | ||
79a5b972 SV |
976 | if (cmsg->cmsg_len < CMSG_LEN(sizeof(*cookie)) || |
977 | !rm->data.op_mmp_znotifier) | |
0cebacce SV |
978 | return -EINVAL; |
979 | cookie = CMSG_DATA(cmsg); | |
980 | rm->data.op_mmp_znotifier->z_cookie = *cookie; | |
981 | return 0; | |
982 | } | |
983 | ||
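rds_cmsg_zcopy() above consumes a cookie supplied by userspace. A minimal userspace sketch follows; it assumes an already-connected AF_RDS socket on the TCP transport with SO_ZEROCOPY enabled, omits error handling, and is illustrative rather than a reference usage:

```c
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/rds.h>

static ssize_t rds_sendmsg_zcopy(int fd, void *buf, size_t len, uint32_t cookie)
{
	union {
		char buf[CMSG_SPACE(sizeof(cookie))];
		struct cmsghdr align;
	} ctl;
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = ctl.buf,
		.msg_controllen = sizeof(ctl.buf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_RDS;
	cmsg->cmsg_type = RDS_CMSG_ZCOPY_COOKIE;
	cmsg->cmsg_len = CMSG_LEN(sizeof(cookie));
	memcpy(CMSG_DATA(cmsg), &cookie, sizeof(cookie));

	/* Completion for 'cookie' is later read from the socket error queue. */
	return sendmsg(fd, &msg, MSG_ZEROCOPY);
}
```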
5c115590 | 984 | static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm, |
ea010070 | 985 | struct msghdr *msg, int *allocated_mr, |
986 | struct rds_iov_vector_arr *vct) | |
5c115590 AG |
987 | { |
988 | struct cmsghdr *cmsg; | |
ea010070 | 989 | int ret = 0, ind = 0; |
5c115590 | 990 | |
f95b414e | 991 | for_each_cmsghdr(cmsg, msg) { |
5c115590 AG |
992 | if (!CMSG_OK(msg, cmsg)) |
993 | return -EINVAL; | |
994 | ||
995 | if (cmsg->cmsg_level != SOL_RDS) | |
996 | continue; | |
997 | ||
998 | /* As a side effect, RDMA_DEST and RDMA_MAP will set | |
15133f6e | 999 | * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr. |
5c115590 AG |
1000 | */ |
1001 | switch (cmsg->cmsg_type) { | |
1002 | case RDS_CMSG_RDMA_ARGS: | |
ea010070 | 1003 | if (ind >= vct->indx) |
1004 | return -ENOMEM; | |
1005 | ret = rds_cmsg_rdma_args(rs, rm, cmsg, &vct->vec[ind]); | |
1006 | ind++; | |
5c115590 AG |
1007 | break; |
1008 | ||
1009 | case RDS_CMSG_RDMA_DEST: | |
1010 | ret = rds_cmsg_rdma_dest(rs, rm, cmsg); | |
1011 | break; | |
1012 | ||
1013 | case RDS_CMSG_RDMA_MAP: | |
1014 | ret = rds_cmsg_rdma_map(rs, rm, cmsg); | |
1015 | if (!ret) | |
1016 | *allocated_mr = 1; | |
584a8279 SS |
1017 | else if (ret == -ENODEV) |
1018 | /* Accommodate the get_mr() case which can fail | |
1019 | * if connection isn't established yet. | |
1020 | */ | |
1021 | ret = -EAGAIN; | |
5c115590 | 1022 | break; |
15133f6e AG |
1023 | case RDS_CMSG_ATOMIC_CSWP: |
1024 | case RDS_CMSG_ATOMIC_FADD: | |
20c72bd5 AG |
1025 | case RDS_CMSG_MASKED_ATOMIC_CSWP: |
1026 | case RDS_CMSG_MASKED_ATOMIC_FADD: | |
15133f6e AG |
1027 | ret = rds_cmsg_atomic(rs, rm, cmsg); |
1028 | break; | |
5c115590 | 1029 | |
0cebacce SV |
1030 | case RDS_CMSG_ZCOPY_COOKIE: |
1031 | ret = rds_cmsg_zcopy(rs, rm, cmsg); | |
1032 | break; | |
1033 | ||
5c115590 AG |
1034 | default: |
1035 | return -EINVAL; | |
1036 | } | |
1037 | ||
1038 | if (ret) | |
1039 | break; | |
1040 | } | |
1041 | ||
1042 | return ret; | |
1043 | } | |
1044 | ||
9a4890bd KCP |
1045 | static int rds_send_mprds_hash(struct rds_sock *rs, |
1046 | struct rds_connection *conn, int nonblock) | |
5916e2c1 SV |
1047 | { |
1048 | int hash; | |
1049 | ||
1050 | if (conn->c_npaths == 0) | |
1051 | hash = RDS_MPATH_HASH(rs, RDS_MPATH_WORKERS); | |
1052 | else | |
1053 | hash = RDS_MPATH_HASH(rs, conn->c_npaths); | |
1054 | if (conn->c_npaths == 0 && hash != 0) { | |
69b92b5b | 1055 | rds_send_ping(conn, 0); |
5916e2c1 | 1056 | |
a43cced9 KCP |
1057 | /* The underlying connection is not up yet. Need to wait |
1058 | * until it is up to be sure that the non-zero c_path can be | |
1059 | * used. But if we are interrupted, we have to use the zero | |
1060 | * c_path in case the connection ends up being non-MP capable. | |
1061 | */ | |
9a4890bd KCP |
1062 | if (conn->c_npaths == 0) { |
1063 | /* Cannot wait for the connection be made, so just use | |
1064 | * the base c_path. | |
1065 | */ | |
1066 | if (nonblock) | |
1067 | return 0; | |
a43cced9 KCP |
1068 | if (wait_event_interruptible(conn->c_hs_waitq, |
1069 | conn->c_npaths != 0)) | |
1070 | hash = 0; | |
9a4890bd | 1071 | } |
5916e2c1 SV |
1072 | if (conn->c_npaths == 1) |
1073 | hash = 0; | |
1074 | } | |
1075 | return hash; | |
1076 | } | |
1077 | ||
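```c
/*
 * Hedged summary of the path choice above: until the handshake probe
 * reports c_npaths, only path 0 is known to exist, so hash 0 (or a
 * blocking wait) is used; once c_npaths is known, RDS_MPATH_HASH() maps
 * each socket to a fixed path index, so messages from one socket stay
 * ordered on a single rds_conn_path.
 */
```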
f9fb69ad AR |
1078 | static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes) |
1079 | { | |
1080 | struct rds_rdma_args *args; | |
1081 | struct cmsghdr *cmsg; | |
1082 | ||
1083 | for_each_cmsghdr(cmsg, msg) { | |
1084 | if (!CMSG_OK(msg, cmsg)) | |
1085 | return -EINVAL; | |
1086 | ||
1087 | if (cmsg->cmsg_level != SOL_RDS) | |
1088 | continue; | |
1089 | ||
1090 | if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) { | |
14e138a8 AR |
1091 | if (cmsg->cmsg_len < |
1092 | CMSG_LEN(sizeof(struct rds_rdma_args))) | |
1093 | return -EINVAL; | |
f9fb69ad AR |
1094 | args = CMSG_DATA(cmsg); |
1095 | *rdma_bytes += args->remote_vec.bytes; | |
1096 | } | |
1097 | } | |
1098 | return 0; | |
1099 | } | |
1100 | ||
1b784140 | 1101 | int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len) |
5c115590 AG |
1102 | { |
1103 | struct sock *sk = sock->sk; | |
1104 | struct rds_sock *rs = rds_sk_to_rs(sk); | |
eee2fa6a | 1105 | DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); |
342dfc30 | 1106 | DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name); |
5c115590 AG |
1107 | __be16 dport; |
1108 | struct rds_message *rm = NULL; | |
1109 | struct rds_connection *conn; | |
1110 | int ret = 0; | |
1111 | int queued = 0, allocated_mr = 0; | |
1112 | int nonblock = msg->msg_flags & MSG_DONTWAIT; | |
1123fd73 | 1113 | long timeo = sock_sndtimeo(sk, nonblock); |
780a6d9e | 1114 | struct rds_conn_path *cpath; |
eee2fa6a KCP |
1115 | struct in6_addr daddr; |
1116 | __u32 scope_id = 0; | |
f9fb69ad | 1117 | size_t total_payload_len = payload_len, rdma_payload_len = 0; |
0cebacce SV |
1118 | bool zcopy = ((msg->msg_flags & MSG_ZEROCOPY) && |
1119 | sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY)); | |
eeb2c4fb | 1120 | int num_sgs = DIV_ROUND_UP(payload_len, PAGE_SIZE); |
eee2fa6a | 1121 | int namelen; |
d84e7bc0 | 1122 | struct rds_iov_vector_arr vct; |
ea010070 | 1123 | int ind; |
1124 | ||
d84e7bc0 DM |
1125 | memset(&vct, 0, sizeof(vct)); |
1126 | ||
ea010070 | 1127 | /* expect 1 RDMA CMSG per rds_sendmsg. can still grow if more needed. */ |
1128 | vct.incr = 1; | |
5c115590 AG |
1129 | |
1130 | /* Mirror Linux UDP's mirroring of BSD error message compatibility */
1131 | /* XXX: Perhaps MSG_MORE someday */ | |
0cebacce | 1132 | if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT | MSG_ZEROCOPY)) { |
5c115590 AG |
1133 | ret = -EOPNOTSUPP; |
1134 | goto out; | |
1135 | } | |
1136 | ||
eee2fa6a KCP |
1137 | namelen = msg->msg_namelen; |
1138 | if (namelen != 0) { | |
1139 | if (namelen < sizeof(*usin)) { | |
1140 | ret = -EINVAL; | |
1141 | goto out; | |
1142 | } | |
1e2b44e7 KCP |
1143 | switch (usin->sin_family) { |
1144 | case AF_INET: | |
1145 | if (usin->sin_addr.s_addr == htonl(INADDR_ANY) || | |
eee2fa6a | 1146 | usin->sin_addr.s_addr == htonl(INADDR_BROADCAST) || |
842841ec | 1147 | ipv4_is_multicast(usin->sin_addr.s_addr)) { |
eee2fa6a KCP |
1148 | ret = -EINVAL; |
1149 | goto out; | |
1150 | } | |
1151 | ipv6_addr_set_v4mapped(usin->sin_addr.s_addr, &daddr); | |
1152 | dport = usin->sin_port; | |
1153 | break; | |
1154 | ||
e65d4d96 | 1155 | #if IS_ENABLED(CONFIG_IPV6) |
1e2b44e7 KCP |
1156 | case AF_INET6: { |
1157 | int addr_type; | |
1158 | ||
1159 | if (namelen < sizeof(*sin6)) { | |
1160 | ret = -EINVAL; | |
1161 | goto out; | |
1162 | } | |
1163 | addr_type = ipv6_addr_type(&sin6->sin6_addr); | |
1164 | if (!(addr_type & IPV6_ADDR_UNICAST)) { | |
1165 | __be32 addr4; | |
1166 | ||
1167 | if (!(addr_type & IPV6_ADDR_MAPPED)) { | |
1168 | ret = -EINVAL; | |
1169 | goto out; | |
1170 | } | |
1171 | ||
1172 | /* It is a mapped address. Need to do some | |
1173 | * sanity checks. | |
1174 | */ | |
1175 | addr4 = sin6->sin6_addr.s6_addr32[3]; | |
1176 | if (addr4 == htonl(INADDR_ANY) || | |
1177 | addr4 == htonl(INADDR_BROADCAST) || | |
842841ec | 1178 | ipv4_is_multicast(addr4)) { |
dc66fe43 | 1179 | ret = -EINVAL; |
1e2b44e7 KCP |
1180 | goto out; |
1181 | } | |
1182 | } | |
1183 | if (addr_type & IPV6_ADDR_LINKLOCAL) { | |
1184 | if (sin6->sin6_scope_id == 0) { | |
1185 | ret = -EINVAL; | |
1186 | goto out; | |
1187 | } | |
1188 | scope_id = sin6->sin6_scope_id; | |
1189 | } | |
1190 | ||
1191 | daddr = sin6->sin6_addr; | |
1192 | dport = sin6->sin6_port; | |
1193 | break; | |
eee2fa6a | 1194 | } |
e65d4d96 | 1195 | #endif |
eee2fa6a KCP |
1196 | |
1197 | default: | |
5c115590 AG |
1198 | ret = -EINVAL; |
1199 | goto out; | |
1200 | } | |
5c115590 AG |
1201 | } else { |
1202 | /* We only care about consistency with ->connect() */ | |
1203 | lock_sock(sk); | |
1204 | daddr = rs->rs_conn_addr; | |
1205 | dport = rs->rs_conn_port; | |
eee2fa6a | 1206 | scope_id = rs->rs_bound_scope_id; |
5c115590 AG |
1207 | release_sock(sk); |
1208 | } | |
1209 | ||
8c7188b2 | 1210 | lock_sock(sk); |
eee2fa6a | 1211 | if (ipv6_addr_any(&rs->rs_bound_addr) || ipv6_addr_any(&daddr)) { |
8c7188b2 | 1212 | release_sock(sk); |
eee2fa6a | 1213 | ret = -ENOTCONN; |
5c115590 | 1214 | goto out; |
eee2fa6a KCP |
1215 | } else if (namelen != 0) { |
1216 | /* Cannot send to an IPv4 address using an IPv6 source | |
1217 | * address and cannot send to an IPv6 address using an | |
1218 | * IPv4 source address. | |
1219 | */ | |
1220 | if (ipv6_addr_v4mapped(&daddr) ^ | |
1221 | ipv6_addr_v4mapped(&rs->rs_bound_addr)) { | |
1222 | release_sock(sk); | |
1223 | ret = -EOPNOTSUPP; | |
1224 | goto out; | |
1225 | } | |
1e2b44e7 KCP |
1226 | /* If the socket is already bound to a link local address, |
1227 | * it can only send to peers on the same link. But allow | |
1228 | * communicating between link local and non-link local addresses.
1229 | */ | |
1230 | if (scope_id != rs->rs_bound_scope_id) { | |
1231 | if (!scope_id) { | |
1232 | scope_id = rs->rs_bound_scope_id; | |
1233 | } else if (rs->rs_bound_scope_id) { | |
1234 | release_sock(sk); | |
1235 | ret = -EINVAL; | |
1236 | goto out; | |
1237 | } | |
1238 | } | |
5c115590 | 1239 | } |
8c7188b2 | 1240 | release_sock(sk); |
5c115590 | 1241 | |
f9fb69ad AR |
1242 | ret = rds_rdma_bytes(msg, &rdma_payload_len); |
1243 | if (ret) | |
1244 | goto out; | |
1245 | ||
1246 | total_payload_len += rdma_payload_len; | |
1247 | if (max_t(size_t, payload_len, rdma_payload_len) > RDS_MAX_MSG_SIZE) { | |
1248 | ret = -EMSGSIZE; | |
1249 | goto out; | |
1250 | } | |
1251 | ||
06e8941e MK |
1252 | if (payload_len > rds_sk_sndbuf(rs)) { |
1253 | ret = -EMSGSIZE; | |
1254 | goto out; | |
1255 | } | |
1256 | ||
0cebacce SV |
1257 | if (zcopy) { |
1258 | if (rs->rs_transport->t_type != RDS_TRANS_TCP) { | |
1259 | ret = -EOPNOTSUPP; | |
1260 | goto out; | |
1261 | } | |
1262 | num_sgs = iov_iter_npages(&msg->msg_iter, INT_MAX); | |
1263 | } | |
fc445084 | 1264 | /* size of rm including all sgs */ |
ea010070 | 1265 | ret = rds_rm_size(msg, num_sgs, &vct); |
fc445084 AG |
1266 | if (ret < 0) |
1267 | goto out; | |
1268 | ||
1269 | rm = rds_message_alloc(ret, GFP_KERNEL); | |
1270 | if (!rm) { | |
1271 | ret = -ENOMEM; | |
5c115590 AG |
1272 | goto out; |
1273 | } | |
1274 | ||
372cd7de AG |
1275 | /* Attach data to the rm */ |
1276 | if (payload_len) { | |
7dba9203 JG |
1277 | rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); |
1278 | if (IS_ERR(rm->data.op_sg)) { | |
1279 | ret = PTR_ERR(rm->data.op_sg); | |
d139ff09 | 1280 | goto out; |
7dba9203 | 1281 | } |
0cebacce | 1282 | ret = rds_message_copy_from_user(rm, &msg->msg_iter, zcopy); |
372cd7de AG |
1283 | if (ret) |
1284 | goto out; | |
1285 | } | |
1286 | rm->data.op_active = 1; | |
fc445084 | 1287 | |
5c115590 AG |
1288 | rm->m_daddr = daddr; |
1289 | ||
5c115590 AG |
1290 | /* rds_conn_create has a spinlock that runs with IRQ off. |
1291 | * Caching the conn in the socket helps a lot. */ | |
fd261ce6 SS |
1292 | if (rs->rs_conn && ipv6_addr_equal(&rs->rs_conn->c_faddr, &daddr) && |
1293 | rs->rs_tos == rs->rs_conn->c_tos) { | |
5c115590 | 1294 | conn = rs->rs_conn; |
3eb45036 | 1295 | } else { |
d5a8ac28 | 1296 | conn = rds_conn_create_outgoing(sock_net(sock->sk), |
eee2fa6a | 1297 | &rs->rs_bound_addr, &daddr, |
fd261ce6 | 1298 | rs->rs_transport, rs->rs_tos, |
eee2fa6a KCP |
1299 | sock->sk->sk_allocation, |
1300 | scope_id); | |
5c115590 AG |
1301 | if (IS_ERR(conn)) { |
1302 | ret = PTR_ERR(conn); | |
1303 | goto out; | |
1304 | } | |
1305 | rs->rs_conn = conn; | |
1306 | } | |
1307 | ||
9e630bcb | 1308 | if (conn->c_trans->t_mp_capable) |
9a4890bd | 1309 | cpath = &conn->c_path[rds_send_mprds_hash(rs, conn, nonblock)]; |
9e630bcb AR |
1310 | else |
1311 | cpath = &conn->c_path[0]; | |
1312 | ||
1313 | rm->m_conn_path = cpath; | |
1314 | ||
49f69691 | 1315 | /* Parse any control messages the user may have included. */ |
ea010070 | 1316 | ret = rds_cmsg_send(rs, rm, msg, &allocated_mr, &vct); |
584a8279 SS |
1317 | if (ret) { |
1318 | /* Trigger connection so that it's ready for the next retry */
1319 | if (ret == -EAGAIN) | |
1320 | rds_conn_connect_if_down(conn); | |
49f69691 | 1321 | goto out; |
584a8279 | 1322 | } |
49f69691 | 1323 | |
2c3a5f9a | 1324 | if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) { |
cb0a6056 | 1325 | printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n", |
f8b3aaf2 | 1326 | &rm->rdma, conn->c_trans->xmit_rdma); |
15133f6e AG |
1327 | ret = -EOPNOTSUPP; |
1328 | goto out; | |
1329 | } | |
1330 | ||
1331 | if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) { | |
cb0a6056 | 1332 | printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n", |
15133f6e | 1333 | &rm->atomic, conn->c_trans->xmit_atomic); |
5c115590 AG |
1334 | ret = -EOPNOTSUPP; |
1335 | goto out; | |
1336 | } | |
1337 | ||
ebeeb1ad | 1338 | if (rds_destroy_pending(conn)) { |
3db6e0d1 SV |
1339 | ret = -EAGAIN; |
1340 | goto out; | |
1341 | } | |
1342 | ||
9ef845f8 RS |
1343 | if (rds_conn_path_down(cpath)) |
1344 | rds_check_all_paths(conn); | |
5c115590 AG |
1345 | |
1346 | ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs); | |
b98ba52f AG |
1347 | if (ret) { |
1348 | rs->rs_seen_congestion = 1; | |
5c115590 | 1349 | goto out; |
b98ba52f | 1350 | } |
780a6d9e | 1351 | while (!rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port, |
5c115590 AG |
1352 | dport, &queued)) { |
1353 | rds_stats_inc(s_send_queue_full); | |
06e8941e | 1354 | |
5c115590 AG |
1355 | if (nonblock) { |
1356 | ret = -EAGAIN; | |
1357 | goto out; | |
1358 | } | |
1359 | ||
aa395145 | 1360 | timeo = wait_event_interruptible_timeout(*sk_sleep(sk), |
780a6d9e | 1361 | rds_send_queue_rm(rs, conn, cpath, rm, |
5c115590 AG |
1362 | rs->rs_bound_port, |
1363 | dport, | |
1364 | &queued), | |
1365 | timeo); | |
1366 | rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo); | |
1367 | if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT) | |
1368 | continue; | |
1369 | ||
1370 | ret = timeo; | |
1371 | if (ret == 0) | |
1372 | ret = -ETIMEDOUT; | |
1373 | goto out; | |
1374 | } | |
1375 | ||
1376 | /* | |
1377 | * By now we've committed to the send. We reuse rds_send_worker() | |
1378 | * to retry sends in the rds thread if the transport asks us to. | |
1379 | */ | |
1380 | rds_stats_inc(s_send_queued); | |
1381 | ||
1f9ecd7e | 1382 | ret = rds_send_xmit(cpath); |
3db6e0d1 SV |
1383 | if (ret == -ENOMEM || ret == -EAGAIN) { |
1384 | ret = 0; | |
1385 | rcu_read_lock(); | |
ebeeb1ad | 1386 | if (rds_destroy_pending(cpath->cp_conn)) |
3db6e0d1 SV |
1387 | ret = -ENETUNREACH; |
1388 | else | |
1389 | queue_delayed_work(rds_wq, &cpath->cp_send_w, 1); | |
1390 | rcu_read_unlock(); | |
1391 | } | |
1392 | if (ret) | |
1393 | goto out; | |
5c115590 | 1394 | rds_message_put(rm); |
ea010070 | 1395 | |
1396 | for (ind = 0; ind < vct.indx; ind++) | |
1397 | kfree(vct.vec[ind].iov); | |
1398 | kfree(vct.vec); | |
1399 | ||
5c115590 AG |
1400 | return payload_len; |
1401 | ||
1402 | out: | |
ea010070 | 1403 | for (ind = 0; ind < vct.indx; ind++) |
1404 | kfree(vct.vec[ind].iov); | |
1405 | kfree(vct.vec); | |
1406 | ||
5c115590 AG |
1407 | /* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly. |
1408 | * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN | |
1409 | * or in any other way, we need to destroy the MR again */ | |
1410 | if (allocated_mr) | |
1411 | rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1); | |
1412 | ||
1413 | if (rm) | |
1414 | rds_message_put(rm); | |
1415 | return ret; | |
1416 | } | |
1417 | ||
1418 | /* | |
5916e2c1 SV |
1419 | * send out a probe. Can be shared by rds_send_ping, |
1420 | * rds_send_pong, rds_send_hb. | |
1421 | * rds_send_hb should use h_flags | |
1422 | * RDS_FLAG_HB_PING|RDS_FLAG_ACK_REQUIRED | |
1423 | * or | |
1424 | * RDS_FLAG_HB_PONG|RDS_FLAG_ACK_REQUIRED | |
5c115590 | 1425 | */ |
bb789763 | 1426 | static int |
5916e2c1 SV |
1427 | rds_send_probe(struct rds_conn_path *cp, __be16 sport, |
1428 | __be16 dport, u8 h_flags) | |
5c115590 AG |
1429 | { |
1430 | struct rds_message *rm; | |
1431 | unsigned long flags; | |
1432 | int ret = 0; | |
1433 | ||
1434 | rm = rds_message_alloc(0, GFP_ATOMIC); | |
8690bfa1 | 1435 | if (!rm) { |
5c115590 AG |
1436 | ret = -ENOMEM; |
1437 | goto out; | |
1438 | } | |
1439 | ||
45997e9e | 1440 | rm->m_daddr = cp->cp_conn->c_faddr; |
acfcd4d4 | 1441 | rm->data.op_active = 1; |
5c115590 | 1442 | |
3c0a5900 | 1443 | rds_conn_path_connect_if_down(cp); |
5c115590 | 1444 | |
45997e9e | 1445 | ret = rds_cong_wait(cp->cp_conn->c_fcong, dport, 1, NULL); |
5c115590 AG |
1446 | if (ret) |
1447 | goto out; | |
1448 | ||
45997e9e SV |
1449 | spin_lock_irqsave(&cp->cp_lock, flags); |
1450 | list_add_tail(&rm->m_conn_item, &cp->cp_send_queue); | |
5c115590 AG |
1451 | set_bit(RDS_MSG_ON_CONN, &rm->m_flags); |
1452 | rds_message_addref(rm); | |
45997e9e SV |
1453 | rm->m_inc.i_conn = cp->cp_conn; |
1454 | rm->m_inc.i_conn_path = cp; | |
5c115590 | 1455 | |
5916e2c1 | 1456 | rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, |
45997e9e | 1457 | cp->cp_next_tx_seq); |
5916e2c1 | 1458 | rm->m_inc.i_hdr.h_flags |= h_flags; |
45997e9e | 1459 | cp->cp_next_tx_seq++; |
5916e2c1 | 1460 | |
00354de5 SV |
1461 | if (RDS_HS_PROBE(be16_to_cpu(sport), be16_to_cpu(dport)) && |
1462 | cp->cp_conn->c_trans->t_mp_capable) { | |
1463 | u16 npaths = cpu_to_be16(RDS_MPATH_WORKERS); | |
1464 | u32 my_gen_num = cpu_to_be32(cp->cp_conn->c_my_gen_num); | |
5916e2c1 SV |
1465 | |
1466 | rds_message_add_extension(&rm->m_inc.i_hdr, | |
1467 | RDS_EXTHDR_NPATHS, &npaths, | |
1468 | sizeof(npaths)); | |
905dd418 SV |
1469 | rds_message_add_extension(&rm->m_inc.i_hdr, |
1470 | RDS_EXTHDR_GEN_NUM, | |
00354de5 | 1471 | &my_gen_num, |
905dd418 | 1472 | sizeof(u32)); |
5916e2c1 | 1473 | } |
45997e9e | 1474 | spin_unlock_irqrestore(&cp->cp_lock, flags); |
5c115590 AG |
1475 | |
1476 | rds_stats_inc(s_send_queued); | |
1477 | rds_stats_inc(s_send_pong); | |
1478 | ||
7b4b0009 | 1479 | /* schedule the send work on rds_wq */ |
3db6e0d1 | 1480 | rcu_read_lock(); |
ebeeb1ad | 1481 | if (!rds_destroy_pending(cp->cp_conn)) |
3db6e0d1 SV |
1482 | queue_delayed_work(rds_wq, &cp->cp_send_w, 1); |
1483 | rcu_read_unlock(); | |
acfcd4d4 | 1484 | |
5c115590 AG |
1485 | rds_message_put(rm); |
1486 | return 0; | |
1487 | ||
1488 | out: | |
1489 | if (rm) | |
1490 | rds_message_put(rm); | |
1491 | return ret; | |
1492 | } | |
5916e2c1 SV |
1493 | |
1494 | int | |
1495 | rds_send_pong(struct rds_conn_path *cp, __be16 dport) | |
1496 | { | |
1497 | return rds_send_probe(cp, 0, dport, 0); | |
1498 | } | |
1499 | ||
69b92b5b SV |
1500 | void |
1501 | rds_send_ping(struct rds_connection *conn, int cp_index) | |
5916e2c1 SV |
1502 | { |
1503 | unsigned long flags; | |
69b92b5b | 1504 | struct rds_conn_path *cp = &conn->c_path[cp_index]; |
5916e2c1 SV |
1505 | |
1506 | spin_lock_irqsave(&cp->cp_lock, flags); | |
1507 | if (conn->c_ping_triggered) { | |
1508 | spin_unlock_irqrestore(&cp->cp_lock, flags); | |
1509 | return; | |
1510 | } | |
1511 | conn->c_ping_triggered = 1; | |
1512 | spin_unlock_irqrestore(&cp->cp_lock, flags); | |
69b92b5b | 1513 | rds_send_probe(cp, cpu_to_be16(RDS_FLAG_PROBE_PORT), 0, 0); |
5916e2c1 | 1514 | } |
69b92b5b | 1515 | EXPORT_SYMBOL_GPL(rds_send_ping); |