Git Repo - linux.git/blob - net/rds/message.c
rds: support for zcopy completion notification
/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/errqueue.h>

#include "rds.h"

static unsigned int     rds_exthdr_size[__RDS_EXTHDR_MAX] = {
[RDS_EXTHDR_NONE]       = 0,
[RDS_EXTHDR_VERSION]    = sizeof(struct rds_ext_header_version),
[RDS_EXTHDR_RDMA]       = sizeof(struct rds_ext_header_rdma),
[RDS_EXTHDR_RDMA_DEST]  = sizeof(struct rds_ext_header_rdma_dest),
[RDS_EXTHDR_NPATHS]     = sizeof(u16),
[RDS_EXTHDR_GEN_NUM]    = sizeof(u32),
};


void rds_message_addref(struct rds_message *rm)
{
        rdsdebug("addref rm %p ref %d\n", rm, refcount_read(&rm->m_refcount));
        refcount_inc(&rm->m_refcount);
}
EXPORT_SYMBOL_GPL(rds_message_addref);

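/*
 * Try to stash one more completed-send cookie in an skb that is already
 * queued on the socket error queue, so a single notification skb can carry
 * up to SO_EE_ORIGIN_MAX_ZCOOKIES cookies.  Returns false if the tail skb
 * is not a zcookie notification or is already full, in which case the
 * caller queues a fresh skb.
 */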
static inline bool skb_zcookie_add(struct sk_buff *skb, u32 cookie)
{
        struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
        int ncookies;
        u32 *ptr;

        if (serr->ee.ee_origin != SO_EE_ORIGIN_ZCOOKIE)
                return false;
        ncookies = serr->ee.ee_data;
        if (ncookies == SO_EE_ORIGIN_MAX_ZCOOKIES)
                return false;
        ptr = skb_put(skb, sizeof(u32));
        *ptr = cookie;
        serr->ee.ee_data = ++ncookies;
        return true;
}

static void rds_rm_zerocopy_callback(struct rds_sock *rs,
                                     struct rds_znotifier *znotif)
{
        struct sock *sk = rds_rs_to_sk(rs);
        struct sk_buff *skb, *tail;
        struct sock_exterr_skb *serr;
        unsigned long flags;
        struct sk_buff_head *q;
        u32 cookie = znotif->z_cookie;

        q = &sk->sk_error_queue;
        spin_lock_irqsave(&q->lock, flags);
        tail = skb_peek_tail(q);

        if (tail && skb_zcookie_add(tail, cookie)) {
                spin_unlock_irqrestore(&q->lock, flags);
                mm_unaccount_pinned_pages(&znotif->z_mmp);
                consume_skb(rds_skb_from_znotifier(znotif));
                sk->sk_error_report(sk);
                return;
        }

        skb = rds_skb_from_znotifier(znotif);
        serr = SKB_EXT_ERR(skb);
        memset(&serr->ee, 0, sizeof(serr->ee));
        serr->ee.ee_errno = 0;
        serr->ee.ee_origin = SO_EE_ORIGIN_ZCOOKIE;
        serr->ee.ee_info = 0;
        WARN_ON(!skb_zcookie_add(skb, cookie));

        __skb_queue_tail(q, skb);

        spin_unlock_irqrestore(&q->lock, flags);
        sk->sk_error_report(sk);

        mm_unaccount_pinned_pages(&znotif->z_mmp);
}
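
/*
 * Sketch (not part of this file): a userspace sender can drain these
 * notifications with recvmsg(MSG_ERRQUEUE).  Assuming RDS routes such
 * reads through sock_recv_errqueue(), the cookies arrive as an array of
 * u32 in the packet payload and the sock_extended_err (ee_origin ==
 * SO_EE_ORIGIN_ZCOOKIE, ee_data == cookie count) arrives as a control
 * message; the exact cmsg level/type is an assumption here.
 *
 *	char data[SO_EE_ORIGIN_MAX_ZCOOKIES * sizeof(__u32)];
 *	char cbuf[CMSG_SPACE(sizeof(struct sock_extended_err))];
 *	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = cbuf,
 *			      .msg_controllen = sizeof(cbuf) };
 *	ssize_t n = recvmsg(fd, &msg, MSG_ERRQUEUE);
 *
 * On success, each u32 returned in data[] names a send buffer that
 * userspace may now reuse.
 */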

/*
 * This relies on dma_map_sg() not touching sg[].page during merging.
 */
static void rds_message_purge(struct rds_message *rm)
{
        unsigned long i, flags;
        bool zcopy = false;

        if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags)))
                return;

        spin_lock_irqsave(&rm->m_rs_lock, flags);
        if (rm->m_rs) {
                struct rds_sock *rs = rm->m_rs;

                if (rm->data.op_mmp_znotifier) {
                        zcopy = true;
                        rds_rm_zerocopy_callback(rs, rm->data.op_mmp_znotifier);
                        rm->data.op_mmp_znotifier = NULL;
                }
                sock_put(rds_rs_to_sk(rs));
                rm->m_rs = NULL;
        }
        spin_unlock_irqrestore(&rm->m_rs_lock, flags);

        for (i = 0; i < rm->data.op_nents; i++) {
                /* XXX will have to put_page for page refs */
                if (!zcopy)
                        __free_page(sg_page(&rm->data.op_sg[i]));
                else
                        put_page(sg_page(&rm->data.op_sg[i]));
        }
        rm->data.op_nents = 0;

        if (rm->rdma.op_active)
                rds_rdma_free_op(&rm->rdma);
        if (rm->rdma.op_rdma_mr)
                rds_mr_put(rm->rdma.op_rdma_mr);

        if (rm->atomic.op_active)
                rds_atomic_free_op(&rm->atomic);
        if (rm->atomic.op_rdma_mr)
                rds_mr_put(rm->atomic.op_rdma_mr);
}

void rds_message_put(struct rds_message *rm)
{
        rdsdebug("put rm %p ref %d\n", rm, refcount_read(&rm->m_refcount));
        WARN(!refcount_read(&rm->m_refcount), "danger refcount zero on %p\n", rm);
        if (refcount_dec_and_test(&rm->m_refcount)) {
                BUG_ON(!list_empty(&rm->m_sock_item));
                BUG_ON(!list_empty(&rm->m_conn_item));
                rds_message_purge(rm);

                kfree(rm);
        }
}
EXPORT_SYMBOL_GPL(rds_message_put);

void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
                                 __be16 dport, u64 seq)
{
        hdr->h_flags = 0;
        hdr->h_sport = sport;
        hdr->h_dport = dport;
        hdr->h_sequence = cpu_to_be64(seq);
        hdr->h_exthdr[0] = RDS_EXTHDR_NONE;
}
EXPORT_SYMBOL_GPL(rds_message_populate_header);

int rds_message_add_extension(struct rds_header *hdr, unsigned int type,
                              const void *data, unsigned int len)
{
        unsigned int ext_len = sizeof(u8) + len;
        unsigned char *dst;

        /* For now, refuse to add more than one extension header */
        if (hdr->h_exthdr[0] != RDS_EXTHDR_NONE)
                return 0;

        if (type >= __RDS_EXTHDR_MAX || len != rds_exthdr_size[type])
                return 0;

        if (ext_len >= RDS_HEADER_EXT_SPACE)
                return 0;
        dst = hdr->h_exthdr;

        *dst++ = type;
        memcpy(dst, data, len);

        dst[len] = RDS_EXTHDR_NONE;
        return 1;
}
EXPORT_SYMBOL_GPL(rds_message_add_extension);
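
/*
 * Example (sketch): the multipath ping/probe path advertises how many
 * paths this side supports by adding an NPATHS extension, roughly as
 * rds_send_probe() in send.c does:
 *
 *	u16 npaths = cpu_to_be16(RDS_MPATH_WORKERS);
 *
 *	rds_message_add_extension(&rm->m_inc.i_hdr, RDS_EXTHDR_NPATHS,
 *				  &npaths, sizeof(npaths));
 */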

/*
 * If a message has extension headers, retrieve them here.
 * Call like this:
 *
 * unsigned int pos = 0;
 *
 * while (1) {
 *      buflen = sizeof(buffer);
 *      type = rds_message_next_extension(hdr, &pos, buffer, &buflen);
 *      if (type == RDS_EXTHDR_NONE)
 *              break;
 *      ...
 * }
 */
int rds_message_next_extension(struct rds_header *hdr,
                unsigned int *pos, void *buf, unsigned int *buflen)
{
        unsigned int offset, ext_type, ext_len;
        u8 *src = hdr->h_exthdr;

        offset = *pos;
        if (offset >= RDS_HEADER_EXT_SPACE)
                goto none;

        /* Get the extension type and length. For now, the
         * length is implied by the extension type. */
        ext_type = src[offset++];

        if (ext_type == RDS_EXTHDR_NONE || ext_type >= __RDS_EXTHDR_MAX)
                goto none;
        ext_len = rds_exthdr_size[ext_type];
        if (offset + ext_len > RDS_HEADER_EXT_SPACE)
                goto none;

        *pos = offset + ext_len;
        if (ext_len < *buflen)
                *buflen = ext_len;
        memcpy(buf, src + offset, *buflen);
        return ext_type;

none:
        *pos = RDS_HEADER_EXT_SPACE;
        *buflen = 0;
        return RDS_EXTHDR_NONE;
}

int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset)
{
        struct rds_ext_header_rdma_dest ext_hdr;

        ext_hdr.h_rdma_rkey = cpu_to_be32(r_key);
        ext_hdr.h_rdma_offset = cpu_to_be32(offset);
        return rds_message_add_extension(hdr, RDS_EXTHDR_RDMA_DEST, &ext_hdr, sizeof(ext_hdr));
}
EXPORT_SYMBOL_GPL(rds_message_add_rdma_dest_extension);
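
/*
 * This extension echoes the rkey and offset of an MR back to the peer that
 * owns it, so the receiver can locate and release the right MR.  A send
 * carrying an rdma cookie would attach it along these lines (sketch, using
 * the cookie accessors from rds.h):
 *
 *	rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
 *			rds_rdma_cookie_key(rm->m_rdma_cookie),
 *			rds_rdma_cookie_offset(rm->m_rdma_cookie));
 */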

/*
 * Each rds_message is allocated with extra space for the scatterlist entries
 * rds ops will need. This is to minimize memory allocation count. Then, each rds op
 * can grab SGs when initializing its part of the rds_message.
 */
struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
{
        struct rds_message *rm;

        if (extra_len > KMALLOC_MAX_SIZE - sizeof(struct rds_message))
                return NULL;

        rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
        if (!rm)
                goto out;

        rm->m_used_sgs = 0;
        rm->m_total_sgs = extra_len / sizeof(struct scatterlist);

        refcount_set(&rm->m_refcount, 1);
        INIT_LIST_HEAD(&rm->m_sock_item);
        INIT_LIST_HEAD(&rm->m_conn_item);
        spin_lock_init(&rm->m_rs_lock);
        init_waitqueue_head(&rm->m_flush_wait);

out:
        return rm;
}
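
/*
 * Example (sketch): callers size the trailing sg pool up front and allocate
 * once.  The send path, for instance, computes the total scatterlist count
 * needed by the payload plus any rdma/atomic ops, then does roughly:
 *
 *	extra = num_sgs * sizeof(struct scatterlist);
 *	rm = rds_message_alloc(extra, GFP_KERNEL);
 */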

/*
 * RDS ops use this to grab SG entries from the rm's sg pool.
 */
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
{
        struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
        struct scatterlist *sg_ret;

        WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs);
        WARN_ON(!nents);

        if (rm->m_used_sgs + nents > rm->m_total_sgs)
                return NULL;

        sg_ret = &sg_first[rm->m_used_sgs];
        sg_init_table(sg_ret, nents);
        rm->m_used_sgs += nents;

        return sg_ret;
}

struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len)
{
        struct rds_message *rm;
        unsigned int i;
        int num_sgs = ceil(total_len, PAGE_SIZE);
        int extra_bytes = num_sgs * sizeof(struct scatterlist);

        rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
        if (!rm)
                return ERR_PTR(-ENOMEM);

        set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
        rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
        rm->data.op_nents = ceil(total_len, PAGE_SIZE);
        rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
        if (!rm->data.op_sg) {
                rds_message_put(rm);
                return ERR_PTR(-ENOMEM);
        }

        for (i = 0; i < rm->data.op_nents; ++i) {
                sg_set_page(&rm->data.op_sg[i],
                                virt_to_page(page_addrs[i]),
                                PAGE_SIZE, 0);
        }

        return rm;
}
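
/*
 * Note: ceil() above is the RDS round-up helper from rds.h, not a libm
 * call.  One user of this function (sketch) is the congestion-map path,
 * where rds_cong_update_alloc() wraps the map's long-lived pages in a
 * message:
 *
 *	rm = rds_message_map_pages(map->m_page_addrs, RDS_CONG_MAP_BYTES);
 *
 * RDS_MSG_PAGEVEC marks those pages as borrowed, which is why
 * rds_message_purge() bails out early instead of freeing them.
 */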

int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from)
{
        unsigned long to_copy, nbytes;
        unsigned long sg_off;
        struct scatterlist *sg;
        int ret = 0;

        rm->m_inc.i_hdr.h_len = cpu_to_be32(iov_iter_count(from));

        /*
         * now allocate and copy in the data payload.
         */
        sg = rm->data.op_sg;
        sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */

        while (iov_iter_count(from)) {
                if (!sg_page(sg)) {
                        ret = rds_page_remainder_alloc(sg, iov_iter_count(from),
                                                       GFP_HIGHUSER);
                        if (ret)
                                return ret;
                        rm->data.op_nents++;
                        sg_off = 0;
                }

                to_copy = min_t(unsigned long, iov_iter_count(from),
                                sg->length - sg_off);

                rds_stats_add(s_copy_from_user, to_copy);
                nbytes = copy_page_from_iter(sg_page(sg), sg->offset + sg_off,
                                             to_copy, from);
                if (nbytes != to_copy)
                        return -EFAULT;

                sg_off += to_copy;

                if (sg_off == sg->length)
                        sg++;
        }

        return ret;
}

int rds_message_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to)
{
        struct rds_message *rm;
        struct scatterlist *sg;
        unsigned long to_copy;
        unsigned long vec_off;
        int copied;
        int ret;
        u32 len;

        rm = container_of(inc, struct rds_message, m_inc);
        len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

        sg = rm->data.op_sg;
        vec_off = 0;
        copied = 0;

        while (iov_iter_count(to) && copied < len) {
                to_copy = min_t(unsigned long, iov_iter_count(to),
                                sg->length - vec_off);
                to_copy = min_t(unsigned long, to_copy, len - copied);

                rds_stats_add(s_copy_to_user, to_copy);
                ret = copy_page_to_iter(sg_page(sg), sg->offset + vec_off,
                                        to_copy, to);
                if (ret != to_copy)
                        return -EFAULT;

                vec_off += to_copy;
                copied += to_copy;

                if (vec_off == sg->length) {
                        vec_off = 0;
                        sg++;
                }
        }

        return copied;
}

/*
 * If the message is still on the send queue, wait until the transport
 * is done with it. This is particularly important for RDMA operations.
 */
void rds_message_wait(struct rds_message *rm)
{
        wait_event_interruptible(rm->m_flush_wait,
                        !test_bit(RDS_MSG_MAPPED, &rm->m_flags));
}

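/*
 * Transports call this once their (DMA) mappings for rm are torn down;
 * clearing RDS_MSG_MAPPED is what releases a sender sleeping in
 * rds_message_wait() above.
 */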
void rds_message_unmapped(struct rds_message *rm)
{
        clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
        wake_up_interruptible(&rm->m_flush_wait);
}
EXPORT_SYMBOL_GPL(rds_message_unmapped);