/*
 * Request reply cache. This is currently a global cache, but this may
 * change in the future and be a per-client cache.
 *
 * This code is heavily inspired by the 44BSD implementation, although
 * it does things a bit differently.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <[email protected]>
 */

#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/highmem.h>
#include <net/checksum.h>

#include "nfsd.h"
#include "cache.h"

#define NFSDDBG_FACILITY	NFSDDBG_REPCACHE

#define HASHSIZE	64

static struct hlist_head *cache_hash;
static struct list_head lru_head;
static struct kmem_cache *drc_slab;
static unsigned int num_drc_entries;
static unsigned int max_drc_entries;

/*
 * Calculate the hash index from an XID.
 */
static inline u32 request_hash(u32 xid)
{
        u32 h = xid;
        h ^= (xid >> 24);
        return h & (HASHSIZE-1);
}

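/*
 * Example: xid 0x12345678 gives h = 0x12345678 ^ 0x12 = 0x1234566a, and
 * h & 0x3f selects bucket 0x2a (42). Folding the top byte of the xid
 * into the low bits spreads XIDs that differ only in their upper byte
 * across different buckets.
 */
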
static int	nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static void	cache_cleaner_func(struct work_struct *unused);
static int	nfsd_reply_cache_shrink(struct shrinker *shrink,
					struct shrink_control *sc);

struct shrinker nfsd_reply_cache_shrinker = {
        .shrink	= nfsd_reply_cache_shrink,
        .seeks	= 1,
};

/*
 * Locking for the reply cache:
 * A cache entry is "single use" if c_state == RC_INPROG.
 * Otherwise, the lock must be held when accessing _prev or _next.
 */
static DEFINE_SPINLOCK(cache_lock);
static DECLARE_DELAYED_WORK(cache_cleaner, cache_cleaner_func);

/*
 * Put a cap on the size of the DRC based on the amount of available
 * low memory in the machine.
 *
 *  64MB:    8192
 * 128MB:   11585
 * 256MB:   16384
 * 512MB:   23170
 *   1GB:   32768
 *   2GB:   46340
 *   4GB:   65536
 *   8GB:   92681
 *  16GB:  131072
 *
 * ...with a hard cap of 256k entries. In the worst case, each entry will be
 * ~1k, so the above numbers should give a rough max of the amount of memory
 * used in k.
 */
static unsigned int
nfsd_cache_size_limit(void)
{
        unsigned int limit;
        unsigned long low_pages = totalram_pages - totalhigh_pages;

        limit = (16 * int_sqrt(low_pages)) << (PAGE_SHIFT-10);
        return min_t(unsigned int, limit, 256*1024);
}

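/*
 * Worked example: with 1GB of low memory and 4KB pages, low_pages is
 * 262144, int_sqrt(262144) is 512, so the limit is
 * (16 * 512) << (12 - 10) = 32768 entries, matching the table above.
 */
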
static struct svc_cacherep *
nfsd_reply_cache_alloc(void)
{
        struct svc_cacherep *rp;

        rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
        if (rp) {
                rp->c_state = RC_UNUSED;
                rp->c_type = RC_NOCACHE;
                INIT_LIST_HEAD(&rp->c_lru);
                INIT_HLIST_NODE(&rp->c_hash);
        }
        return rp;
}

static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
        if (rp->c_type == RC_REPLBUFF)
                kfree(rp->c_replvec.iov_base);
        hlist_del(&rp->c_hash);
        list_del(&rp->c_lru);
        --num_drc_entries;
        kmem_cache_free(drc_slab, rp);
}

static void
nfsd_reply_cache_free(struct svc_cacherep *rp)
{
        spin_lock(&cache_lock);
        nfsd_reply_cache_free_locked(rp);
        spin_unlock(&cache_lock);
}

int nfsd_reply_cache_init(void)
{
        register_shrinker(&nfsd_reply_cache_shrinker);
        drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep),
                                        0, 0, NULL);
        if (!drc_slab)
                goto out_nomem;

        cache_hash = kcalloc(HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL);
        if (!cache_hash)
                goto out_nomem;

        INIT_LIST_HEAD(&lru_head);
        max_drc_entries = nfsd_cache_size_limit();
        num_drc_entries = 0;

        return 0;
out_nomem:
        printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
        nfsd_reply_cache_shutdown();
        return -ENOMEM;
}

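/*
 * Note that the out_nomem path above unwinds by calling
 * nfsd_reply_cache_shutdown(), so the shutdown path below has to cope
 * with a partially constructed cache: kfree() of a NULL cache_hash is
 * harmless, and drc_slab is only destroyed if it was actually created.
 */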
void nfsd_reply_cache_shutdown(void)
{
        struct svc_cacherep *rp;

        unregister_shrinker(&nfsd_reply_cache_shrinker);
        cancel_delayed_work_sync(&cache_cleaner);

        while (!list_empty(&lru_head)) {
                rp = list_entry(lru_head.next, struct svc_cacherep, c_lru);
                nfsd_reply_cache_free_locked(rp);
        }

        kfree(cache_hash);
        cache_hash = NULL;

        if (drc_slab) {
                kmem_cache_destroy(drc_slab);
                drc_slab = NULL;
        }
}

/*
 * Move cache entry to end of LRU list, and queue the cleaner to run if it's
 * not already scheduled.
 */
static void
lru_put_end(struct svc_cacherep *rp)
{
        rp->c_timestamp = jiffies;
        list_move_tail(&rp->c_lru, &lru_head);
        schedule_delayed_work(&cache_cleaner, RC_EXPIRE);
}

/*
 * Move a cache entry from one hash list to another
 */
static void
hash_refile(struct svc_cacherep *rp)
{
        hlist_del_init(&rp->c_hash);
        hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid));
}

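/*
 * An entry counts as expired once it has sat unused for RC_EXPIRE
 * (defined in cache.h; 120 seconds in mainline trees of this era),
 * unless a reply is still being built for it (RC_INPROG).
 */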
static inline bool
nfsd_cache_entry_expired(struct svc_cacherep *rp)
{
        return rp->c_state != RC_INPROG &&
               time_after(jiffies, rp->c_timestamp + RC_EXPIRE);
}

/*
 * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
 * Also prune the oldest ones when the total exceeds the max number of entries.
 */
static void
prune_cache_entries(void)
{
        struct svc_cacherep *rp, *tmp;

        list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
                if (!nfsd_cache_entry_expired(rp) &&
                    num_drc_entries <= max_drc_entries)
                        break;
                nfsd_reply_cache_free_locked(rp);
        }

        /*
         * Conditionally rearm the job. If we cleaned out the list, then
         * cancel any pending run (since there won't be any work to do).
         * Otherwise, we rearm the job or modify the existing one to run in
         * RC_EXPIRE since we just ran the pruner.
         */
        if (list_empty(&lru_head))
                cancel_delayed_work(&cache_cleaner);
        else
                mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
}

static void
cache_cleaner_func(struct work_struct *unused)
{
        spin_lock(&cache_lock);
        prune_cache_entries();
        spin_unlock(&cache_lock);
}

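/*
 * Under the shrinker API of this kernel generation, the ->shrink
 * callback is used both to query and to reclaim: when sc->nr_to_scan
 * is zero the VM only wants the current object count, otherwise it is
 * asking us to free entries. Either way we return the number of
 * cacheable entries that remain.
 */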
static int
nfsd_reply_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned int num;

        spin_lock(&cache_lock);
        if (sc->nr_to_scan)
                prune_cache_entries();
        num = num_drc_entries;
        spin_unlock(&cache_lock);

        return num;
}

/*
 * Walk an xdr_buf and get a checksum of at most the first RC_CSUMLEN bytes
 */
static __wsum
nfsd_cache_csum(struct svc_rqst *rqstp)
{
        int idx;
        unsigned int base;
        __wsum csum;
        struct xdr_buf *buf = &rqstp->rq_arg;
        const unsigned char *p = buf->head[0].iov_base;
        size_t csum_len = min_t(size_t, buf->head[0].iov_len + buf->page_len,
                                RC_CSUMLEN);
        size_t len = min(buf->head[0].iov_len, csum_len);

        /* rq_arg.head first */
        csum = csum_partial(p, len, 0);
        csum_len -= len;

        /* Continue into page array */
        idx = buf->page_base / PAGE_SIZE;
        base = buf->page_base & ~PAGE_MASK;
        while (csum_len) {
                p = page_address(buf->pages[idx]) + base;
                len = min_t(size_t, PAGE_SIZE - base, csum_len);
                csum = csum_partial(p, len, csum);
                csum_len -= len;
                base = 0;
                ++idx;
        }
        return csum;
}

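/*
 * The checksum above covers the head kvec of rq_arg and then as much
 * of the page array as needed, capped at RC_CSUMLEN bytes (256 in the
 * mainline cache.h of this era). csum_partial() is the kernel's
 * Internet-checksum helper, cheap enough to run on every cacheable
 * request.
 */
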
/*
 * Search the request hash for an entry that matches the given rqstp.
 * Must be called with cache_lock held. Returns the found entry or
 * NULL on failure.
 */
static struct svc_cacherep *
nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum)
{
        struct svc_cacherep *rp;
        struct hlist_head *rh;
        __be32 xid = rqstp->rq_xid;
        u32 proto = rqstp->rq_prot,
            vers = rqstp->rq_vers,
            proc = rqstp->rq_proc;

        rh = &cache_hash[request_hash(xid)];
        hlist_for_each_entry(rp, rh, c_hash) {
                if (xid == rp->c_xid && proc == rp->c_proc &&
                    proto == rp->c_prot && vers == rp->c_vers &&
                    rqstp->rq_arg.len == rp->c_len && csum == rp->c_csum &&
                    rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) &&
                    rpc_get_port(svc_addr(rqstp)) == rpc_get_port((struct sockaddr *)&rp->c_addr))
                        return rp;
        }
        return NULL;
}

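/*
 * Note that a hit above takes more than a matching XID: the procedure,
 * version, transport protocol, argument length, payload checksum, and
 * the client's address and port all have to agree. The extra fields
 * keep a retransmission from being confused with a different request
 * that happens to reuse the same XID.
 */
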
/*
 * Try to find an entry matching the current call in the cache. When none
 * is found, we try to grab the oldest expired entry off the LRU list. If
 * a suitable one isn't there, then drop the cache_lock and allocate a
 * new one, then search again in case one got inserted while this thread
 * didn't hold the lock.
 */
int
nfsd_cache_lookup(struct svc_rqst *rqstp)
{
        struct svc_cacherep *rp, *found;
        __be32 xid = rqstp->rq_xid;
        u32 proto = rqstp->rq_prot,
            vers = rqstp->rq_vers,
            proc = rqstp->rq_proc;
        __wsum csum;
        unsigned long age;
        int type = rqstp->rq_cachetype;
        int rtn;

        rqstp->rq_cacherep = NULL;
        if (type == RC_NOCACHE) {
                nfsdstats.rcnocache++;
                return RC_DOIT;
        }

        csum = nfsd_cache_csum(rqstp);

        spin_lock(&cache_lock);
        rtn = RC_DOIT;

        rp = nfsd_cache_search(rqstp, csum);
        if (rp)
                goto found_entry;

        /* Try to use the first entry on the LRU */
        if (!list_empty(&lru_head)) {
                rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
                if (nfsd_cache_entry_expired(rp) ||
                    num_drc_entries >= max_drc_entries) {
                        lru_put_end(rp);
                        prune_cache_entries();
                        goto setup_entry;
                }
        }

        /* Drop the lock and allocate a new entry */
        spin_unlock(&cache_lock);
        rp = nfsd_reply_cache_alloc();
        if (!rp) {
                dprintk("nfsd: unable to allocate DRC entry!\n");
                return RC_DOIT;
        }
        spin_lock(&cache_lock);
        ++num_drc_entries;

        /*
         * Must search again just in case someone inserted one
         * after we dropped the lock above.
         */
        found = nfsd_cache_search(rqstp, csum);
        if (found) {
                nfsd_reply_cache_free_locked(rp);
                rp = found;
                goto found_entry;
        }

        /*
         * We're keeping the one we just allocated. Are we now over the
         * limit? Prune one off the tip of the LRU in trade for the one we
         * just allocated if so.
         */
        if (num_drc_entries >= max_drc_entries)
                nfsd_reply_cache_free_locked(list_first_entry(&lru_head,
                                                struct svc_cacherep, c_lru));

setup_entry:
        nfsdstats.rcmisses++;
        rqstp->rq_cacherep = rp;
        rp->c_state = RC_INPROG;
        rp->c_xid = xid;
        rp->c_proc = proc;
        rpc_copy_addr((struct sockaddr *)&rp->c_addr, svc_addr(rqstp));
        rpc_set_port((struct sockaddr *)&rp->c_addr, rpc_get_port(svc_addr(rqstp)));
        rp->c_prot = proto;
        rp->c_vers = vers;
        rp->c_len = rqstp->rq_arg.len;
        rp->c_csum = csum;

        hash_refile(rp);
        lru_put_end(rp);

        /* release any buffer */
        if (rp->c_type == RC_REPLBUFF) {
                kfree(rp->c_replvec.iov_base);
                rp->c_replvec.iov_base = NULL;
        }
        rp->c_type = RC_NOCACHE;
out:
        spin_unlock(&cache_lock);
        return rtn;

found_entry:
        nfsdstats.rchits++;
        /* We found a matching entry which is either in progress or done. */
        age = jiffies - rp->c_timestamp;
        lru_put_end(rp);

        rtn = RC_DROPIT;
        /* Request being processed or excessive rexmits */
        if (rp->c_state == RC_INPROG || age < RC_DELAY)
                goto out;

        /* From the hall of fame of impractical attacks:
         * Is this a user who tries to snoop on the cache? */
        rtn = RC_DOIT;
        if (!rqstp->rq_secure && rp->c_secure)
                goto out;

        /* Compose RPC reply header */
        switch (rp->c_type) {
        case RC_NOCACHE:
                break;
        case RC_REPLSTAT:
                svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
                rtn = RC_REPLY;
                break;
        case RC_REPLBUFF:
                if (!nfsd_cache_append(rqstp, &rp->c_replvec))
                        goto out;	/* should not happen */
                rtn = RC_REPLY;
                break;
        default:
                printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
                nfsd_reply_cache_free_locked(rp);
        }

        goto out;
}
442 | /* | |
443 | * Update a cache entry. This is called from nfsd_dispatch when | |
444 | * the procedure has been executed and the complete reply is in | |
445 | * rqstp->rq_res. | |
446 | * | |
447 | * We're copying around data here rather than swapping buffers because | |
448 | * the toplevel loop requires max-sized buffers, which would be a waste | |
449 | * of memory for a cache with a max reply size of 100 bytes (diropokres). | |
450 | * | |
451 | * If we should start to use different types of cache entries tailored | |
452 | * specifically for attrstat and fh's, we may save even more space. | |
453 | * | |
454 | * Also note that a cachetype of RC_NOCACHE can legally be passed when | |
455 | * nfsd failed to encode a reply that otherwise would have been cached. | |
456 | * In this case, nfsd_cache_update is called with statp == NULL. | |
457 | */ | |
458 | void | |
c7afef1f | 459 | nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp) |
1da177e4 | 460 | { |
13cc8a78 | 461 | struct svc_cacherep *rp = rqstp->rq_cacherep; |
1da177e4 LT |
462 | struct kvec *resv = &rqstp->rq_res.head[0], *cachv; |
463 | int len; | |
464 | ||
13cc8a78 | 465 | if (!rp) |
1da177e4 LT |
466 | return; |
467 | ||
468 | len = resv->iov_len - ((char*)statp - (char*)resv->iov_base); | |
469 | len >>= 2; | |
fca4217c | 470 | |
1da177e4 LT |
471 | /* Don't cache excessive amounts of data and XDR failures */ |
472 | if (!statp || len > (256 >> 2)) { | |
2c6b691c | 473 | nfsd_reply_cache_free(rp); |
1da177e4 LT |
474 | return; |
475 | } | |
476 | ||
477 | switch (cachetype) { | |
478 | case RC_REPLSTAT: | |
479 | if (len != 1) | |
480 | printk("nfsd: RC_REPLSTAT/reply len %d!\n",len); | |
481 | rp->c_replstat = *statp; | |
482 | break; | |
483 | case RC_REPLBUFF: | |
484 | cachv = &rp->c_replvec; | |
485 | cachv->iov_base = kmalloc(len << 2, GFP_KERNEL); | |
486 | if (!cachv->iov_base) { | |
2c6b691c | 487 | nfsd_reply_cache_free(rp); |
1da177e4 LT |
488 | return; |
489 | } | |
490 | cachv->iov_len = len << 2; | |
491 | memcpy(cachv->iov_base, statp, len << 2); | |
492 | break; | |
2c6b691c JL |
493 | case RC_NOCACHE: |
494 | nfsd_reply_cache_free(rp); | |
495 | return; | |
1da177e4 LT |
496 | } |
497 | spin_lock(&cache_lock); | |
498 | lru_put_end(rp); | |
499 | rp->c_secure = rqstp->rq_secure; | |
500 | rp->c_type = cachetype; | |
501 | rp->c_state = RC_DONE; | |
1da177e4 LT |
502 | spin_unlock(&cache_lock); |
503 | return; | |
504 | } | |
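
/*
 * A note on the length handling above: statp points at the status word
 * that starts the procedure-specific results inside the reply's head
 * kvec, so the subtraction measures everything from that word to the
 * end of the encoded reply head, and the shift converts bytes into
 * 32-bit XDR words. Replies longer than 256 bytes are never cached,
 * and a NULL statp (the XDR-failure case described above) simply
 * releases the entry.
 */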

/*
 * Copy cached reply to current reply buffer. Should always fit.
 * FIXME as reply is in a page, we should just attach the page, and
 * keep a refcount....
 */
static int
nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
{
        struct kvec *vec = &rqstp->rq_res.head[0];

        if (vec->iov_len + data->iov_len > PAGE_SIZE) {
                printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n",
                                data->iov_len);
                return 0;
        }
        memcpy((char *)vec->iov_base + vec->iov_len, data->iov_base, data->iov_len);
        vec->iov_len += data->iov_len;
        return 1;
}