/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/highmem.h>
#include <linux/gfp.h>

#include "rds.h"
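
/*
 * One partially-used page is cached per CPU: r_page is the cached page
 * (NULL when the slot is empty) and r_offset is the first byte within it
 * that has not yet been handed out.
 */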
struct rds_page_remainder {
	struct page	*r_page;
	unsigned long	r_offset;
};

DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_page_remainder, rds_page_remainders);

/*
 * Returns 0 on success or -errno on failure.
 *
 * We don't have to worry about flush_dcache_page() as this only works
 * with private pages.  If, say, we were to do directed receive to pinned
 * user pages we'd have to worry more about cache coherence.  (Though
 * the flush_dcache_page() in get_user_pages() would probably be enough).
 */
int rds_page_copy_user(struct page *page, unsigned long offset,
		       void __user *ptr, unsigned long bytes,
		       int to_user)
{
	unsigned long ret;
	void *addr;

	if (to_user)
		rds_stats_add(s_copy_to_user, bytes);
	else
		rds_stats_add(s_copy_from_user, bytes);

	addr = kmap_atomic(page, KM_USER0);
	if (to_user)
		ret = __copy_to_user_inatomic(ptr, addr + offset, bytes);
	else
		ret = __copy_from_user_inatomic(addr + offset, ptr, bytes);
	kunmap_atomic(addr, KM_USER0);

	if (ret) {
		/* the atomic copy faulted (e.g. the user page was not
		 * resident); retry with a sleeping kmap() and a copy
		 * that is allowed to fault the page in. */
		addr = kmap(page);
		if (to_user)
			ret = copy_to_user(ptr, addr + offset, bytes);
		else
			ret = copy_from_user(addr + offset, ptr, bytes);
		kunmap(page);
		if (ret)
			return -EFAULT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rds_page_copy_user);
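
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * copy @len bytes from the start of a fragment page out to a user
 * buffer.  The last argument selects the direction: non-zero copies to
 * userspace, zero copies from it.
 *
 *	int copy_frag_to_user(struct page *page, void __user *ubuf,
 *			      unsigned long len)
 *	{
 *		return rds_page_copy_user(page, 0, ubuf, len, 1);
 *	}
 */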

/*
 * Message allocation uses this to build up regions of a message.
 *
 * @bytes - the number of bytes needed.
 * @gfp - the waiting behaviour of the allocation
 *
 * @gfp is always ORed with __GFP_HIGHMEM.  Callers must be prepared to
 * kmap the pages, etc.
 *
 * If @bytes is at least a full page then this just returns a page from
 * alloc_page().
 *
 * If @bytes is less than a full page then this stores the unused region of
 * the page in a per-cpu structure.  Future partial-page allocations may be
 * satisfied from that cached region.  This lets us waste less memory on
 * small allocations with minimal complexity.  It works because the transmit
 * path passes read-only page regions down to devices.  They hold a page
 * reference until they are done with the region.
 */
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp)
{
	struct rds_page_remainder *rem;
	unsigned long flags;
	struct page *page;
	int ret;

	gfp |= __GFP_HIGHMEM;

	/* jump straight to allocation if we need at least a whole page */
	if (bytes >= PAGE_SIZE) {
		page = alloc_page(gfp);
		if (page == NULL) {
			ret = -ENOMEM;
		} else {
			sg_set_page(scat, page, PAGE_SIZE, 0);
			ret = 0;
		}
		goto out;
	}

	/* pin this cpu's remainder and keep interrupt-context users off it */
	rem = &per_cpu(rds_page_remainders, get_cpu());
	local_irq_save(flags);

	while (1) {
		/* avoid a tiny region getting stuck by tossing it */
		if (rem->r_page && bytes > (PAGE_SIZE - rem->r_offset)) {
			rds_stats_inc(s_page_remainder_miss);
			__free_page(rem->r_page);
			rem->r_page = NULL;
		}

		/* hand out a fragment from the cached page */
		if (rem->r_page && bytes <= (PAGE_SIZE - rem->r_offset)) {
			sg_set_page(scat, rem->r_page, bytes, rem->r_offset);
			get_page(sg_page(scat));

			if (rem->r_offset != 0)
				rds_stats_inc(s_page_remainder_hit);

			rem->r_offset += bytes;
			if (rem->r_offset == PAGE_SIZE) {
				/* page fully consumed: drop the cache's reference */
				__free_page(rem->r_page);
				rem->r_page = NULL;
			}
			ret = 0;
			break;
		}

		/* alloc if there is nothing for us to use */
		local_irq_restore(flags);
		put_cpu();

		page = alloc_page(gfp);

		rem = &per_cpu(rds_page_remainders, get_cpu());
		local_irq_save(flags);

		if (page == NULL) {
			ret = -ENOMEM;
			break;
		}

		/* did someone race to fill the remainder before us? */
		if (rem->r_page) {
			__free_page(page);
			continue;
		}

		/* otherwise install our page and loop around to carve from it */
		rem->r_page = page;
		rem->r_offset = 0;
	}

	local_irq_restore(flags);
	put_cpu();
out:
	rdsdebug("bytes %lu ret %d %p %u %u\n", bytes, ret,
		 ret ? NULL : sg_page(scat), ret ? 0 : scat->offset,
		 ret ? 0 : scat->length);
	return ret;
}
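
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * fill one scatterlist entry with 128 bytes of space, letting small
 * requests share a page via the per-cpu remainder cache.  The entry ends
 * up holding its own page reference, so the region is released by
 * dropping that reference once the caller is done with it.
 *
 *	struct scatterlist sg;
 *	int ret;
 *
 *	sg_init_table(&sg, 1);
 *	ret = rds_page_remainder_alloc(&sg, 128, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	...
 *	put_page(sg_page(&sg));
 */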
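/*
 * CPU hotplug callback: when a CPU is taken down (CPU_DEAD), free any
 * page still cached in that CPU's remainder slot so it isn't leaked.
 */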
static int rds_page_remainder_cpu_notify(struct notifier_block *self,
					 unsigned long action, void *hcpu)
{
	struct rds_page_remainder *rem;
	long cpu = (long)hcpu;

	rem = &per_cpu(rds_page_remainders, cpu);

	rdsdebug("cpu %ld action 0x%lx\n", cpu, action);

	switch (action) {
	case CPU_DEAD:
		if (rem->r_page)
			__free_page(rem->r_page);
		rem->r_page = NULL;
		break;
	}

	return 0;
}

static struct notifier_block rds_page_remainder_nb = {
	.notifier_call = rds_page_remainder_cpu_notify,
};

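/*
 * Called on RDS teardown: pretend every possible CPU has gone through
 * CPU_DEAD so that any cached remainder pages are freed.
 */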
void rds_page_exit(void)
{
	int i;

	for_each_possible_cpu(i)
		rds_page_remainder_cpu_notify(&rds_page_remainder_nb,
					      (unsigned long)CPU_DEAD,
					      (void *)(long)i);
}