/*
 * File...........: linux/include/asm-s390x/idals.h
 * Author(s)......: Holger Smolinski <[email protected]>
 *                  Martin Schwidefsky <[email protected]>
 * Bugreports.to..: <[email protected]>
 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
 *
 * History of changes
 * 07/24/00 new file
 * 05/04/02 code restructuring.
 */

#ifndef _S390_IDALS_H
#define _S390_IDALS_H

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/cio.h>
#include <asm/uaccess.h>

#ifdef __s390x__
#define IDA_SIZE_LOG 12 /* 11 for 2k, 12 for 4k */
#else
#define IDA_SIZE_LOG 11 /* 11 for 2k, 12 for 4k */
#endif
#define IDA_BLOCK_SIZE (1L << IDA_SIZE_LOG)

/*
 * Test if an address/length pair needs an idal list.
 */
static inline int
idal_is_needed(void *vaddr, unsigned int length)
{
#ifdef __s390x__
        return ((__pa(vaddr) + length - 1) >> 31) != 0;
#else
        return 0;
#endif
}
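
/*
 * On 64 bit the check above tests whether the data area extends beyond
 * the first 2 GB of storage, i.e. beyond what the 31-bit data address
 * field of a CCW can reach directly.  A 4 KB buffer starting at physical
 * address 0x7ffff800, for instance, crosses the 2 GB boundary and thus
 * needs an idal list; any buffer that ends below 2 GB does not.
 */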


/*
 * Return the number of idal words needed for an address/length pair.
 */
static inline unsigned int
idal_nr_words(void *vaddr, unsigned int length)
{
#ifdef __s390x__
        if (idal_is_needed(vaddr, length))
                return ((__pa(vaddr) & (IDA_BLOCK_SIZE-1)) + length +
                        (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
#endif
        return 0;
}

/*
 * Create the list of idal words for an address/length pair.
 */
static inline unsigned long *
idal_create_words(unsigned long *idaws, void *vaddr, unsigned int length)
{
#ifdef __s390x__
        unsigned long paddr;
        unsigned int cidaw;

        paddr = __pa(vaddr);
        cidaw = ((paddr & (IDA_BLOCK_SIZE-1)) + length +
                 (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
        *idaws++ = paddr;
        paddr &= -IDA_BLOCK_SIZE;
        while (--cidaw > 0) {
                paddr += IDA_BLOCK_SIZE;
                *idaws++ = paddr;
        }
#endif
        return idaws;
}
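
/*
 * Usage sketch (illustrative, not part of the original interface): how a
 * channel program builder might combine idal_nr_words() and
 * idal_create_words() by hand.  The helper name is an assumption for the
 * example; the idaws array must provide at least idal_nr_words() entries
 * of storage below 2 GB, and the caller is assumed to have set ccw->count
 * to length already.  Most code uses set_normalized_cda() below instead.
 */
static inline int
example_fill_idal(struct ccw1 *ccw, unsigned long *idaws,
                  void *vaddr, unsigned int length)
{
        unsigned int nr;

        nr = idal_nr_words(vaddr, length);
        if (nr == 0) {
                /* Data is directly addressable, no idal list required. */
                ccw->cda = (__u32)(unsigned long) vaddr;
                return 0;
        }
        /* Fill the indirect data address words and point the CCW at them. */
        idal_create_words(idaws, vaddr, length);
        ccw->flags |= CCW_FLAG_IDA;
        ccw->cda = (__u32)(unsigned long) idaws;
        return nr;
}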

/*
 * Sets the address of the data in CCW.
 * If necessary it allocates an IDAL and sets the appropriate flags.
 */
static inline int
set_normalized_cda(struct ccw1 *ccw, void *vaddr)
{
#ifdef __s390x__
        unsigned int nridaws;
        unsigned long *idal;

        if (ccw->flags & CCW_FLAG_IDA)
                return -EINVAL;
        nridaws = idal_nr_words(vaddr, ccw->count);
        if (nridaws > 0) {
                idal = kmalloc(nridaws * sizeof(unsigned long),
                               GFP_ATOMIC | GFP_DMA);
                if (idal == NULL)
                        return -ENOMEM;
                idal_create_words(idal, vaddr, ccw->count);
                ccw->flags |= CCW_FLAG_IDA;
                vaddr = idal;
        }
#endif
        ccw->cda = (__u32)(unsigned long) vaddr;
        return 0;
}

/*
 * Releases any allocated IDAL related to the CCW.
 */
static inline void
clear_normalized_cda(struct ccw1 *ccw)
{
#ifdef __s390x__
        if (ccw->flags & CCW_FLAG_IDA) {
                kfree((void *)(unsigned long) ccw->cda);
                ccw->flags &= ~CCW_FLAG_IDA;
        }
#endif
        ccw->cda = 0;
}
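
/*
 * Usage sketch (illustrative, not part of the original header): preparing
 * a single read CCW for a kernel buffer and releasing it after the I/O
 * has completed.  The command code 0x02 and the function name are
 * assumptions made for the example; len must fit the 16-bit CCW count
 * field.
 */
static inline int
example_prepare_read_ccw(struct ccw1 *ccw, void *buf, unsigned int len)
{
        ccw->cmd_code = 0x02;   /* basic read, device dependent */
        ccw->flags = 0;
        ccw->count = len;
        /* Allocates and installs an IDAL only if buf/len needs one. */
        return set_normalized_cda(ccw, buf);
}

/*
 * Once the channel program has finished, the matching cleanup frees a
 * possibly allocated IDAL and clears the data address:
 *
 *      clear_normalized_cda(ccw);
 */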

/*
 * Idal buffer extension
 */
struct idal_buffer {
        size_t size;
        size_t page_order;
        void *data[0];
};

/*
 * Allocate an idal buffer
 */
static inline struct idal_buffer *
idal_buffer_alloc(size_t size, int page_order)
{
        struct idal_buffer *ib;
        int nr_chunks, nr_ptrs, i;

        nr_ptrs = (size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
        nr_chunks = (4096 << page_order) >> IDA_SIZE_LOG;
        ib = kmalloc(sizeof(struct idal_buffer) + nr_ptrs * sizeof(void *),
                     GFP_DMA | GFP_KERNEL);
        if (ib == NULL)
                return ERR_PTR(-ENOMEM);
        ib->size = size;
        ib->page_order = page_order;
        for (i = 0; i < nr_ptrs; i++) {
                if ((i & (nr_chunks - 1)) != 0) {
                        ib->data[i] = ib->data[i-1] + IDA_BLOCK_SIZE;
                        continue;
                }
                ib->data[i] = (void *)
                        __get_free_pages(GFP_KERNEL, page_order);
                if (ib->data[i] != NULL)
                        continue;
                /* Not enough memory */
                while (i >= nr_chunks) {
                        i -= nr_chunks;
                        free_pages((unsigned long) ib->data[i],
                                   ib->page_order);
                }
                kfree(ib);
                return ERR_PTR(-ENOMEM);
        }
        return ib;
}
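
/*
 * Worked example for the chunk arithmetic above (added for illustration):
 * with 4 KB IDA blocks, size = 12 KB and page_order = 0 give nr_ptrs = 3
 * and nr_chunks = 1, so three separate pages are allocated.  With
 * page_order = 1 the same size gives nr_chunks = 2: pages are allocated
 * as 8 KB chunks and every second data[] entry simply points
 * IDA_BLOCK_SIZE bytes into the chunk before it.
 */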

/*
 * Free an idal buffer.
 */
static inline void
idal_buffer_free(struct idal_buffer *ib)
{
        int nr_chunks, nr_ptrs, i;

        nr_ptrs = (ib->size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
        nr_chunks = (4096 << ib->page_order) >> IDA_SIZE_LOG;
        for (i = 0; i < nr_ptrs; i += nr_chunks)
                free_pages((unsigned long) ib->data[i], ib->page_order);
        kfree(ib);
}

/*
 * Test if an idal list is really needed.
 */
static inline int
__idal_buffer_is_needed(struct idal_buffer *ib)
{
#ifdef __s390x__
        return ib->size > (4096ul << ib->page_order) ||
                idal_is_needed(ib->data[0], ib->size);
#else
        return ib->size > (4096ul << ib->page_order);
#endif
}

/*
 * Set channel data address to idal buffer.
 */
static inline void
idal_buffer_set_cda(struct idal_buffer *ib, struct ccw1 *ccw)
{
        if (__idal_buffer_is_needed(ib)) {
                /* Setup idals */
                ccw->cda = (u32)(addr_t) ib->data;
                ccw->flags |= CCW_FLAG_IDA;
        } else
                /* No idals needed - use direct addressing. */
                ccw->cda = (u32)(addr_t) ib->data[0];
        ccw->count = ib->size;
}

/*
 * Copy count bytes from an idal buffer to user memory
 */
static inline size_t
idal_buffer_to_user(struct idal_buffer *ib, void __user *to, size_t count)
{
        size_t left;
        int i;

        BUG_ON(count > ib->size);
        for (i = 0; count > IDA_BLOCK_SIZE; i++) {
                left = copy_to_user(to, ib->data[i], IDA_BLOCK_SIZE);
                if (left)
                        return left + count - IDA_BLOCK_SIZE;
                to = (void __user *) to + IDA_BLOCK_SIZE;
                count -= IDA_BLOCK_SIZE;
        }
        return copy_to_user(to, ib->data[i], count);
}

/*
 * Copy count bytes from user memory to an idal buffer
 */
static inline size_t
idal_buffer_from_user(struct idal_buffer *ib, const void __user *from,
                      size_t count)
{
        size_t left;
        int i;

        BUG_ON(count > ib->size);
        for (i = 0; count > IDA_BLOCK_SIZE; i++) {
                left = copy_from_user(ib->data[i], from, IDA_BLOCK_SIZE);
                if (left)
                        return left + count - IDA_BLOCK_SIZE;
                from = (void __user *) from + IDA_BLOCK_SIZE;
                count -= IDA_BLOCK_SIZE;
        }
        return copy_from_user(ib->data[i], from, count);
}
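
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * typical life cycle of an idal buffer in a driver's write path.  The
 * helper name and the page_order of 0 are assumptions for the example;
 * starting the channel program itself is out of scope here.
 */
static inline ssize_t
example_idal_write(struct ccw1 *ccw, const char __user *ubuf, size_t len)
{
        struct idal_buffer *ib;

        /* One page per chunk; idal_buffer_alloc() may return ERR_PTR. */
        ib = idal_buffer_alloc(len, 0);
        if (IS_ERR(ib))
                return PTR_ERR(ib);
        /* Copy the user data into the (possibly scattered) kernel pages. */
        if (idal_buffer_from_user(ib, ubuf, len)) {
                idal_buffer_free(ib);
                return -EFAULT;
        }
        /* Point the CCW at the buffer, with an IDAL only if required. */
        idal_buffer_set_cda(ib, ccw);
        /* ... start the channel program and wait for completion here ... */
        idal_buffer_free(ib);
        return len;
}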

#endif /* _S390_IDALS_H */