/*
 * Asynchronous RAID-6 recovery calculations ASYNC_TX API.
 * Copyright(c) 2009 Intel Corporation
 *
 * based on raid6recov.c:
 *   Copyright 2002 H. Peter Anvin
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/dmaengine.h>

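/*
 * async_sum_product - compute dest = coef[0]*srcs[0] ^ coef[1]*srcs[1] over
 * GF(2^8).  This is the two-term "sum of products" used by the recovery
 * identities below.  When a PQ-capable channel is available the operation is
 * expressed as a two-source, Q-only (P disabled) PQ descriptor; otherwise it
 * falls back to the raid6_gfmul lookup tables.
 */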
static struct dma_async_tx_descriptor *
async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
		  size_t len, struct async_submit_ctl *submit)
{
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &dest, 1, srcs, 2, len);
	struct dma_device *dma = chan ? chan->device : NULL;
	struct dmaengine_unmap_data *unmap = NULL;
	const u8 *amul, *bmul;
	u8 ax, bx;
	u8 *a, *b, *c;

	if (dma)
		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);

	if (unmap) {
		struct device *dev = dma->dev;
		dma_addr_t pq[2];
		struct dma_async_tx_descriptor *tx;
		enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		unmap->addr[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE);
		unmap->addr[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE);
		unmap->to_cnt = 2;

		unmap->addr[2] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
		unmap->bidi_cnt = 1;
		/* engine only looks at Q, but expects it to follow P */
		pq[1] = unmap->addr[2];

		unmap->len = len;
		tx = dma->device_prep_dma_pq(chan, pq, unmap->addr, 2, coef,
					     len, dma_flags);
		if (tx) {
			dma_set_unmap(tx, unmap);
			async_tx_submit(chan, tx, submit);
			dmaengine_unmap_put(unmap);
			return tx;
		}

		/* could not get a descriptor, unmap and fall through to
		 * the synchronous path
		 */
		dmaengine_unmap_put(unmap);
	}

	/* run the operation synchronously */
	async_tx_quiesce(&submit->depend_tx);
	amul = raid6_gfmul[coef[0]];
	bmul = raid6_gfmul[coef[1]];
	a = page_address(srcs[0]);
	b = page_address(srcs[1]);
	c = page_address(dest);

	while (len--) {
		ax   = amul[*a++];
		bx   = bmul[*b++];
		*c++ = ax ^ bx;
	}

	return NULL;
}

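/*
 * async_mult - multiply every byte of @src by the GF(2^8) constant @coef and
 * store the result in @dest (dest = coef * src).  The DMA path expresses this
 * as a single-source, Q-only PQ operation; the synchronous path uses the
 * raid6_gfmul lookup table directly.
 */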
static struct dma_async_tx_descriptor *
async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
	   struct async_submit_ctl *submit)
{
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &dest, 1, &src, 1, len);
	struct dma_device *dma = chan ? chan->device : NULL;
	struct dmaengine_unmap_data *unmap = NULL;
	const u8 *qmul; /* Q multiplier table */
	u8 *d, *s;

	if (dma)
		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);

	if (unmap) {
		dma_addr_t dma_dest[2];
		struct device *dev = dma->dev;
		struct dma_async_tx_descriptor *tx;
		enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P;

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
		unmap->to_cnt++;
		unmap->addr[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL);
		dma_dest[1] = unmap->addr[1];
		unmap->bidi_cnt++;
		unmap->len = len;

		/* this looks funny, but the engine looks for Q at
		 * dma_dest[1] and ignores dma_dest[0] as a dest
		 * due to DMA_PREP_PQ_DISABLE_P
		 */
		tx = dma->device_prep_dma_pq(chan, dma_dest, unmap->addr,
					     1, &coef, len, dma_flags);

		if (tx) {
			dma_set_unmap(tx, unmap);
			dmaengine_unmap_put(unmap);
			async_tx_submit(chan, tx, submit);
			return tx;
		}

		/* could not get a descriptor, unmap and fall through to
		 * the synchronous path
		 */
		dmaengine_unmap_put(unmap);
	}

	/* no channel available, or failed to allocate a descriptor, so
	 * perform the operation synchronously
	 */
	async_tx_quiesce(&submit->depend_tx);
	qmul = raid6_gfmul[coef];
	d = page_address(dest);
	s = page_address(src);

	while (len--)
		*d++ = qmul[*s++];

	return NULL;
}

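/*
 * Two-failure recovery (cf. H. Peter Anvin, "The mathematics of RAID-6").
 * With failed data blocks at indices faila < failb, and Pxy/Qxy the P/Q
 * syndromes computed with zeroes substituted for the failed blocks:
 *
 *	P + Pxy = D[faila] + D[failb]
 *	Q + Qxy = g^faila * D[faila] + g^failb * D[failb]
 *
 * Solving over GF(2^8):
 *
 *	D[failb] = A * (P + Pxy) + B * (Q + Qxy)
 *	D[faila] = (P + Pxy) + D[failb]
 *
 * where A = 1/(g^(failb-faila) + 1) = raid6_gfexi[failb-faila] and
 * B = 1/(g^faila + g^failb) =
 *     raid6_gfinv[raid6_gfexp[faila] ^ raid6_gfexp[failb]].
 * The helpers below apply these identities with async_sum_product() and
 * async_xor().  In the 4-disk case no data blocks survive, so Pxy and Qxy
 * are zero and P/Q are used directly.
 */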
static struct dma_async_tx_descriptor *
__2data_recov_4(int disks, size_t bytes, int faila, int failb,
		struct page **blocks, struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct page *p, *q, *a, *b;
	struct page *srcs[2];
	unsigned char coef[2];
	enum async_tx_flags flags = submit->flags;
	dma_async_tx_callback cb_fn = submit->cb_fn;
	void *cb_param = submit->cb_param;
	void *scribble = submit->scribble;

	p = blocks[disks-2];
	q = blocks[disks-1];

	a = blocks[faila];
	b = blocks[failb];

	/* in the 4 disk case P + Pxy == P and Q + Qxy == Q */
	/* Dx = A*(P+Pxy) + B*(Q+Qxy) */
	srcs[0] = p;
	srcs[1] = q;
	coef[0] = raid6_gfexi[failb-faila];
	coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	tx = async_sum_product(b, srcs, coef, bytes, submit);

	/* Dy = P+Pxy+Dx */
	srcs[0] = p;
	srcs[1] = b;
	init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, cb_fn,
			  cb_param, scribble);
	tx = async_xor(a, srcs, 0, 2, bytes, submit);

	return tx;
}

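/*
 * 5-disk case: exactly one data block survives.  Pxy is then just a copy of
 * that block and Qxy is that block scaled by g^good, so both are generated
 * with a memcpy and a single async_mult() instead of a one-source
 * gen_syndrome, which not all DMA engines can express.
 */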
static struct dma_async_tx_descriptor *
__2data_recov_5(int disks, size_t bytes, int faila, int failb,
		struct page **blocks, struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct page *p, *q, *g, *dp, *dq;
	struct page *srcs[2];
	unsigned char coef[2];
	enum async_tx_flags flags = submit->flags;
	dma_async_tx_callback cb_fn = submit->cb_fn;
	void *cb_param = submit->cb_param;
	void *scribble = submit->scribble;
	int good_srcs, good, i;

	good_srcs = 0;
	good = -1;
	for (i = 0; i < disks-2; i++) {
		if (blocks[i] == NULL)
			continue;
		if (i == faila || i == failb)
			continue;
		good = i;
		good_srcs++;
	}
	BUG_ON(good_srcs > 1);

	p = blocks[disks-2];
	q = blocks[disks-1];
	g = blocks[good];

	/* Compute syndrome with zero for the missing data pages
	 * Use the dead data pages as temporary storage for delta p and
	 * delta q
	 */
	dp = blocks[faila];
	dq = blocks[failb];

	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	tx = async_memcpy(dp, g, 0, 0, bytes, submit);
	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit);

	/* compute P + Pxy */
	srcs[0] = dp;
	srcs[1] = p;
	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  NULL, NULL, scribble);
	tx = async_xor(dp, srcs, 0, 2, bytes, submit);

	/* compute Q + Qxy */
	srcs[0] = dq;
	srcs[1] = q;
	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  NULL, NULL, scribble);
	tx = async_xor(dq, srcs, 0, 2, bytes, submit);

	/* Dx = A*(P+Pxy) + B*(Q+Qxy) */
	srcs[0] = dp;
	srcs[1] = dq;
	coef[0] = raid6_gfexi[failb-faila];
	coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	tx = async_sum_product(dq, srcs, coef, bytes, submit);

	/* Dy = P+Pxy+Dx */
	srcs[0] = dp;
	srcs[1] = dq;
	init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
			  cb_param, scribble);
	tx = async_xor(dp, srcs, 0, 2, bytes, submit);

	return tx;
}

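/*
 * General case: regenerate the syndrome over the surviving data blocks by
 * temporarily pointing the P/Q slots of blocks[] at the dead pages and
 * NULL-ing the failed data slots, so async_gen_syndrome() treats them as
 * zero.  That leaves Pxy in the faila page and Qxy in the failb page, after
 * which the same identities as above are applied.
 */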
static struct dma_async_tx_descriptor *
__2data_recov_n(int disks, size_t bytes, int faila, int failb,
		struct page **blocks, struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct page *p, *q, *dp, *dq;
	struct page *srcs[2];
	unsigned char coef[2];
	enum async_tx_flags flags = submit->flags;
	dma_async_tx_callback cb_fn = submit->cb_fn;
	void *cb_param = submit->cb_param;
	void *scribble = submit->scribble;

	p = blocks[disks-2];
	q = blocks[disks-1];

	/* Compute syndrome with zero for the missing data pages
	 * Use the dead data pages as temporary storage for
	 * delta p and delta q
	 */
	dp = blocks[faila];
	blocks[faila] = NULL;
	blocks[disks-2] = dp;
	dq = blocks[failb];
	blocks[failb] = NULL;
	blocks[disks-1] = dq;

	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	tx = async_gen_syndrome(blocks, 0, disks, bytes, submit);

	/* Restore pointer table */
	blocks[faila] = dp;
	blocks[failb] = dq;
	blocks[disks-2] = p;
	blocks[disks-1] = q;

	/* compute P + Pxy */
	srcs[0] = dp;
	srcs[1] = p;
	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  NULL, NULL, scribble);
	tx = async_xor(dp, srcs, 0, 2, bytes, submit);

	/* compute Q + Qxy */
	srcs[0] = dq;
	srcs[1] = q;
	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  NULL, NULL, scribble);
	tx = async_xor(dq, srcs, 0, 2, bytes, submit);

	/* Dx = A*(P+Pxy) + B*(Q+Qxy) */
	srcs[0] = dp;
	srcs[1] = dq;
	coef[0] = raid6_gfexi[failb-faila];
	coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]];
	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	tx = async_sum_product(dq, srcs, coef, bytes, submit);

	/* Dy = P+Pxy+Dx */
	srcs[0] = dp;
	srcs[1] = dq;
	init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
			  cb_param, scribble);
	tx = async_xor(dp, srcs, 0, 2, bytes, submit);

	return tx;
}

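/*
 * Entry points.  Both public functions fall back to the synchronous
 * lib/raid6 recovery routines when no DMA_PQ channel or no scribble buffer
 * is available; otherwise they build a chain of memcpy, mult, xor,
 * gen_syndrome and sum_product operations as sketched above.
 */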
/**
 * async_raid6_2data_recov - asynchronously calculate two missing data blocks
 * @disks: number of disks in the RAID-6 array
 * @bytes: block size
 * @faila: first failed drive index
 * @failb: second failed drive index
 * @blocks: array of source pointers where the last two entries are p and q
 * @submit: submission/completion modifiers
 */
struct dma_async_tx_descriptor *
async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
			struct page **blocks, struct async_submit_ctl *submit)
{
	void *scribble = submit->scribble;
	int non_zero_srcs, i;

	BUG_ON(faila == failb);
	if (failb < faila)
		swap(faila, failb);

	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);

	/* if a dma resource is not available or a scribble buffer is not
	 * available punt to the synchronous path.  In the 'dma not
	 * available' case be sure to use the scribble buffer to
	 * preserve the content of 'blocks' as the caller intended.
	 */
	if (!async_dma_find_channel(DMA_PQ) || !scribble) {
		void **ptrs = scribble ? scribble : (void **) blocks;

		async_tx_quiesce(&submit->depend_tx);
		for (i = 0; i < disks; i++)
			if (blocks[i] == NULL)
				ptrs[i] = (void *) raid6_empty_zero_page;
			else
				ptrs[i] = page_address(blocks[i]);

		raid6_2data_recov(disks, bytes, faila, failb, ptrs);

		async_tx_sync_epilog(submit);

		return NULL;
	}

	non_zero_srcs = 0;
	for (i = 0; i < disks-2 && non_zero_srcs < 4; i++)
		if (blocks[i])
			non_zero_srcs++;
	switch (non_zero_srcs) {
	case 0:
	case 1:
		/* There must be at least 2 sources - the failed devices. */
		BUG();

	case 2:
		/* dma devices do not uniformly understand a zero source pq
		 * operation (in contrast to the synchronous case), so
		 * explicitly handle the special case of a 4 disk array with
		 * both data disks missing.
		 */
		return __2data_recov_4(disks, bytes, faila, failb, blocks, submit);
	case 3:
		/* dma devices do not uniformly understand a single
		 * source pq operation (in contrast to the synchronous
		 * case), so explicitly handle the special case of a 5 disk
		 * array with 2 of 3 data disks missing.
		 */
		return __2data_recov_5(disks, bytes, faila, failb, blocks, submit);
	default:
		return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
	}
}
EXPORT_SYMBOL_GPL(async_raid6_2data_recov);

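/*
 * Data+P recovery: only the Q parity block is usable.  Recompute the Q
 * syndrome Qx with a zero substituted for the missing data block; then
 *
 *	Q + Qx = g^faila * D[faila]   =>   D[faila] = (Q + Qx) * g^(-faila)
 *
 * and P is rebuilt by XOR-ing the recovered block into the partial parity
 * of the surviving data blocks.
 */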
/**
 * async_raid6_datap_recov - asynchronously calculate a data and the 'p' block
 * @disks: number of disks in the RAID-6 array
 * @bytes: block size
 * @faila: failed drive index
 * @blocks: array of source pointers where the last two entries are p and q
 * @submit: submission/completion modifiers
 */
struct dma_async_tx_descriptor *
async_raid6_datap_recov(int disks, size_t bytes, int faila,
			struct page **blocks, struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct page *p, *q, *dq;
	u8 coef;
	enum async_tx_flags flags = submit->flags;
	dma_async_tx_callback cb_fn = submit->cb_fn;
	void *cb_param = submit->cb_param;
	void *scribble = submit->scribble;
	int good_srcs, good, i;
	struct page *srcs[2];

	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);

	/* if a dma resource is not available or a scribble buffer is not
	 * available punt to the synchronous path.  In the 'dma not
	 * available' case be sure to use the scribble buffer to
	 * preserve the content of 'blocks' as the caller intended.
	 */
	if (!async_dma_find_channel(DMA_PQ) || !scribble) {
		void **ptrs = scribble ? scribble : (void **) blocks;

		async_tx_quiesce(&submit->depend_tx);
		for (i = 0; i < disks; i++)
			if (blocks[i] == NULL)
				ptrs[i] = (void *) raid6_empty_zero_page;
			else
				ptrs[i] = page_address(blocks[i]);

		raid6_datap_recov(disks, bytes, faila, ptrs);

		async_tx_sync_epilog(submit);

		return NULL;
	}

	good_srcs = 0;
	good = -1;
	for (i = 0; i < disks-2; i++) {
		if (i == faila)
			continue;
		if (blocks[i]) {
			good = i;
			good_srcs++;
			if (good_srcs > 1)
				break;
		}
	}
	BUG_ON(good_srcs == 0);

	p = blocks[disks-2];
	q = blocks[disks-1];

	/* Compute syndrome with zero for the missing data page
	 * Use the dead data page as temporary storage for delta q
	 */
	dq = blocks[faila];
	blocks[faila] = NULL;
	blocks[disks-1] = dq;

	/* in the 4-disk case we only need to perform a single source
	 * multiplication with the one good data block.
	 */
	if (good_srcs == 1) {
		struct page *g = blocks[good];

		init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
				  scribble);
		tx = async_memcpy(p, g, 0, 0, bytes, submit);

		init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
				  scribble);
		tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit);
	} else {
		init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
				  scribble);
		tx = async_gen_syndrome(blocks, 0, disks, bytes, submit);
	}

	/* Restore pointer table */
	blocks[faila] = dq;
	blocks[disks-1] = q;

	/* calculate g^{-faila} */
	coef = raid6_gfinv[raid6_gfexp[faila]];

	srcs[0] = dq;
	srcs[1] = q;
	init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx,
			  NULL, NULL, scribble);
	tx = async_xor(dq, srcs, 0, 2, bytes, submit);

	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
	tx = async_mult(dq, dq, coef, bytes, submit);

	srcs[0] = p;
	srcs[1] = dq;
	init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn,
			  cb_param, scribble);
	tx = async_xor(p, srcs, 0, 2, bytes, submit);

	return tx;
}
EXPORT_SYMBOL_GPL(async_raid6_datap_recov);

MODULE_AUTHOR("Dan Williams <[email protected]>");
MODULE_DESCRIPTION("asynchronous RAID-6 recovery api");
MODULE_LICENSE("GPL");