/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef IOATDMA_H
#define IOATDMA_H

#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include "registers.h"
#include "hw.h"

#define IOAT_DMA_VERSION  "4.00"

#define IOAT_DMA_DCA_ANY_CPU		~0

#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)

#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)

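/*
 * Worked example for chan_num() (illustrative, inferred from the 0x80
 * stride in the macro): each channel occupies an 0x80-byte window of
 * MMIO register space, so a channel whose reg_base sits 0x100 bytes
 * above the device's reg_base is channel 0x100 / 0x80 == 2.
 */
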
/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)

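/*
 * Illustrative round trip (not driver code): because the hardware count
 * omits the two sources the engine always assumes, a 5-source XOR is
 * encoded as src_cnt_to_hw(5) == 3, and src_cnt_to_sw(3) == 5 recovers
 * the software view.  The 16-source variants behave the same way with
 * an offset of 9.
 */
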
/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

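/*
 * Hedged sketch of how this constant is typically applied when a null
 * descriptor is built (field names follow struct ioat_dma_descriptor in
 * hw.h; the exact call site in the .c files may differ):
 *
 *	hw->ctl_f.null = 1;
 *	hw->size = NULL_DESC_BUFFER_SIZE;	(1 byte, never 0)
 */
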
enum ioat_irq_mode {
	IOAT_NOIRQ = 0,
	IOAT_MSIX,
	IOAT_MSI,
	IOAT_INTX
};

/**
 * struct ioatdma_device - internal representation of an IOAT device
 * @pdev: PCI-Express device
 * @reg_base: MMIO register space base address
 * @dma_pool: for allocating DMA descriptors
 * @completion_pool: DMA buffers for completion ops
 * @sed_hw_pool: DMA super descriptor pools
 * @dma_dev: embedded struct dma_device
 * @version: version of ioatdma device
 * @msix_entries: irq handlers
 * @idx: per channel data
 * @dca: direct cache access context
 * @irq_mode: interrupt mode (INTX, MSI, MSIX)
 * @cap: cached copy of the DMA capabilities register
 */
struct ioatdma_device {
	struct pci_dev *pdev;
	void __iomem *reg_base;
	struct pci_pool *dma_pool;
	struct pci_pool *completion_pool;
#define MAX_SED_POOLS	5
	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
	struct dma_device dma_dev;
	u8 version;
#define IOAT_MAX_CHANS 4
	struct msix_entry msix_entries[IOAT_MAX_CHANS];
	struct ioatdma_chan *idx[IOAT_MAX_CHANS];
	struct dca_provider *dca;
	enum ioat_irq_mode irq_mode;
	u32 cap;
};

struct ioatdma_chan {
	struct dma_chan dma_chan;
	void __iomem *reg_base;
	dma_addr_t last_completion;
	spinlock_t cleanup_lock;
	unsigned long state;
	#define IOAT_CHAN_DOWN 0
	#define IOAT_COMPLETION_ACK 1
	#define IOAT_RESET_PENDING 2
	#define IOAT_KOBJ_INIT_FAIL 3
	#define IOAT_RESHAPE_PENDING 4
	#define IOAT_RUN 5
	#define IOAT_CHAN_ACTIVE 6
	struct timer_list timer;
	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
	#define IDLE_TIMEOUT msecs_to_jiffies(2000)
	#define RESET_DELAY msecs_to_jiffies(100)
	struct ioatdma_device *ioat_dma;
	dma_addr_t completion_dma;
	u64 *completion;
	struct tasklet_struct cleanup_task;
	struct kobject kobj;

	/* ioat v2 / v3 channel attributes
	 * @xfercap_log: log2 of channel max transfer length (for fast division)
	 * @head: allocated index
	 * @issued: hardware notification point
	 * @tail: cleanup index
	 * @dmacount: identical to 'head' except for occasionally resetting to zero
	 * @alloc_order: log2 of the number of allocated descriptors
	 * @produce: number of descriptors to produce at submit time
	 * @ring: software ring buffer implementation of hardware ring
	 * @prep_lock: serializes descriptor preparation (producers)
	 */
	size_t xfercap_log;
	u16 head;
	u16 issued;
	u16 tail;
	u16 dmacount;
	u16 alloc_order;
	u16 produce;
	struct ioat_ring_ent **ring;
	spinlock_t prep_lock;
};

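/*
 * Illustrative lifecycle of the ring indices above (a sketch inferred
 * from the field descriptions, not quoted from the driver): producers
 * advance 'head' under prep_lock as descriptors are allocated, 'issued'
 * catches up to 'head' when work is handed to hardware, and the cleanup
 * path advances 'tail' under cleanup_lock as completions are reaped, so
 * tail <= issued <= head in ring order at all times.
 */
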
struct ioat_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct dma_chan *, char *);
};

/**
 * struct ioat_sed_ent - wrapper around super extended hardware descriptor
 * @hw: hardware SED
 * @dma: dma address for the SED
 * @parent: pointer to the parent dma descriptor
 * @hw_pool: descriptor pool index
 */
struct ioat_sed_ent {
	struct ioat_sed_raw_descriptor *hw;
	dma_addr_t dma;
	struct ioat_ring_ent *parent;
	unsigned int hw_pool;
};

/**
 * struct ioat_ring_ent - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor (for memcpy)
 * @xor: hardware xor descriptor
 * @xor_ex: hardware xor extension descriptor
 * @pq: hardware pq descriptor
 * @pq_ex: hardware pq extension descriptor
 * @pqu: hardware pq update descriptor
 * @raw: hardware raw (un-typed) descriptor
 * @txd: the generic software descriptor for all engines
 * @len: total transaction length for unmap
 * @result: asynchronous result of validate operations
 * @id: identifier for debug
 * @sed: pointer to super extended descriptor sw desc
 */
struct ioat_ring_ent {
	union {
		struct ioat_dma_descriptor *hw;
		struct ioat_xor_descriptor *xor;
		struct ioat_xor_ext_descriptor *xor_ex;
		struct ioat_pq_descriptor *pq;
		struct ioat_pq_ext_descriptor *pq_ex;
		struct ioat_pq_update_descriptor *pqu;
		struct ioat_raw_descriptor *raw;
	};
	size_t len;
	struct dma_async_tx_descriptor txd;
	enum sum_check_flags *result;
#ifdef DEBUG
	int id;
#endif
	struct ioat_sed_ent *sed;
};

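/*
 * Note on the anonymous union above: all members alias the same
 * hardware descriptor slot; the union only selects a typed view for
 * whichever operation (memcpy, xor, pq, ...) the descriptor encodes.
 */
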
extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
extern int ioat_pending_level;
extern int ioat_ring_alloc_order;
extern struct kobj_type ioat_ktype;
extern struct kmem_cache *ioat_cache;
extern int ioat_ring_max_alloc_order;
extern struct kmem_cache *ioat_sed_cache;

static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
{
	return container_of(c, struct ioatdma_chan, dma_chan);
}

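/*
 * Illustrative usage (hypothetical handler, not driver code): the
 * dmaengine core hands out struct dma_chan pointers, and driver paths
 * recover the containing channel with to_ioat_chan():
 *
 *	static void example_handler(struct dma_chan *c)
 *	{
 *		struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
 *
 *		dev_dbg(to_dev(ioat_chan), "chan %d\n", chan_num(ioat_chan));
 *	}
 */
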
/* wrapper around hardware descriptor format + additional software fields */
#ifdef DEBUG
#define set_desc_id(desc, i) ((desc)->id = (i))
#define desc_id(desc) ((desc)->id)
#else
#define set_desc_id(desc, i)
#define desc_id(desc) (0)
#endif

static inline void
__dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
		struct dma_async_tx_descriptor *tx, int id)
{
	struct device *dev = to_dev(ioat_chan);

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
		" ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
		(unsigned long long) tx->phys,
		(unsigned long long) hw->next, tx->cookie, tx->flags,
		hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
}

#define dump_desc_dbg(c, d) \
	({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })

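/*
 * dump_desc_dbg() is a GNU statement expression: the 'if (d)' guard
 * makes it safe on a NULL descriptor, and the trailing 0 gives the
 * macro a value so it can be used where an expression is expected.
 */
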
static inline struct ioatdma_chan *
ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
{
	return ioat_dma->idx[index];
}

static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
{
	return readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET);
}

static inline u64 ioat_chansts_to_addr(u64 status)
{
	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}

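/*
 * The CHANSTS register packs the channel state into its low bits and
 * the address of the last completed descriptor into the remaining bits,
 * so one readq via ioat_chansts() feeds both the is_ioat_*() state
 * tests below and completion-address bookkeeping via
 * ioat_chansts_to_addr().
 */
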
static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
{
	return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
}

static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_SUSPEND,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u8 cmd;

	cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
}

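/*
 * Sketch of the expected reset flow (illustrative only; the driver
 * re-checks from a timer paced by RESET_DELAY rather than busy-waiting):
 *
 *	ioat_reset(ioat_chan);
 *	while (ioat_reset_pending(ioat_chan))
 *		cpu_relax();	(hardware clears the bit when done)
 */
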
static inline bool is_ioat_active(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
}

static inline bool is_ioat_idle(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
}

static inline bool is_ioat_halted(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
}

static inline bool is_ioat_suspended(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
}

/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
	return !!err;
}

#define IOAT_MAX_ORDER 16
#define ioat_get_alloc_order() \
	(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
#define ioat_get_max_alloc_order() \
	(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))

static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
	return 1 << ioat_chan->alloc_order;
}

/* count of descriptors in flight with the engine */
static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
			ioat_ring_size(ioat_chan));
}

/* count of descriptors pending submission to hardware */
static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
			ioat_ring_size(ioat_chan));
}

static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
{
	return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
}

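/*
 * Worked example (illustrative): with alloc_order == 4 the ring holds
 * 1 << 4 == 16 entries.  After wrapping, head == 2 and tail == 14 give
 * CIRC_CNT(2, 14, 16) == (2 - 14) & 15 == 4 active descriptors, so
 * ioat_ring_space() reports 16 - 4 == 12 free slots.
 */
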
static inline u16
ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
{
	u16 num_descs = len >> ioat_chan->xfercap_log;

	num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
	return num_descs;
}

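/*
 * Worked example (illustrative): with xfercap_log == 20 (1 MiB max per
 * descriptor), a 2.5 MiB transfer yields len >> 20 == 2 full
 * descriptors plus one more for the 0.5 MiB remainder, so
 * ioat_xferlen_to_descs() returns 3.
 */
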
static inline struct ioat_ring_ent *
ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
{
	return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
}

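/*
 * Because the ring size is a power of two, masking with (size - 1)
 * performs the wrap implicitly; head, issued and tail can therefore run
 * as free-running u16 counters and still index the ring correctly here.
 */
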
static inline void
ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
{
	writel(addr & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}

/* IOAT Prep functions */
struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	      unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		  unsigned int src_cnt, size_t len,
		  enum sum_check_flags *result, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	     unsigned int src_cnt, const unsigned char *scf, size_t len,
	     unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		 unsigned int src_cnt, const unsigned char *scf, size_t len,
		 enum sum_check_flags *pqres, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		    unsigned int src_cnt, size_t len,
		    enum sum_check_flags *result, unsigned long flags);

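/*
 * Illustrative client-side flow for the prep functions above (a sketch
 * using generic dmaengine calls; in practice clients reach these
 * handlers through the dma_device ops, and error handling is elided):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = ioat_dma_prep_memcpy_lock(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */
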
/* IOAT Operation functions */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
	       struct dma_tx_state *txstate);
void ioat_cleanup_event(unsigned long data);
void ioat_timer_event(unsigned long data);
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
void ioat_issue_pending(struct dma_chan *chan);

/* IOAT Init functions */
bool is_bwd_ioat(struct pci_dev *pdev);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *ioat_dma);
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
void ioat_stop(struct ioatdma_chan *ioat_chan);
#endif /* IOATDMA_H */