/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef IOATDMA_H
#define IOATDMA_H

#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include "registers.h"
#include "hw.h"

#define IOAT_DMA_VERSION  "4.00"

#define IOAT_DMA_DCA_ANY_CPU		~0

#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)

#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)
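/*
 * Each channel's registers occupy a 0x80-byte window above the device's
 * MMIO base, so the distance between the channel's and the device's
 * reg_base, divided by 0x80, recovers the channel index.
 */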

/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)
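/*
 * Illustrative example (not in the original source): the descriptor fields
 * store the source count biased by the two-source minimum, so a 5-source
 * xor is programmed as src_cnt_to_hw(5) == 3 and decoded back with
 * src_cnt_to_sw(3) == 5; the extended 16-source format is biased by 9.
 */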
47 | ||
1f27adc2 DW |
48 | /* |
49 | * workaround for IOAT ver.3.0 null descriptor issue | |
50 | * (channel returns error when size is 0) | |
51 | */ | |
52 | #define NULL_DESC_BUFFER_SIZE 1 | |
53 | ||
8a52b9ff DJ |
54 | enum ioat_irq_mode { |
55 | IOAT_NOIRQ = 0, | |
56 | IOAT_MSIX, | |
8a52b9ff DJ |
57 | IOAT_MSI, |
58 | IOAT_INTX | |
59 | }; | |
60 | ||
0bbd5f4e | 61 | /** |
8ab89567 | 62 | * struct ioatdma_device - internal representation of a IOAT device |
0bbd5f4e CL |
63 | * @pdev: PCI-Express device |
64 | * @reg_base: MMIO register space base address | |
c7b0e8d7 DJ |
65 | * @completion_pool: DMA buffers for completion ops |
66 | * @sed_hw_pool: DMA super descriptor pools | |
55f878ec | 67 | * @dma_dev: embedded struct dma_device |
8ab89567 | 68 | * @version: version of ioatdma device |
7bb67c14 SN |
69 | * @msix_entries: irq handlers |
70 | * @idx: per channel data | |
f2427e27 | 71 | * @dca: direct cache access context |
c7b0e8d7 DJ |
72 | * @irq_mode: interrupt mode (INTX, MSI, MSIX) |
73 | * @cap: read DMA capabilities register | |
0bbd5f4e | 74 | */ |
8ab89567 | 75 | struct ioatdma_device { |
0bbd5f4e | 76 | struct pci_dev *pdev; |
47b16539 | 77 | void __iomem *reg_base; |
679cfbf7 | 78 | struct dma_pool *completion_pool; |
7727eaa4 DJ |
79 | #define MAX_SED_POOLS 5 |
80 | struct dma_pool *sed_hw_pool[MAX_SED_POOLS]; | |
55f878ec | 81 | struct dma_device dma_dev; |
8ab89567 | 82 | u8 version; |
ad4a7b50 DJ |
83 | #define IOAT_MAX_CHANS 4 |
84 | struct msix_entry msix_entries[IOAT_MAX_CHANS]; | |
85 | struct ioatdma_chan *idx[IOAT_MAX_CHANS]; | |
f2427e27 | 86 | struct dca_provider *dca; |
8a52b9ff | 87 | enum ioat_irq_mode irq_mode; |
75c6f0ab | 88 | u32 cap; |
c997e30e DJ |
89 | |
90 | /* shadow version for CB3.3 chan reset errata workaround */ | |
91 | u64 msixtba0; | |
92 | u64 msixdata0; | |
93 | u32 msixpba; | |
0bbd5f4e CL |
94 | }; |
95 | ||
dd4645eb DJ |
96 | struct ioat_descs { |
97 | void *virt; | |
98 | dma_addr_t hw; | |
99 | }; | |
100 | ||
5a976888 DJ |
101 | struct ioatdma_chan { |
102 | struct dma_chan dma_chan; | |
47b16539 | 103 | void __iomem *reg_base; |
27502935 | 104 | dma_addr_t last_completion; |
0bbd5f4e | 105 | spinlock_t cleanup_lock; |
09c8a5b8 | 106 | unsigned long state; |
ad4a7b50 | 107 | #define IOAT_CHAN_DOWN 0 |
09c8a5b8 DW |
108 | #define IOAT_COMPLETION_ACK 1 |
109 | #define IOAT_RESET_PENDING 2 | |
5669e31c | 110 | #define IOAT_KOBJ_INIT_FAIL 3 |
556ab45f | 111 | #define IOAT_RUN 5 |
4dec23d7 | 112 | #define IOAT_CHAN_ACTIVE 6 |
09c8a5b8 DW |
113 | struct timer_list timer; |
114 | #define COMPLETION_TIMEOUT msecs_to_jiffies(100) | |
a309218a | 115 | #define IDLE_TIMEOUT msecs_to_jiffies(2000) |
09c8a5b8 | 116 | #define RESET_DELAY msecs_to_jiffies(100) |
55f878ec | 117 | struct ioatdma_device *ioat_dma; |
4fb9b9e8 DW |
118 | dma_addr_t completion_dma; |
119 | u64 *completion; | |
3e037454 | 120 | struct tasklet_struct cleanup_task; |
5669e31c | 121 | struct kobject kobj; |
5a976888 DJ |
122 | |
123 | /* ioat v2 / v3 channel attributes | |
124 | * @xfercap_log; log2 of channel max transfer length (for fast division) | |
125 | * @head: allocated index | |
126 | * @issued: hardware notification point | |
127 | * @tail: cleanup index | |
128 | * @dmacount: identical to 'head' except for occasionally resetting to zero | |
129 | * @alloc_order: log2 of the number of allocated descriptors | |
130 | * @produce: number of descriptors to produce at submit time | |
131 | * @ring: software ring buffer implementation of hardware ring | |
132 | * @prep_lock: serializes descriptor preparation (producers) | |
133 | */ | |
134 | size_t xfercap_log; | |
135 | u16 head; | |
136 | u16 issued; | |
137 | u16 tail; | |
138 | u16 dmacount; | |
139 | u16 alloc_order; | |
140 | u16 produce; | |
141 | struct ioat_ring_ent **ring; | |
142 | spinlock_t prep_lock; | |
dd4645eb DJ |
143 | struct ioat_descs descs[2]; |
144 | int desc_chunks; | |
268e2519 US |
145 | int intr_coalesce; |
146 | int prev_intr_coalesce; | |
0bbd5f4e CL |
147 | }; |
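/*
 * Ring bookkeeping at a glance (restating the field docs above): descriptor
 * prep advances @head, issuing pending work moves @issued up to @head when
 * the doorbell is rung, and the cleanup path advances @tail as completions
 * are reaped. All three are free-running u16 indices masked to the ring size.
 */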
148 | ||
5669e31c DW |
149 | struct ioat_sysfs_entry { |
150 | struct attribute attr; | |
151 | ssize_t (*show)(struct dma_chan *, char *); | |
268e2519 | 152 | ssize_t (*store)(struct dma_chan *, const char *, size_t); |
5669e31c | 153 | }; |
5cbafa65 | 154 | |
7727eaa4 DJ |
155 | /** |
156 | * struct ioat_sed_ent - wrapper around super extended hardware descriptor | |
157 | * @hw: hardware SED | |
c7b0e8d7 | 158 | * @dma: dma address for the SED |
7727eaa4 | 159 | * @parent: point to the dma descriptor that's the parent |
c7b0e8d7 | 160 | * @hw_pool: descriptor pool index |
7727eaa4 DJ |
161 | */ |
162 | struct ioat_sed_ent { | |
163 | struct ioat_sed_raw_descriptor *hw; | |
164 | dma_addr_t dma; | |
165 | struct ioat_ring_ent *parent; | |
166 | unsigned int hw_pool; | |
167 | }; | |
168 | ||
885b2010 DJ |
169 | /** |
170 | * struct ioat_ring_ent - wrapper around hardware descriptor | |
171 | * @hw: hardware DMA descriptor (for memcpy) | |
885b2010 DJ |
172 | * @xor: hardware xor descriptor |
173 | * @xor_ex: hardware xor extension descriptor | |
174 | * @pq: hardware pq descriptor | |
175 | * @pq_ex: hardware pq extension descriptor | |
176 | * @pqu: hardware pq update descriptor | |
177 | * @raw: hardware raw (un-typed) descriptor | |
178 | * @txd: the generic software descriptor for all engines | |
179 | * @len: total transaction length for unmap | |
180 | * @result: asynchronous result of validate operations | |
181 | * @id: identifier for debug | |
c7b0e8d7 | 182 | * @sed: pointer to super extended descriptor sw desc |
885b2010 DJ |
183 | */ |
184 | ||
185 | struct ioat_ring_ent { | |
186 | union { | |
187 | struct ioat_dma_descriptor *hw; | |
188 | struct ioat_xor_descriptor *xor; | |
189 | struct ioat_xor_ext_descriptor *xor_ex; | |
190 | struct ioat_pq_descriptor *pq; | |
191 | struct ioat_pq_ext_descriptor *pq_ex; | |
192 | struct ioat_pq_update_descriptor *pqu; | |
193 | struct ioat_raw_descriptor *raw; | |
194 | }; | |
195 | size_t len; | |
196 | struct dma_async_tx_descriptor txd; | |
197 | enum sum_check_flags *result; | |
198 | #ifdef DEBUG | |
199 | int id; | |
200 | #endif | |
201 | struct ioat_sed_ent *sed; | |
202 | }; | |
203 | ||
599d49de DJ |
204 | extern const struct sysfs_ops ioat_sysfs_ops; |
205 | extern struct ioat_sysfs_entry ioat_version_attr; | |
206 | extern struct ioat_sysfs_entry ioat_cap_attr; | |
207 | extern int ioat_pending_level; | |
208 | extern int ioat_ring_alloc_order; | |
209 | extern struct kobj_type ioat_ktype; | |
210 | extern struct kmem_cache *ioat_cache; | |
211 | extern int ioat_ring_max_alloc_order; | |
212 | extern struct kmem_cache *ioat_sed_cache; | |
213 | ||
5a976888 | 214 | static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c) |
dcbc853a | 215 | { |
5a976888 | 216 | return container_of(c, struct ioatdma_chan, dma_chan); |
dcbc853a DW |
217 | } |
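/*
 * dmaengine hands drivers the embedded struct dma_chan; since it lives
 * inside struct ioatdma_chan, container_of() recovers the driver-private
 * channel state with no extra lookup.
 */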
218 | ||
0bbd5f4e | 219 | /* wrapper around hardware descriptor format + additional software fields */ |
6df9183a DW |
220 | #ifdef DEBUG |
221 | #define set_desc_id(desc, i) ((desc)->id = (i)) | |
222 | #define desc_id(desc) ((desc)->id) | |
223 | #else | |
224 | #define set_desc_id(desc, i) | |
225 | #define desc_id(desc) (0) | |
226 | #endif | |
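/*
 * In non-DEBUG builds set_desc_id() expands to nothing and desc_id() to a
 * constant 0, so callers can use them unconditionally without #ifdefs.
 */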
227 | ||
228 | static inline void | |
5a976888 | 229 | __dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw, |
6df9183a DW |
230 | struct dma_async_tx_descriptor *tx, int id) |
231 | { | |
5a976888 | 232 | struct device *dev = to_dev(ioat_chan); |
6df9183a DW |
233 | |
234 | dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x" | |
50f9f97e | 235 | " ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id, |
6df9183a DW |
236 | (unsigned long long) tx->phys, |
237 | (unsigned long long) hw->next, tx->cookie, tx->flags, | |
238 | hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write); | |
239 | } | |
240 | ||
241 | #define dump_desc_dbg(c, d) \ | |
5a976888 | 242 | ({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; }) |
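/*
 * The statement-expression form lets dump_desc_dbg() appear where a value
 * is expected (it always evaluates to 0), and the NULL check makes it safe
 * to pass a descriptor pointer that may not be populated.
 */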

static inline struct ioatdma_chan *
ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
{
	return ioat_dma->idx[index];
}

static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
{
	return readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET);
}

static inline u64 ioat_chansts_to_addr(u64 status)
{
	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}
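/*
 * CHANSTS packs the bus address of the last completed descriptor into its
 * upper bits; masking off the low status bits yields the address the
 * cleanup path uses to see how far hardware has progressed.
 */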
259 | ||
5a976888 | 260 | static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan) |
09c8a5b8 | 261 | { |
5a976888 | 262 | return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); |
09c8a5b8 DW |
263 | } |
264 | ||
5a976888 | 265 | static inline void ioat_suspend(struct ioatdma_chan *ioat_chan) |
09c8a5b8 | 266 | { |
55f878ec | 267 | u8 ver = ioat_chan->ioat_dma->version; |
09c8a5b8 | 268 | |
5a976888 DJ |
269 | writeb(IOAT_CHANCMD_SUSPEND, |
270 | ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); | |
09c8a5b8 DW |
271 | } |
272 | ||
5a976888 | 273 | static inline void ioat_reset(struct ioatdma_chan *ioat_chan) |
a6d52d70 | 274 | { |
55f878ec | 275 | u8 ver = ioat_chan->ioat_dma->version; |
a6d52d70 | 276 | |
5a976888 DJ |
277 | writeb(IOAT_CHANCMD_RESET, |
278 | ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); | |
a6d52d70 DW |
279 | } |
280 | ||
5a976888 | 281 | static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan) |
a6d52d70 | 282 | { |
55f878ec | 283 | u8 ver = ioat_chan->ioat_dma->version; |
a6d52d70 DW |
284 | u8 cmd; |
285 | ||
5a976888 | 286 | cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); |
a6d52d70 DW |
287 | return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET; |
288 | } | |
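/*
 * The CHANCMD register offset differs across hardware versions (hence the
 * IOAT_CHANCMD_OFFSET(ver) lookup); the check above treats a still-set
 * RESET bit as a reset that hardware has not yet finished.
 */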
289 | ||
09c8a5b8 DW |
290 | static inline bool is_ioat_active(unsigned long status) |
291 | { | |
292 | return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE); | |
293 | } | |
294 | ||
295 | static inline bool is_ioat_idle(unsigned long status) | |
296 | { | |
297 | return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE); | |
298 | } | |
299 | ||
300 | static inline bool is_ioat_halted(unsigned long status) | |
301 | { | |
302 | return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED); | |
303 | } | |
304 | ||
305 | static inline bool is_ioat_suspended(unsigned long status) | |
306 | { | |
307 | return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED); | |
308 | } | |
309 | ||
310 | /* channel was fatally programmed */ | |
311 | static inline bool is_ioat_bug(unsigned long err) | |
312 | { | |
b57014de | 313 | return !!err; |
09c8a5b8 DW |
314 | } |
315 | ||
885b2010 | 316 | #define IOAT_MAX_ORDER 16 |
dd4645eb DJ |
317 | #define IOAT_MAX_DESCS 65536 |
318 | #define IOAT_DESCS_PER_2M 32768 | |
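/*
 * 1 << IOAT_MAX_ORDER == IOAT_MAX_DESCS, and a 2 MB chunk holding 32768
 * descriptors implies 64 bytes per hardware descriptor; a maximally sized
 * ring therefore spans two chunks, matching descs[2] in struct ioatdma_chan.
 */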

static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
	return 1 << ioat_chan->alloc_order;
}

/* count of descriptors in flight with the engine */
static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
			ioat_ring_size(ioat_chan));
}

/* count of descriptors pending submission to hardware */
static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
			ioat_ring_size(ioat_chan));
}
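/*
 * Worked example (illustrative): with alloc_order == 4 (a 16-entry ring),
 * head == 10, issued == 7 and tail == 5, ioat_ring_active() returns
 * CIRC_CNT(10, 5, 16) == 5 and ioat_ring_pending() returns
 * CIRC_CNT(10, 7, 16) == 3; both stay correct as the u16 indices wrap.
 */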
338 | ||
339 | static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan) | |
340 | { | |
341 | return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan); | |
342 | } | |
343 | ||
344 | static inline u16 | |
345 | ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len) | |
346 | { | |
347 | u16 num_descs = len >> ioat_chan->xfercap_log; | |
348 | ||
349 | num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1)); | |
350 | return num_descs; | |
351 | } | |
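/*
 * This is a round-up division by the channel's max transfer size. Example
 * (illustrative): with xfercap_log == 20 (1 MB per descriptor), a 2.5 MB
 * transfer gives num_descs = 2, plus 1 for the partial remainder, so 3.
 */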
352 | ||
353 | static inline struct ioat_ring_ent * | |
354 | ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx) | |
355 | { | |
356 | return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)]; | |
357 | } | |
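/*
 * The ring size is always a power of two, so masking with (size - 1) is a
 * cheap idx % size that wraps free-running indices into the ring.
 */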
358 | ||
359 | static inline void | |
360 | ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr) | |
361 | { | |
362 | writel(addr & 0x00000000FFFFFFFF, | |
363 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); | |
364 | writel(addr >> 32, | |
365 | ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); | |
366 | } | |
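/*
 * The 64-bit descriptor chain address is programmed as two 32-bit MMIO
 * writes, low word first, into the CHAINADDR register pair.
 */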
367 | ||
599d49de DJ |
368 | /* IOAT Prep functions */ |
369 | struct dma_async_tx_descriptor * | |
370 | ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, | |
371 | dma_addr_t dma_src, size_t len, unsigned long flags); | |
c0f28ce6 DJ |
372 | struct dma_async_tx_descriptor * |
373 | ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags); | |
374 | struct dma_async_tx_descriptor * | |
375 | ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | |
376 | unsigned int src_cnt, size_t len, unsigned long flags); | |
377 | struct dma_async_tx_descriptor * | |
378 | ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, | |
379 | unsigned int src_cnt, size_t len, | |
380 | enum sum_check_flags *result, unsigned long flags); | |
381 | struct dma_async_tx_descriptor * | |
382 | ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, | |
383 | unsigned int src_cnt, const unsigned char *scf, size_t len, | |
384 | unsigned long flags); | |
385 | struct dma_async_tx_descriptor * | |
386 | ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, | |
387 | unsigned int src_cnt, const unsigned char *scf, size_t len, | |
388 | enum sum_check_flags *pqres, unsigned long flags); | |
389 | struct dma_async_tx_descriptor * | |
390 | ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, | |
391 | unsigned int src_cnt, size_t len, unsigned long flags); | |
392 | struct dma_async_tx_descriptor * | |
393 | ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, | |
394 | unsigned int src_cnt, size_t len, | |
395 | enum sum_check_flags *result, unsigned long flags); | |
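/*
 * Minimal usage sketch (illustrative, not from this file): these prep
 * routines back the generic dmaengine ops, so a client typically reaches
 * them through the dmaengine API rather than calling them directly:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);	  (ends up in ioat_issue_pending())
 */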

/* IOAT Operation functions */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
	       struct dma_tx_state *txstate);
void ioat_cleanup_event(unsigned long data);
void ioat_timer_event(struct timer_list *t);
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
void ioat_issue_pending(struct dma_chan *chan);

/* IOAT Init functions */
bool is_bwd_ioat(struct pci_dev *pdev);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *ioat_dma);
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
void ioat_stop(struct ioatdma_chan *ioat_chan);
#endif /* IOATDMA_H */