/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef IOATDMA_H
#define IOATDMA_H

#include <linux/dmaengine.h>
#include "hw.h"
#include "registers.h"
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <net/tcp.h>

#define IOAT_DMA_VERSION "3.64"

#define IOAT_LOW_COMPLETION_MASK 0xffffffc0
#define IOAT_DMA_DCA_ANY_CPU ~0

#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd)
#define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev)

#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)

/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

/**
 * struct ioatdma_device - internal representation of an IOAT device
 * @pdev: PCI-Express device
 * @reg_base: MMIO register space base address
 * @dma_pool: for allocating DMA descriptors
 * @completion_pool: for allocating per-channel completion writeback areas
 * @common: embedded struct dma_device
 * @version: version of ioatdma device
 * @msix_entries: irq handlers
 * @idx: per channel data
 * @dca: direct cache access context
 * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
 * @enumerate_channels: hw version specific channel enumeration
 */
struct ioatdma_device {
        struct pci_dev *pdev;
        void __iomem *reg_base;
        struct pci_pool *dma_pool;
        struct pci_pool *completion_pool;
        struct dma_device common;
        u8 version;
        struct msix_entry msix_entries[4];
        struct ioat_chan_common *idx[4];
        struct dca_provider *dca;
        void (*intr_quirk)(struct ioatdma_device *device);
        int (*enumerate_channels)(struct ioatdma_device *device);
};

struct ioat_chan_common {
        struct dma_chan common;
        void __iomem *reg_base;
        unsigned long last_completion;
        spinlock_t cleanup_lock;
        dma_cookie_t completed_cookie;
        unsigned long state;
        #define IOAT_COMPLETION_PENDING 0
        #define IOAT_COMPLETION_ACK 1
        #define IOAT_RESET_PENDING 2
        struct timer_list timer;
        #define COMPLETION_TIMEOUT msecs_to_jiffies(100)
        #define RESET_DELAY msecs_to_jiffies(100)
        struct ioatdma_device *device;
        dma_addr_t completion_dma;
        u64 *completion;
        struct tasklet_struct cleanup_task;
};

/**
 * struct ioat_dma_chan - internal representation of a DMA channel
 */
struct ioat_dma_chan {
        struct ioat_chan_common base;

        size_t xfercap; /* XFERCAP register value expanded out */

        spinlock_t desc_lock;
        struct list_head free_desc;
        struct list_head used_desc;

        int pending;
        u16 desccount;
};

static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c)
{
        return container_of(c, struct ioat_chan_common, common);
}

static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
{
        struct ioat_chan_common *chan = to_chan_common(c);

        return container_of(chan, struct ioat_dma_chan, base);
}

/**
 * ioat_is_complete - poll the status of an ioat transaction
 * @c: channel handle
 * @cookie: transaction identifier
 * @done: if set, updated with last completed transaction
 * @used: if set, updated with last used transaction
 */
static inline enum dma_status
ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie,
                 dma_cookie_t *done, dma_cookie_t *used)
{
        struct ioat_chan_common *chan = to_chan_common(c);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;

        last_used = c->cookie;
        last_complete = chan->completed_cookie;

        if (done)
                *done = last_complete;
        if (used)
                *used = last_used;

        return dma_async_is_complete(cookie, last_complete, last_used);
}

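/*
 * Illustrative sketch, not part of the original header: busy-poll a cookie
 * returned by tx_submit() until it leaves the DMA_IN_PROGRESS state.  The
 * helper name is hypothetical; regular dmaengine clients would go through
 * dma_async_is_tx_complete() rather than calling ioat_is_complete() directly.
 */
static inline enum dma_status
ioat_poll_cookie(struct dma_chan *c, dma_cookie_t cookie)
{
        enum dma_status status;
        dma_cookie_t done, used;

        do {
                /* re-read the channel's cookie bookkeeping on every pass */
                status = ioat_is_complete(c, cookie, &done, &used);
        } while (status == DMA_IN_PROGRESS);

        return status;
}
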
/* wrapper around hardware descriptor format + additional software fields */

/**
 * struct ioat_desc_sw - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor
 * @node: this descriptor will either be on the free list,
 *     or attached to a transaction list (async_tx.tx_list)
 * @len: total transaction size in bytes
 * @txd: the generic software descriptor for all engines
 * @id: identifier for debug
 */
struct ioat_desc_sw {
        struct ioat_dma_descriptor *hw;
        struct list_head node;
        size_t len;
        struct dma_async_tx_descriptor txd;
#ifdef DEBUG
        int id;
#endif
};

#ifdef DEBUG
#define set_desc_id(desc, i) ((desc)->id = (i))
#define desc_id(desc) ((desc)->id)
#else
#define set_desc_id(desc, i)
#define desc_id(desc) (0)
#endif

static inline void
__dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw,
                struct dma_async_tx_descriptor *tx, int id)
{
        struct device *dev = to_dev(chan);

        dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
                " ctl: %#x (op: %d int_en: %d compl: %d)\n", id,
                (unsigned long long) tx->phys,
                (unsigned long long) hw->next, tx->cookie, tx->flags,
                hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
}

#define dump_desc_dbg(c, d) \
        ({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; })

static inline void ioat_set_tcp_copy_break(unsigned long copybreak)
{
#ifdef CONFIG_NET_DMA
        sysctl_tcp_dma_copybreak = copybreak;
#endif
}

static inline struct ioat_chan_common *
ioat_chan_by_index(struct ioatdma_device *device, int index)
{
        return device->idx[index];
}

static inline u64 ioat_chansts(struct ioat_chan_common *chan)
{
        u8 ver = chan->device->version;
        u64 status;
        u32 status_lo;

        /* We need to read the low address first as this causes the
         * chipset to latch the upper bits for the subsequent read
         */
        status_lo = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
        status = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
        status <<= 32;
        status |= status_lo;

        return status;
}

static inline void ioat_start(struct ioat_chan_common *chan)
{
        u8 ver = chan->device->version;

        writeb(IOAT_CHANCMD_START, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline u64 ioat_chansts_to_addr(u64 status)
{
        return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}

static inline u32 ioat_chanerr(struct ioat_chan_common *chan)
{
        return readl(chan->reg_base + IOAT_CHANERR_OFFSET);
}

static inline void ioat_suspend(struct ioat_chan_common *chan)
{
        u8 ver = chan->device->version;

        writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr)
{
        struct ioat_chan_common *chan = &ioat->base;

        writel(addr & 0x00000000FFFFFFFF,
               chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
        writel(addr >> 32,
               chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);
}

static inline bool is_ioat_active(unsigned long status)
{
        return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
}

static inline bool is_ioat_idle(unsigned long status)
{
        return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
}

static inline bool is_ioat_halted(unsigned long status)
{
        return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
}

static inline bool is_ioat_suspended(unsigned long status)
{
        return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
}

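/*
 * Illustrative sketch, not part of the original header: one CHANSTS read via
 * ioat_chansts() yields both the last completed descriptor address (upper
 * bits, see ioat_chansts_to_addr()) and the channel state (low bits, decoded
 * by the is_ioat_*() predicates above).  The helper name is hypothetical.
 */
static inline bool ioat_chan_is_quiescent(struct ioat_chan_common *chan)
{
        u64 status = ioat_chansts(chan);

        /* idle or suspended means no descriptor is currently being processed */
        return is_ioat_idle(status) || is_ioat_suspended(status);
}
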
/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
        return !!(err & (IOAT_CHANERR_SRC_ADDR_ERR|IOAT_CHANERR_DEST_ADDR_ERR|
                         IOAT_CHANERR_NEXT_ADDR_ERR|IOAT_CHANERR_CONTROL_ERR|
                         IOAT_CHANERR_LENGTH_ERR));
}

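/*
 * Illustrative sketch, not part of the original header: pair ioat_chanerr()
 * with is_ioat_bug() to distinguish fatal programming errors from other
 * CHANERR bits.  The helper name is hypothetical.
 */
static inline bool ioat_chan_has_fatal_error(struct ioat_chan_common *chan)
{
        u32 chanerr = ioat_chanerr(chan);

        return is_ioat_bug(chanerr);
}
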
int __devinit ioat_probe(struct ioatdma_device *device);
int __devinit ioat_register(struct ioatdma_device *device);
int __devinit ioat1_dma_probe(struct ioatdma_device *dev, int dca);
void __devexit ioat_dma_remove(struct ioatdma_device *device);
struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
                                              void __iomem *iobase);
unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
void ioat_init_channel(struct ioatdma_device *device,
                       struct ioat_chan_common *chan, int idx,
                       void (*timer_fn)(unsigned long),
                       void (*tasklet)(unsigned long),
                       unsigned long ioat);
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
                    size_t len, struct ioat_dma_descriptor *hw);
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
                           unsigned long *phys_complete);
#endif /* IOATDMA_H */