/*
 * Setup routines for AGP 3.5 compliant bridges.
 */

#include <linux/list.h>
#include <linux/pci.h>
#include <linux/agp_backend.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "agp.h"

/* Generic AGP 3.5 enabling routines */

struct agp_3_5_dev {
        struct list_head list;
        u8 capndx;
        u32 maxbw;
        struct pci_dev *dev;
};

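/* Insert "new" so that the list stays sorted by ascending maxbw. */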
static void agp_3_5_dev_list_insert(struct list_head *head, struct list_head *new)
{
        struct agp_3_5_dev *cur, *n = list_entry(new, struct agp_3_5_dev, list);
        struct list_head *pos;

        list_for_each(pos, head) {
                cur = list_entry(pos, struct agp_3_5_dev, list);
                if (cur->maxbw > n->maxbw)
                        break;
        }
        list_add_tail(new, pos);
}

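/*
 * Re-sort dev_list by maxbw, reading each device's maximum isochronous
 * bandwidth fresh from its NISTAT register.
 */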
static void agp_3_5_dev_list_sort(struct agp_3_5_dev *list, unsigned int ndevs)
{
        struct agp_3_5_dev *cur;
        struct pci_dev *dev;
        struct list_head *pos, *tmp, *head = &list->list, *start = head->next;
        u32 nistat;

        INIT_LIST_HEAD(head);

        for (pos=start; pos!=head; ) {
                cur = list_entry(pos, struct agp_3_5_dev, list);
                dev = cur->dev;

                pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &nistat);
                cur->maxbw = (nistat >> 16) & 0xff;

                tmp = pos;
                pos = pos->next;
                agp_3_5_dev_list_insert(head, tmp);
        }
}

/*
 * Initialize all isochronous transfer parameters for an AGP 3.0
 * node (i.e. a host bridge in combination with the adapters
 * lying behind it...)
 */

static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
                struct agp_3_5_dev *dev_list, unsigned int ndevs)
{
        /*
         * Convenience structure to make the calculations clearer
         * here. The field names come straight from the AGP 3.0 spec.
         */
        struct isoch_data {
                u32 maxbw;
                u32 n;
                u32 y;
                u32 l;
                u32 rq;
                struct agp_3_5_dev *dev;
        };

        struct pci_dev *td = bridge->dev, *dev;
        struct list_head *head = &dev_list->list, *pos;
        struct agp_3_5_dev *cur;
        struct isoch_data *master, target;
        unsigned int cdev = 0;
        u32 mnistat, tnistat, tstatus, mcmd;
        u16 tnicmd, mnicmd;
        u8 mcapndx;
        u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async;
        u32 step, rem, rem_isoch, rem_async;
        int ret = 0;

        /*
         * We'll work with an array of isoch_data's (one for each
         * device in dev_list) throughout this function.
         */
        if ((master = kmalloc(ndevs * sizeof(*master), GFP_KERNEL)) == NULL) {
                ret = -ENOMEM;
                goto get_out;
        }

        /*
         * Sort the device list by maxbw. We need to do this because the
         * spec suggests that the devices with the smallest requirements
         * have their resources allocated first, with all remaining resources
         * falling to the device with the largest requirement.
         *
         * We don't do exactly that; instead we divide the target's resources
         * by ndevs and split them evenly amongst the AGP 3.0 devices, with
         * the remainder of each division dropped on the last device, roughly
         * as the spec says it should be done.
         *
         * We can't do this sort when we initially construct the dev_list
         * because we don't know until this function whether isochronous
         * transfers are enabled and consequently whether maxbw will mean
         * anything.
         */
        agp_3_5_dev_list_sort(dev_list, ndevs);

        pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
        pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);

        /* Extract power-on defaults from the target */
        target.maxbw = (tnistat >> 16) & 0xff;
        target.n     = (tnistat >> 8)  & 0xff;
        target.y     = (tnistat >> 6)  & 0x3;
        target.l     = (tnistat >> 3)  & 0x7;
        target.rq    = (tstatus >> 24) & 0xff;

        y_max = target.y;

        /*
         * Extract power-on defaults for each device in dev_list. Along
         * the way, calculate the total isochronous bandwidth required
         * by these devices and the largest requested payload size.
         */
        list_for_each(pos, head) {
                cur = list_entry(pos, struct agp_3_5_dev, list);
                dev = cur->dev;

                mcapndx = cur->capndx;

                pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat);

                master[cdev].maxbw = (mnistat >> 16) & 0xff;
                master[cdev].n     = (mnistat >> 8)  & 0xff;
                master[cdev].y     = (mnistat >> 6)  & 0x3;
                master[cdev].dev   = cur;

                tot_bw += master[cdev].maxbw;
                y_max = max(y_max, master[cdev].y);

                cdev++;
        }

        /* Check if this configuration has any chance of working */
        if (tot_bw > target.maxbw) {
                dev_err(&td->dev, "isochronous bandwidth required "
                        "by AGP 3.0 devices exceeds that which is supported by "
                        "the AGP 3.0 bridge!\n");
                ret = -ENODEV;
                goto free_and_exit;
        }

        target.y = y_max;

        /*
         * Write the calculated payload size into the target's NICMD
         * register. Doing this directly affects the ISOCH_N value
         * in the target's NISTAT register, so we need to do this now
         * to get an accurate value for ISOCH_N later.
         */
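        /* ISOCH_Y occupies bits 7:6 of the NICMD register. */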
        pci_read_config_word(td, bridge->capndx+AGPNICMD, &tnicmd);
        tnicmd &= ~(0x3 << 6);
        tnicmd |= target.y << 6;
        pci_write_config_word(td, bridge->capndx+AGPNICMD, tnicmd);

        /* Reread the target's ISOCH_N */
        pci_read_config_dword(td, bridge->capndx+AGPNISTAT, &tnistat);
        target.n = (tnistat >> 8) & 0xff;

        /* Calculate the minimum ISOCH_N needed by each master */
        for (cdev=0; cdev<ndevs; cdev++) {
                master[cdev].y = target.y;
                master[cdev].n = master[cdev].maxbw / (master[cdev].y + 1);

                tot_n += master[cdev].n;
        }

        /* Exit if the minimal ISOCH_N allocation among the masters is more
         * than the target can handle. */
        if (tot_n > target.n) {
                dev_err(&td->dev, "number of isochronous "
                        "transactions per period required by AGP 3.0 devices "
                        "exceeds that which is supported by the AGP 3.0 "
                        "bridge!\n");
                ret = -ENODEV;
                goto free_and_exit;
        }

        /* Calculate left over ISOCH_N capability in the target. We'll give
         * this to the hungriest device (as per the spec) */
        rem = target.n - tot_n;

        /*
         * Calculate the minimum isochronous RQ depth needed by each master.
         * Along the way, distribute the extra ISOCH_N capability calculated
         * above.
         */
        for (cdev=0; cdev<ndevs; cdev++) {
                /*
                 * This is a little subtle. If ISOCH_Y > 64B, then ISOCH_Y
                 * byte isochronous writes will be broken into 64B pieces.
                 * This means we need to budget more RQ depth to account for
                 * these kinds of writes (each isochronous write is actually
                 * many writes on the AGP bus).
                 */
                master[cdev].rq = master[cdev].n;
                if (master[cdev].y > 0x1)
                        master[cdev].rq *= (1 << (master[cdev].y - 1));

                tot_rq += master[cdev].rq;
        }
        master[ndevs-1].n += rem;

        /* Figure the number of isochronous and asynchronous RQ slots the
         * target is providing. */
        rq_isoch = (target.y > 0x1) ? target.n * (1 << (target.y - 1)) : target.n;
        rq_async = target.rq - rq_isoch;

        /* Exit if the minimal RQ needs of the masters exceeds what the target
         * can provide. */
        if (tot_rq > rq_isoch) {
                dev_err(&td->dev, "number of request queue slots "
                        "required by the isochronous bandwidth requested by "
                        "AGP 3.0 devices exceeds the number provided by the "
                        "AGP 3.0 bridge!\n");
                ret = -ENODEV;
                goto free_and_exit;
        }

        /* Calculate asynchronous RQ capability in the target (per master) as
         * well as the total number of leftover isochronous RQ slots. */
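        /* E.g. with 10 asynchronous RQ slots and 3 masters, step is 3: the
         * first two masters each get 3 extra slots and the last one gets
         * the remaining 4 (plus any leftover isochronous slots). */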
        step      = rq_async / ndevs;
        rem_async = step + (rq_async % ndevs);
        rem_isoch = rq_isoch - tot_rq;

        /* Distribute the extra RQ slots calculated above and write our
         * isochronous settings out to the actual devices. */
        for (cdev=0; cdev<ndevs; cdev++) {
                cur = master[cdev].dev;
                dev = cur->dev;

                mcapndx = cur->capndx;

                master[cdev].rq += (cdev == ndevs - 1)
                        ? (rem_async + rem_isoch) : step;

                pci_read_config_word(dev, cur->capndx+AGPNICMD, &mnicmd);
                pci_read_config_dword(dev, cur->capndx+AGPCMD, &mcmd);

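                /* Clear the ISOCH_N (bits 15:8) and ISOCH_Y (bits 7:6) fields
                 * of NICMD and the RQ depth field (bits 31:24) of CMD, then
                 * fill in the values calculated above. */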
                mnicmd &= ~(0xff << 8);
                mnicmd &= ~(0x3  << 6);
                mcmd   &= ~(0xff << 24);

                mnicmd |= master[cdev].n  << 8;
                mnicmd |= master[cdev].y  << 6;
                mcmd   |= master[cdev].rq << 24;

                pci_write_config_dword(dev, cur->capndx+AGPCMD, mcmd);
                pci_write_config_word(dev, cur->capndx+AGPNICMD, mnicmd);
        }

free_and_exit:
        kfree(master);

get_out:
        return ret;
}

/*
 * This function allocates request queue slots among the AGP 3.0
 * masters in a non-isochronous node. The algorithm is simple:
 * divide the total number of RQ slots provided by the target by
 * ndevs, distribute that many slots to each AGP 3.0 device, and
 * give any left over slots to the last device in dev_list.
 */
static void agp_3_5_nonisochronous_node_enable(struct agp_bridge_data *bridge,
                struct agp_3_5_dev *dev_list, unsigned int ndevs)
{
        struct agp_3_5_dev *cur;
        struct list_head *head = &dev_list->list, *pos;
        u32 tstatus, mcmd;
        u32 trq, mrq, rem;
        unsigned int cdev = 0;

        pci_read_config_dword(bridge->dev, bridge->capndx+AGPSTAT, &tstatus);

        trq = (tstatus >> 24) & 0xff;
        mrq = trq / ndevs;

        rem = mrq + (trq % ndevs);

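        /* Program each master's RQ depth; the last device also gets the
         * leftover slots from the division above. */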
        for (pos=head->next; cdev<ndevs; cdev++, pos=pos->next) {
                cur = list_entry(pos, struct agp_3_5_dev, list);

                pci_read_config_dword(cur->dev, cur->capndx+AGPCMD, &mcmd);
                mcmd &= ~(0xff << 24);
                mcmd |= ((cdev == ndevs - 1) ? rem : mrq) << 24;
                pci_write_config_dword(cur->dev, cur->capndx+AGPCMD, mcmd);
        }
}

/*
 * Fully configure and enable an AGP 3.0 host bridge and all the devices
 * lying behind it.
 */
int agp_3_5_enable(struct agp_bridge_data *bridge)
{
        struct pci_dev *td = bridge->dev, *dev = NULL;
        u8 mcapndx;
        u32 isoch, arqsz;
        u32 tstatus, mstatus, ncapid;
        u32 mmajor;
        u16 mpstat;
        struct agp_3_5_dev *dev_list, *cur;
        struct list_head *head, *pos;
        unsigned int ndevs = 0;
        int ret = 0;

        /* Extract some power-on defaults from the target */
        pci_read_config_dword(td, bridge->capndx+AGPSTAT, &tstatus);
        isoch = (tstatus >> 17) & 0x1;
        if (isoch == 0) /* isoch xfers not available, bail out. */
                return -ENODEV;

        arqsz = (tstatus >> 13) & 0x7;

        /*
         * Allocate a head for our AGP 3.5 device list
         * (multiple AGP v3 devices are allowed behind a single bridge).
         */
        if ((dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL)) == NULL) {
                ret = -ENOMEM;
                goto get_out;
        }
        head = &dev_list->list;
        INIT_LIST_HEAD(head);

        /* Find all AGP devices, and add them to dev_list. */
        for_each_pci_dev(dev) {
                mcapndx = pci_find_capability(dev, PCI_CAP_ID_AGP);
                if (mcapndx == 0)
                        continue;

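                /* Only display and multimedia class devices are treated as
                 * AGP masters here; everything else is skipped. */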
                switch ((dev->class >>8) & 0xff00) {
                case 0x0600:    /* Bridge */
                        /* Skip bridges. We should call this function for each one. */
                        continue;

                case 0x0001:    /* Unclassified device */
                        /* Don't know what this is, but log it for investigation. */
                        if (mcapndx != 0) {
                                dev_info(&td->dev, "wacky, found unclassified AGP device %s [%04x/%04x]\n",
                                         pci_name(dev),
                                         dev->vendor, dev->device);
                        }
                        continue;

                case 0x0300:    /* Display controller */
                case 0x0400:    /* Multimedia controller */
                        if ((cur = kmalloc(sizeof(*cur), GFP_KERNEL)) == NULL) {
                                ret = -ENOMEM;
                                goto free_and_exit;
                        }
                        cur->dev = dev;

                        pos = &cur->list;
                        list_add(pos, head);
                        ndevs++;
                        continue;

                default:
                        continue;
                }
        }

        /*
         * Take an initial pass through the devices lying behind our host
         * bridge. Make sure each one is actually an AGP 3.0 device, otherwise
         * exit with an error message. Along the way store the AGP 3.0
         * cap_ptr for each device.
         */
        list_for_each(pos, head) {
                cur = list_entry(pos, struct agp_3_5_dev, list);
                dev = cur->dev;

                pci_read_config_word(dev, PCI_STATUS, &mpstat);
                if ((mpstat & PCI_STATUS_CAP_LIST) == 0)
                        continue;

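                /* Walk the device's capability list by hand until we find the
                 * AGP capability (capability ID 0x02). */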
                pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &mcapndx);
                if (mcapndx != 0) {
                        do {
                                pci_read_config_dword(dev, mcapndx, &ncapid);
                                if ((ncapid & 0xff) != 2)
                                        mcapndx = (ncapid >> 8) & 0xff;
                        }
                        while (((ncapid & 0xff) != 2) && (mcapndx != 0));
                }

                if (mcapndx == 0) {
                        dev_err(&td->dev, "woah! Non-AGP device %s on "
                                "secondary bus of AGP 3.5 bridge!\n",
                                pci_name(dev));
                        ret = -ENODEV;
                        goto free_and_exit;
                }

                mmajor = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
                if (mmajor < 3) {
                        dev_err(&td->dev, "woah! AGP 2.0 device %s on "
                                "secondary bus of AGP 3.5 bridge operating "
                                "with AGP 3.0 electricals!\n", pci_name(dev));
                        ret = -ENODEV;
                        goto free_and_exit;
                }

                cur->capndx = mcapndx;

                pci_read_config_dword(dev, cur->capndx+AGPSTAT, &mstatus);

                if (((mstatus >> 3) & 0x1) == 0) {
                        dev_err(&td->dev, "woah! AGP 3.x device %s not "
                                "operating in AGP 3.x mode on secondary bus "
                                "of AGP 3.5 bridge operating with AGP 3.0 "
                                "electricals!\n", pci_name(dev));
                        ret = -ENODEV;
                        goto free_and_exit;
                }
        }

        /*
         * Call functions to divide target resources amongst the AGP 3.0
         * masters. This process is dramatically different depending on
         * whether isochronous transfers are supported.
         */
        if (isoch) {
                ret = agp_3_5_isochronous_node_enable(bridge, dev_list, ndevs);
                if (ret) {
                        dev_info(&td->dev, "something bad happened setting "
                                 "up isochronous xfers; falling back to "
                                 "non-isochronous xfer mode\n");
                } else {
                        goto free_and_exit;
                }
        }
        agp_3_5_nonisochronous_node_enable(bridge, dev_list, ndevs);

free_and_exit:
        /* Be sure to free the dev_list */
        for (pos=head->next; pos!=head; ) {
                cur = list_entry(pos, struct agp_3_5_dev, list);

                pos = pos->next;
                kfree(cur);
        }
        kfree(dev_list);

get_out:
        return ret;
}