#ifndef _SCSI_SCSI_HOST_H
#define _SCSI_SCSI_HOST_H

#include <linux/device.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

struct request_queue;
struct block_device;
struct completion;
struct module;
struct scsi_cmnd;
struct scsi_device;
struct scsi_target;
struct Scsi_Host;
struct scsi_host_cmd_pool;
struct scsi_transport_template;
struct blk_queue_tags;


/*
 * The various choices mean:
 * NONE: Self evident.  Host adapter is not capable of scatter-gather.
 * ALL:  Means that the host adapter module can do scatter-gather,
 *       and that there is no limit to the size of the table to which
 *       we scatter/gather data.
 * Anything else:  Indicates the maximum number of chains that can be
 *       used in one scatter-gather request.
 */
#define SG_NONE 0
#define SG_ALL 0xff

#define MODE_UNKNOWN 0x00
#define MODE_INITIATOR 0x01
#define MODE_TARGET 0x02

#define DISABLE_CLUSTERING 0
#define ENABLE_CLUSTERING 1

enum scsi_eh_timer_return {
	EH_NOT_HANDLED,
	EH_HANDLED,
	EH_RESET_TIMER,
};


struct scsi_host_template {
	struct module *module;
	const char *name;

	/*
	 * Used to initialize old-style drivers.  For new-style drivers
	 * just perform all work in your module initialization function.
	 *
	 * Status: OBSOLETE
	 */
	int (* detect)(struct scsi_host_template *);

	/*
	 * Used as unload callback for hosts with old-style drivers.
	 *
	 * Status: OBSOLETE
	 */
	int (* release)(struct Scsi_Host *);

	/*
	 * The info function will return whatever useful information the
	 * developer sees fit.  If not provided, then the name field will
	 * be used instead.
	 *
	 * Status: OPTIONAL
	 */
	const char *(* info)(struct Scsi_Host *);

	/*
	 * Ioctl interface
	 *
	 * Status: OPTIONAL
	 */
	int (* ioctl)(struct scsi_device *dev, int cmd, void __user *arg);


#ifdef CONFIG_COMPAT
	/*
	 * Compat handler.  Handles the 32-bit ABI.
	 * When an unknown ioctl is passed, return -ENOIOCTLCMD.
	 *
	 * Status: OPTIONAL
	 */
	int (* compat_ioctl)(struct scsi_device *dev, int cmd, void __user *arg);
#endif

	/*
	 * The queuecommand function is used to queue up a scsi
	 * command block to the LLDD.  When the driver has finished
	 * processing the command the done callback is invoked.
	 *
	 * If queuecommand returns 0, then the HBA has accepted the
	 * command.  The done() function must be called on the command
	 * when the driver has finished with it.  (You may call done on the
	 * command before queuecommand returns, but in this case you
	 * *must* return 0 from queuecommand.)
	 *
	 * Queuecommand may also reject the command, in which case it may
	 * not touch the command and must not call done() for it.
	 *
	 * There are two possible rejection returns:
	 *
	 *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
	 *   allow commands to other devices serviced by this host.
	 *
	 *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
	 *   host temporarily.
	 *
	 * For compatibility, any other non-zero return is treated the
	 * same as SCSI_MLQUEUE_HOST_BUSY.
	 *
	 * NOTE: "temporarily" means either until the next command for
	 * this device/host completes, or a period of time determined by
	 * I/O pressure in the system if there are no other outstanding
	 * commands.
	 *
	 * STATUS: REQUIRED
	 */
	int (* queuecommand)(struct scsi_cmnd *,
			     void (*done)(struct scsi_cmnd *));

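	/*
	 * Illustrative sketch, not part of the original header: a minimal
	 * queuecommand implementation for a hypothetical "foo" driver,
	 * showing the accept/reject convention described above (all foo_*
	 * names are made up):
	 *
	 *	static int foo_queuecommand(struct scsi_cmnd *cmd,
	 *				    void (*done)(struct scsi_cmnd *))
	 *	{
	 *		struct foo_hba *hba = shost_priv(cmd->device->host);
	 *
	 *		if (foo_hw_queue_full(hba))
	 *			return SCSI_MLQUEUE_HOST_BUSY;
	 *
	 *		cmd->scsi_done = done;
	 *		foo_hw_submit(hba, cmd);
	 *		return 0;
	 *	}
	 *
	 * The driver's completion path later sets cmd->result and invokes
	 * the saved done callback (conventionally via cmd->scsi_done(cmd)).
	 */
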
	/*
	 * The transfer functions are used to queue a scsi command to
	 * the LLD.  When the driver is finished processing the command
	 * the done callback is invoked.
	 *
	 * This is called to inform the LLD to transfer
	 * scsi_bufflen(cmd) bytes.  scsi_sg_count(cmd) specifies the
	 * number of scatterlist entries in the command and
	 * scsi_sglist(cmd) returns the scatterlist.
	 *
	 * return values: see queuecommand
	 *
	 * If the LLD accepts the cmd, it should set the result to an
	 * appropriate value when completed before calling the done function.
	 *
	 * STATUS: REQUIRED FOR TARGET DRIVERS
	 */
	/* TODO: rename */
	int (* transfer_response)(struct scsi_cmnd *,
				  void (*done)(struct scsi_cmnd *));

	/*
	 * This is an error handling strategy routine.  You don't need to
	 * define one of these if you don't want to - there is a default
	 * routine that is present that should work in most cases.  For those
	 * driver authors that have the inclination and ability to write their
	 * own strategy routine, this is where it is specified.  Note - the
	 * strategy routine is *ALWAYS* run in the context of the kernel eh
	 * thread.  Thus you are guaranteed to *NOT* be in an interrupt
	 * handler when you execute this, and you are also guaranteed to
	 * *NOT* have any other commands being queued while you are in the
	 * strategy routine.  When you return from this function, operations
	 * return to normal.
	 *
	 * See scsi_error.c scsi_unjam_host for additional comments about
	 * what this function should and should not be attempting to do.
	 *
	 * Status: REQUIRED	(at least one of them)
	 */
	int (* eh_abort_handler)(struct scsi_cmnd *);
	int (* eh_device_reset_handler)(struct scsi_cmnd *);
	int (* eh_bus_reset_handler)(struct scsi_cmnd *);
	int (* eh_host_reset_handler)(struct scsi_cmnd *);

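	/*
	 * Illustrative sketch, not part of the original header: an abort
	 * handler for a hypothetical "foo" driver.  The eh_*_handler
	 * routines conventionally return SUCCESS or FAILED (values from
	 * include/scsi/scsi.h); the foo_* names are made up:
	 *
	 *	static int foo_eh_abort_handler(struct scsi_cmnd *cmd)
	 *	{
	 *		struct foo_hba *hba = shost_priv(cmd->device->host);
	 *
	 *		if (foo_hw_abort_cmd(hba, cmd))
	 *			return FAILED;
	 *		return SUCCESS;
	 *	}
	 */
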
	/*
	 * Before the mid layer attempts to scan for a new device where none
	 * currently exists, it will call this entry in your driver.  Should
	 * your driver need to allocate any structs or perform any other init
	 * items in order to send commands to a currently unused target/lun
	 * combo, then this is where you can perform those allocations.  This
	 * is specifically so that drivers won't have to perform any kind of
	 * "is this a new device" checks in their queuecommand routine,
	 * thereby making the hot path a bit quicker.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Deallocation:  If we didn't find any devices at this ID, you will
	 * get an immediate call to slave_destroy().  If we find something
	 * here then you will get a call to slave_configure(), then the
	 * device will be used for however long it is kept around, then when
	 * the device is removed from the system (or possibly at reboot
	 * time), you will then get a call to slave_destroy().  This is
	 * assuming you implement slave_configure and slave_destroy.
	 * However, if you allocate memory and hang it off the device struct,
	 * then you must implement the slave_destroy() routine at a minimum
	 * in order to avoid leaking memory each time a device is torn down.
	 *
	 * Status: OPTIONAL
	 */
	int (* slave_alloc)(struct scsi_device *);

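	/*
	 * Illustrative sketch, not part of the original header: a
	 * slave_alloc that hangs made-up per-LUN state off sdev->hostdata
	 * for a hypothetical "foo" driver; the matching slave_destroy()
	 * would kfree() it again:
	 *
	 *	static int foo_slave_alloc(struct scsi_device *sdev)
	 *	{
	 *		struct foo_lun *lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	 *
	 *		if (!lun)
	 *			return -ENOMEM;
	 *		sdev->hostdata = lun;
	 *		return 0;
	 *	}
	 */
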
	/*
	 * Once the device has responded to an INQUIRY and we know the
	 * device is online, we call into the low level driver with the
	 * struct scsi_device *.  If the low level device driver implements
	 * this function, it *must* perform the task of setting the queue
	 * depth on the device.  All other tasks are optional and depend
	 * on what the driver supports and various implementation details.
	 *
	 * Things currently recommended to be handled at this time include:
	 *
	 * 1.  Setting the device queue depth.  Proper setting of this is
	 *     described in the comments for scsi_adjust_queue_depth.
	 * 2.  Determining if the device supports the various synchronous
	 *     negotiation protocols.  The device struct will already have
	 *     responded to INQUIRY and the results of the standard items
	 *     will have been shoved into the various device flag bits, e.g.
	 *     device->sdtr will be true if the device supports SDTR messages.
	 * 3.  Allocating command structs that the device will need.
	 * 4.  Setting the default timeout on this device (if needed).
	 * 5.  Anything else the low level driver might want to do on a device
	 *     specific setup basis...
	 * 6.  Return 0 on success, non-0 on error.  The device will be marked
	 *     as offline on error so that no access will occur.  If you return
	 *     non-0, your slave_destroy routine will never get called for this
	 *     device, so don't leave any loose memory hanging around; clean
	 *     up after yourself before returning non-0.
	 *
	 * Status: OPTIONAL
	 */
	int (* slave_configure)(struct scsi_device *);

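	/*
	 * Illustrative sketch, not part of the original header: a
	 * slave_configure that performs the queue-depth duty described
	 * above via scsi_adjust_queue_depth() (FOO_QUEUE_DEPTH is a
	 * made-up per-driver constant):
	 *
	 *	static int foo_slave_configure(struct scsi_device *sdev)
	 *	{
	 *		if (sdev->tagged_supported)
	 *			scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
	 *						FOO_QUEUE_DEPTH);
	 *		else
	 *			scsi_adjust_queue_depth(sdev, 0, 1);
	 *		return 0;
	 *	}
	 */
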
	/*
	 * Immediately prior to deallocating the device and after all activity
	 * has ceased the mid layer calls this point so that the low level
	 * driver may completely detach itself from the scsi device and vice
	 * versa.  The low level driver is responsible for freeing any memory
	 * it allocated in the slave_alloc or slave_configure calls.
	 *
	 * Status: OPTIONAL
	 */
	void (* slave_destroy)(struct scsi_device *);

	/*
	 * Before the mid layer attempts to scan for a new device attached
	 * to a target where no target currently exists, it will call this
	 * entry in your driver.  Should your driver need to allocate any
	 * structs or perform any other init items in order to send commands
	 * to a currently unused target, then this is where you can perform
	 * those allocations.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Status: OPTIONAL
	 */
	int (* target_alloc)(struct scsi_target *);

	/*
	 * Immediately prior to deallocating the target structure, and
	 * after all activity to attached scsi devices has ceased, the
	 * midlayer calls this point so that the driver may deallocate
	 * and terminate any references to the target.
	 *
	 * Status: OPTIONAL
	 */
	void (* target_destroy)(struct scsi_target *);

	/*
	 * If a host has the ability to discover targets on its own instead
	 * of scanning the entire bus, it can fill in this function and
	 * call scsi_scan_host().  This function will be called periodically,
	 * with the scsi_host and the elapsed time of the scan in jiffies,
	 * until it returns 1.
	 *
	 * Status: OPTIONAL
	 */
	int (* scan_finished)(struct Scsi_Host *, unsigned long);

	/*
	 * If the host wants to be called before the scan starts, but
	 * after the midlayer has set up ready for the scan, it can fill
	 * in this function.
	 *
	 * Status: OPTIONAL
	 */
	void (* scan_start)(struct Scsi_Host *);

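	/*
	 * Illustrative sketch, not part of the original header: an
	 * asynchronous-scan pair for a hypothetical "foo" driver that
	 * kicks off discovery in scan_start() and reports completion
	 * (or gives up after roughly ten seconds) from scan_finished():
	 *
	 *	static void foo_scan_start(struct Scsi_Host *shost)
	 *	{
	 *		foo_hw_start_discovery(shost_priv(shost));
	 *	}
	 *
	 *	static int foo_scan_finished(struct Scsi_Host *shost,
	 *				     unsigned long time)
	 *	{
	 *		return foo_hw_discovery_done(shost_priv(shost)) ||
	 *		       time >= 10 * HZ;
	 *	}
	 */
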
	/*
	 * Fill in this function to allow the queue depth of this host
	 * to be changeable (on a per device basis).  Returns either
	 * the current queue depth setting (may be different from what
	 * was passed in) or an error.  An error should only be
	 * returned if the requested depth is legal but the driver was
	 * unable to set it.  If the requested depth is illegal, the
	 * driver should set and return the closest legal queue depth.
	 *
	 * Status: OPTIONAL
	 */
	int (* change_queue_depth)(struct scsi_device *, int);

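	/*
	 * Illustrative sketch, not part of the original header: a
	 * change_queue_depth that clamps the request to a made-up hardware
	 * limit and returns the depth actually set (scsi_get_tag_type()
	 * comes from scsi/scsi_tcq.h):
	 *
	 *	static int foo_change_queue_depth(struct scsi_device *sdev,
	 *					  int depth)
	 *	{
	 *		if (depth > FOO_MAX_QUEUE_DEPTH)
	 *			depth = FOO_MAX_QUEUE_DEPTH;
	 *		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
	 *					depth);
	 *		return sdev->queue_depth;
	 *	}
	 */
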
	/*
	 * Fill in this function to allow the changing of tag types
	 * (this also allows the enabling/disabling of tag command
	 * queueing).  An error should only be returned if something
	 * went wrong in the driver while trying to set the tag type.
	 * If the driver doesn't support the requested tag type, then
	 * it should set the closest type it does support without
	 * returning an error.  Returns the actual tag type set.
	 *
	 * Status: OPTIONAL
	 */
	int (* change_queue_type)(struct scsi_device *, int);

	/*
	 * This function determines the BIOS parameters for a given
	 * harddisk.  These tend to be numbers that are made up by
	 * the host adapter.  Parameters:
	 * size, device, list (heads, sectors, cylinders)
	 *
	 * Status: OPTIONAL
	 */
	int (* bios_param)(struct scsi_device *, struct block_device *,
			sector_t, int []);

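	/*
	 * Illustrative sketch, not part of the original header: a
	 * bios_param that reports a conventional translated geometry of
	 * 64 heads and 32 sectors per track, with the cylinder count
	 * derived from the capacity (geom[] is heads/sectors/cylinders):
	 *
	 *	static int foo_bios_param(struct scsi_device *sdev,
	 *				  struct block_device *bdev,
	 *				  sector_t capacity, int geom[])
	 *	{
	 *		geom[0] = 64;
	 *		geom[1] = 32;
	 *		sector_div(capacity, 64 * 32);
	 *		geom[2] = capacity;
	 *		return 0;
	 *	}
	 */
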
	/*
	 * Can be used to export driver statistics and other information
	 * to the world outside the kernel, i.e. userspace, and it also
	 * provides an interface to feed the driver with information.
	 *
	 * Status: OBSOLETE
	 */
	int (*proc_info)(struct Scsi_Host *, char *, char **, off_t, int, int);

	/*
	 * This is an optional routine that allows the transport to become
	 * involved when a scsi io timer fires.  The return value tells the
	 * timer routine how to finish the io timeout handling:
	 * EH_HANDLED:		I fixed the error, please complete the command
	 * EH_RESET_TIMER:	I need more time, reset the timer and
	 *			begin counting again
	 * EH_NOT_HANDLED:	Begin normal error recovery
	 *
	 * Status: OPTIONAL
	 */
	enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);

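	/*
	 * Illustrative sketch, not part of the original header: an
	 * eh_timed_out hook that grants more time while (made-up) hardware
	 * recovery is in progress and otherwise lets normal error handling
	 * begin:
	 *
	 *	static enum scsi_eh_timer_return
	 *	foo_eh_timed_out(struct scsi_cmnd *cmd)
	 *	{
	 *		struct foo_hba *hba = shost_priv(cmd->device->host);
	 *
	 *		if (foo_hw_resetting(hba))
	 *			return EH_RESET_TIMER;
	 *		return EH_NOT_HANDLED;
	 *	}
	 */
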
	/*
	 * Name of proc directory
	 */
	const char *proc_name;

	/*
	 * Used to store the procfs directory if a driver implements the
	 * proc_info method.
	 */
	struct proc_dir_entry *proc_dir;

	/*
	 * This determines if we will use a non-interrupt driven
	 * or an interrupt driven scheme.  It is set to the maximum number
	 * of simultaneous commands a given host adapter will accept.
	 */
	int can_queue;

	/*
	 * In many instances, especially where disconnect / reconnect are
	 * supported, our host also has an ID on the SCSI bus.  If this is
	 * the case, then it must be reserved.  Please set this_id to -1 if
	 * your setup is in single initiator mode, and the host lacks an
	 * ID.
	 */
	int this_id;

	/*
	 * This determines the degree to which the host adapter is capable
	 * of scatter-gather.
	 */
	unsigned short sg_tablesize;

	/*
	 * Set this if the host adapter has limitations beside segment count.
	 */
	unsigned short max_sectors;

	/*
	 * DMA scatter gather segment boundary limit.  A segment crossing this
	 * boundary will be split in two.
	 */
	unsigned long dma_boundary;

	/*
	 * This specifies "machine infinity" for host templates which don't
	 * limit the transfer size.  Note this limit represents an absolute
	 * maximum, and may be over the transfer limits allowed for
	 * individual devices (e.g. 256 for SCSI-1).
	 */
#define SCSI_DEFAULT_MAX_SECTORS 1024

	/*
	 * True if this host adapter can make good use of linked commands.
	 * This will allow more than one command to be queued to a given
	 * unit on a given host.  Set this to the maximum number of command
	 * blocks to be provided for each device.  Set this to 1 for one
	 * command block per lun, 2 for two, etc.  Do not set this to 0.
	 * You should make sure that the host adapter will do the right thing
	 * before you try setting this above 1.
	 */
	short cmd_per_lun;

	/*
	 * present contains a counter indicating how many boards of this
	 * type were found when we did the scan.
	 */
	unsigned char present;

	/*
	 * This specifies the mode that an LLD supports.
	 */
	unsigned supported_mode:2;

	/*
	 * True if this host adapter uses unchecked DMA onto an ISA bus.
	 */
	unsigned unchecked_isa_dma:1;

	/*
	 * True if this host adapter can make good use of clustering.
	 * I originally thought that if the tablesize was large that it
	 * was a waste of CPU cycles to prepare a cluster list, but
	 * it works out that the Buslogic is faster if you use a smaller
	 * number of segments (i.e. use clustering).  I guess it is
	 * inefficient.
	 */
	unsigned use_clustering:1;

	/*
	 * True for emulated SCSI host adapters (e.g. ATAPI).
	 */
	unsigned emulated:1;

	/*
	 * True if the low-level driver performs its own reset-settle delays.
	 */
	unsigned skip_settle_delay:1;

	/*
	 * True if we are using ordered write support.
	 */
	unsigned ordered_tag:1;

	/*
	 * Countdown for host blocking with no commands outstanding.
	 */
	unsigned int max_host_blocked;

	/*
	 * Default value for the blocking.  If the queue is empty,
	 * host_blocked counts down in the request_fn until it restarts
	 * host operations as zero is reached.
	 *
	 * FIXME: This should probably be a value in the template
	 */
#define SCSI_DEFAULT_HOST_BLOCKED 7

	/*
	 * Pointer to the sysfs class properties for this host, NULL terminated.
	 */
	struct class_device_attribute **shost_attrs;

	/*
	 * Pointer to the SCSI device properties for this host, NULL terminated.
	 */
	struct device_attribute **sdev_attrs;

	/*
	 * List of hosts per template.
	 *
	 * This is only for use by scsi_module.c for legacy templates.
	 * For these access to it is synchronized implicitly by
	 * module_init/module_exit.
	 */
	struct list_head legacy_hosts;
};

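/*
 * Illustrative sketch, not part of the original header: a typical static
 * template for a hypothetical "foo" driver, wiring up the mandatory
 * queuecommand and error handlers plus the usual limits (all foo_*
 * symbols are made up):
 *
 *	static struct scsi_host_template foo_template = {
 *		.module			= THIS_MODULE,
 *		.name			= "foo",
 *		.proc_name		= "foo",
 *		.queuecommand		= foo_queuecommand,
 *		.eh_abort_handler	= foo_eh_abort_handler,
 *		.eh_host_reset_handler	= foo_eh_host_reset_handler,
 *		.slave_alloc		= foo_slave_alloc,
 *		.slave_configure	= foo_slave_configure,
 *		.slave_destroy		= foo_slave_destroy,
 *		.change_queue_depth	= foo_change_queue_depth,
 *		.can_queue		= 64,
 *		.this_id		= -1,
 *		.sg_tablesize		= SG_ALL,
 *		.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
 *		.cmd_per_lun		= 3,
 *		.use_clustering		= ENABLE_CLUSTERING,
 *	};
 */
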
/*
 * shost state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ascii descriptions) and the state model enforcer:
 * scsi_host_set_state()
 */
enum scsi_host_state {
	SHOST_CREATED = 1,
	SHOST_RUNNING,
	SHOST_CANCEL,
	SHOST_DEL,
	SHOST_RECOVERY,
	SHOST_CANCEL_RECOVERY,
	SHOST_DEL_RECOVERY,
};

struct Scsi_Host {
	/*
	 * __devices is protected by the host_lock, but you should
	 * usually use scsi_device_lookup / shost_for_each_device
	 * to access it and don't care about locking yourself.
	 * In the rare case of being in irq context you can use
	 * their __ prefixed variants with the lock held.  NEVER
	 * access this list directly from a driver.
	 */
	struct list_head	__devices;
	struct list_head	__targets;

	struct scsi_host_cmd_pool *cmd_pool;
	spinlock_t		free_list_lock;
	struct list_head	free_list; /* backup store of cmd structs */
	struct list_head	starved_list;

	spinlock_t		default_lock;
	spinlock_t		*host_lock;

	struct mutex		scan_mutex;/* serialize scanning activity */

	struct list_head	eh_cmd_q;
	struct task_struct    * ehandler;  /* Error recovery thread. */
	struct completion     * eh_action; /* Wait for specific actions on the
					      host. */
	wait_queue_head_t       host_wait;
	struct scsi_host_template *hostt;
	struct scsi_transport_template *transportt;

	/*
	 * Area to keep a shared tag map (if needed, will be
	 * NULL if not).
	 */
	struct blk_queue_tag	*bqt;

	/*
	 * The following two fields are protected with host_lock;
	 * however, eh routines can safely access during eh processing
	 * without acquiring the lock.
	 */
	unsigned int host_busy;		   /* commands actually active on low-level */
	unsigned int host_failed;	   /* commands that failed. */
	unsigned int host_eh_scheduled;    /* EH scheduled without command */

	unsigned short host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */
	int resetting; /* if set, it means that last_reset is a valid value */
	unsigned long last_reset;

548 | ||
549 | /* | |
550 | * These three parameters can be used to allow for wide scsi, | |
551 | * and for host adapters that support multiple busses | |
552 | * The first two should be set to 1 more than the actual max id | |
553 | * or lun (i.e. 8 for normal systems). | |
554 | */ | |
555 | unsigned int max_id; | |
556 | unsigned int max_lun; | |
557 | unsigned int max_channel; | |
558 | ||
559 | /* | |
560 | * This is a unique identifier that must be assigned so that we | |
561 | * have some way of identifying each detected host adapter properly | |
562 | * and uniquely. For hosts that do not support more than one card | |
563 | * in the system at one time, this does not need to be set. It is | |
564 | * initialized to 0 in scsi_register. | |
565 | */ | |
566 | unsigned int unique_id; | |
567 | ||
568 | /* | |
569 | * The maximum length of SCSI commands that this host can accept. | |
570 | * Probably 12 for most host adapters, but could be 16 for others. | |
571 | * For drivers that don't set this field, a value of 12 is | |
572 | * assumed. I am leaving this as a number rather than a bit | |
573 | * because you never know what subsequent SCSI standards might do | |
574 | * (i.e. could there be a 20 byte or a 24-byte command a few years | |
575 | * down the road?). | |
576 | */ | |
577 | unsigned char max_cmd_len; | |
578 | ||
579 | int this_id; | |
580 | int can_queue; | |
581 | short cmd_per_lun; | |
582 | short unsigned int sg_tablesize; | |
583 | short unsigned int max_sectors; | |
584 | unsigned long dma_boundary; | |
585 | /* | |
586 | * Used to assign serial numbers to the cmds. | |
587 | * Protected by the host lock. | |
588 | */ | |
12a44162 | 589 | unsigned long cmd_serial_number; |
1da177e4 | 590 | |
	unsigned active_mode:2;
	unsigned unchecked_isa_dma:1;
	unsigned use_clustering:1;
	unsigned use_blk_tcq:1;

	/*
	 * Host has requested that no further requests come through for the
	 * time being.
	 */
	unsigned host_self_blocked:1;

	/*
	 * Host uses correct SCSI ordering not PC ordering. The bit is
	 * set for the minority of drivers whose authors actually read
	 * the spec ;).
	 */
	unsigned reverse_ordering:1;

	/*
	 * Ordered write support
	 */
	unsigned ordered_tag:1;

	/* Task mgmt function in progress */
	unsigned tmf_in_progress:1;

	/* Asynchronous scan in progress */
	unsigned async_scan:1;

	/*
	 * Optional work queue to be utilized by the transport
	 */
	char work_q_name[KOBJ_NAME_LEN];
	struct workqueue_struct *work_q;

	/*
	 * Host has rejected a command because it was busy.
	 */
	unsigned int host_blocked;

	/*
	 * Value host_blocked counts down from
	 */
	unsigned int max_host_blocked;

	/*
	 * q used for scsi_tgt msgs, async events or any other requests that
	 * need to be processed in userspace
	 */
	struct request_queue *uspace_req_q;

	/* legacy crap */
	unsigned long base;
	unsigned long io_port;
	unsigned char n_io_port;
	unsigned char dma_channel;
	unsigned int  irq;


	enum scsi_host_state shost_state;

	/* ldm bits */
	struct device		shost_gendev;
	struct class_device	shost_classdev;

	/*
	 * List of hosts per template.
	 *
	 * This is only for use by scsi_module.c for legacy templates.
	 * For these access to it is synchronized implicitly by
	 * module_init/module_exit.
	 */
	struct list_head sht_legacy_list;

	/*
	 * Points to the transport data (if any) which is allocated
	 * separately
	 */
	void *shost_data;

	/*
	 * We should ensure that this is aligned, both for better performance
	 * and also because some compilers (m68k) don't automatically force
	 * alignment to a long boundary.
	 */
	unsigned long hostdata[0]  /* Used for storage of host specific stuff */
		__attribute__ ((aligned (sizeof(unsigned long))));
};

#define class_to_shost(d)	\
	container_of(d, struct Scsi_Host, shost_classdev)

#define shost_printk(prefix, shost, fmt, a...)	\
	dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)

static inline void *shost_priv(struct Scsi_Host *shost)
{
	return (void *)shost->hostdata;
}
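
/*
 * Illustrative sketch, not part of the original header: hostdata[] is
 * sized by the second argument of scsi_host_alloc(), so a driver's
 * private per-host structure (struct foo_hba below is made up) lives
 * directly behind the Scsi_Host and is reached with shost_priv():
 *
 *	struct Scsi_Host *shost = scsi_host_alloc(&foo_template,
 *						  sizeof(struct foo_hba));
 *	struct foo_hba *hba = shost_priv(shost);
 */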

int scsi_is_host_device(const struct device *);

static inline struct Scsi_Host *dev_to_shost(struct device *dev)
{
	while (!scsi_is_host_device(dev)) {
		if (!dev->parent)
			return NULL;
		dev = dev->parent;
	}
	return container_of(dev, struct Scsi_Host, shost_gendev);
}

static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
{
	return shost->shost_state == SHOST_RECOVERY ||
		shost->shost_state == SHOST_CANCEL_RECOVERY ||
		shost->shost_state == SHOST_DEL_RECOVERY ||
		shost->tmf_in_progress;
}

extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);

extern struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *, int);
extern int __must_check scsi_add_host(struct Scsi_Host *, struct device *);
extern void scsi_scan_host(struct Scsi_Host *);
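
/*
 * Illustrative sketch, not part of the original header: the usual
 * registration sequence in a hypothetical probe routine is to allocate
 * the host, register it below the parent device, then scan (foo_template,
 * struct foo_hba and pdev are made up):
 *
 *	shost = scsi_host_alloc(&foo_template, sizeof(struct foo_hba));
 *	if (!shost)
 *		return -ENOMEM;
 *	(set shost->max_id, shost->max_lun, shost->irq, ... as needed)
 *	error = scsi_add_host(shost, &pdev->dev);
 *	if (error)
 *		goto out_put_host;
 *	scsi_scan_host(shost);
 *
 * Teardown mirrors this: scsi_remove_host() followed by scsi_host_put().
 */
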
extern void scsi_rescan_device(struct device *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned short);
extern const char *scsi_host_state_name(enum scsi_host_state);

extern u64 scsi_calculate_bounce_limit(struct Scsi_Host *);

static inline struct device *scsi_get_device(struct Scsi_Host *shost)
{
	return shost->shost_gendev.parent;
}

/**
 * scsi_host_scan_allowed - Is scanning of this host allowed
 * @shost:	Pointer to Scsi_Host.
 **/
static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
{
	return shost->shost_state == SHOST_RUNNING;
}

extern void scsi_unblock_requests(struct Scsi_Host *);
extern void scsi_block_requests(struct Scsi_Host *);

struct class_container;

extern struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
				void (*) (struct request_queue *));
/*
 * These two functions are used to allocate and free a pseudo device
 * which will connect to the host adapter itself rather than any
 * physical device.  You must deallocate when you are done with the
 * thing.  This physical pseudo-device isn't real and won't be available
 * from any high-level drivers.
 */
extern void scsi_free_host_dev(struct scsi_device *);
extern struct scsi_device *scsi_get_host_dev(struct Scsi_Host *);

/* legacy interfaces */
extern struct Scsi_Host *scsi_register(struct scsi_host_template *, int);
extern void scsi_unregister(struct Scsi_Host *);
extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);

#endif /* _SCSI_SCSI_HOST_H */