/*
 * I2O kernel space accessible structures/APIs
 *
 * (c) Copyright 1999, 2000 Red Hat Software
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 *************************************************************************
 *
 * This header file defines the I2O APIs/structures for use by
 * the I2O kernel modules.
 *
 */

#ifndef _I2O_H
#define _I2O_H

#ifdef __KERNEL__		/* This file to be included by kernel only */

#include <linux/i2o-dev.h>

/* How many different OSM's are we allowing */
#define I2O_MAX_DRIVERS		8

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/workqueue.h>	/* work_struct */
#include <linux/mempool.h>

#include <asm/io.h>
#include <asm/semaphore.h>	/* Needed for MUTEX init macros */

/* message queue empty */
#define I2O_QUEUE_EMPTY		0xffffffff

/*
 * Cache strategies
 */

/* The NULL strategy leaves everything up to the controller. This tends to be a
 * pessimal but functional choice.
 */
#define CACHE_NULL		0
/* Prefetch data when reading. We continually attempt to load the next 32 sectors
 * into the controller cache.
 */
#define CACHE_PREFETCH		1
/* Prefetch data when reading. We sometimes attempt to load the next 32 sectors
 * into the controller cache. When an I/O is <= 8K we assume it is probably
 * not sequential and don't prefetch (default)
 */
#define CACHE_SMARTFETCH	2
/* Data is written to the cache and then out on to the disk. The I/O must be
 * physically on the medium before the write is acknowledged (default without
 * NVRAM)
 */
#define CACHE_WRITETHROUGH	17
/* Data is written to the cache and then out on to the disk. The controller
 * is permitted to write back the cache any way it wants. (default if battery
 * backed NVRAM is present). It can be useful to set this for swap regardless of
 * battery state.
 */
#define CACHE_WRITEBACK		18
/* Optimise for under powered controllers, especially on RAID1 and RAID0. We
 * write large I/O's directly to disk bypassing the cache to avoid the extra
 * memory copy hits. Small writes are writeback cached
 */
#define CACHE_SMARTBACK		19
/* Optimise for under powered controllers, especially on RAID1 and RAID0. We
 * write large I/O's directly to disk bypassing the cache to avoid the extra
 * memory copy hits. Small writes are writethrough cached. Suitable for devices
 * lacking battery backup
 */
#define CACHE_SMARTTHROUGH	20

/*
 * Ioctl structures
 */

#define BLKI2OGRSTRAT	_IOR('2', 1, int)
#define BLKI2OGWSTRAT	_IOR('2', 2, int)
#define BLKI2OSRSTRAT	_IOW('2', 3, int)
#define BLKI2OSWSTRAT	_IOW('2', 4, int)
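
/*
 * Illustrative sketch only (not part of this header): how a userspace tool
 * might read and change the read-cache strategy of an I2O block device
 * with the ioctls above. The device node name is an assumption of the
 * example.
 *
 *	int fd = open("/dev/i2o/hda", O_RDONLY);
 *	int strategy;
 *
 *	if (ioctl(fd, BLKI2OGRSTRAT, &strategy) == 0)
 *		printf("current read cache strategy: %d\n", strategy);
 *
 *	strategy = CACHE_SMARTFETCH;
 *	if (ioctl(fd, BLKI2OSRSTRAT, &strategy) < 0)
 *		perror("BLKI2OSRSTRAT");
 */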

/*
 * I2O Function codes
 */

/*
 * Executive Class
 */
#define I2O_CMD_ADAPTER_ASSIGN		0xB3
#define I2O_CMD_ADAPTER_READ		0xB2
#define I2O_CMD_ADAPTER_RELEASE		0xB5
#define I2O_CMD_BIOS_INFO_SET		0xA5
#define I2O_CMD_BOOT_DEVICE_SET		0xA7
#define I2O_CMD_CONFIG_VALIDATE		0xBB
#define I2O_CMD_CONN_SETUP		0xCA
#define I2O_CMD_DDM_DESTROY		0xB1
#define I2O_CMD_DDM_ENABLE		0xD5
#define I2O_CMD_DDM_QUIESCE		0xC7
#define I2O_CMD_DDM_RESET		0xD9
#define I2O_CMD_DDM_SUSPEND		0xAF
#define I2O_CMD_DEVICE_ASSIGN		0xB7
#define I2O_CMD_DEVICE_RELEASE		0xB9
#define I2O_CMD_HRT_GET			0xA8
#define I2O_CMD_ADAPTER_CLEAR		0xBE
#define I2O_CMD_ADAPTER_CONNECT		0xC9
#define I2O_CMD_ADAPTER_RESET		0xBD
#define I2O_CMD_LCT_NOTIFY		0xA2
#define I2O_CMD_OUTBOUND_INIT		0xA1
#define I2O_CMD_PATH_ENABLE		0xD3
#define I2O_CMD_PATH_QUIESCE		0xC5
#define I2O_CMD_PATH_RESET		0xD7
#define I2O_CMD_STATIC_MF_CREATE	0xDD
#define I2O_CMD_STATIC_MF_RELEASE	0xDF
#define I2O_CMD_STATUS_GET		0xA0
#define I2O_CMD_SW_DOWNLOAD		0xA9
#define I2O_CMD_SW_UPLOAD		0xAB
#define I2O_CMD_SW_REMOVE		0xAD
#define I2O_CMD_SYS_ENABLE		0xD1
#define I2O_CMD_SYS_MODIFY		0xC1
#define I2O_CMD_SYS_QUIESCE		0xC3
#define I2O_CMD_SYS_TAB_SET		0xA3

/*
 * Utility Class
 */
#define I2O_CMD_UTIL_NOP		0x00
#define I2O_CMD_UTIL_ABORT		0x01
#define I2O_CMD_UTIL_CLAIM		0x09
#define I2O_CMD_UTIL_RELEASE		0x0B
#define I2O_CMD_UTIL_PARAMS_GET		0x06
#define I2O_CMD_UTIL_PARAMS_SET		0x05
#define I2O_CMD_UTIL_EVT_REGISTER	0x13
#define I2O_CMD_UTIL_EVT_ACK		0x14
#define I2O_CMD_UTIL_CONFIG_DIALOG	0x10
#define I2O_CMD_UTIL_DEVICE_RESERVE	0x0D
#define I2O_CMD_UTIL_DEVICE_RELEASE	0x0F
#define I2O_CMD_UTIL_LOCK		0x17
#define I2O_CMD_UTIL_LOCK_RELEASE	0x19
#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY	0x15

/*
 * SCSI Host Bus Adapter Class
 */
#define I2O_CMD_SCSI_EXEC		0x81
#define I2O_CMD_SCSI_ABORT		0x83
#define I2O_CMD_SCSI_BUSRESET		0x27

/*
 * Bus Adapter Class
 */
#define I2O_CMD_BUS_ADAPTER_RESET	0x85
#define I2O_CMD_BUS_RESET		0x87
#define I2O_CMD_BUS_SCAN		0x89
#define I2O_CMD_BUS_QUIESCE		0x8b

/*
 * Random Block Storage Class
 */
#define I2O_CMD_BLOCK_READ		0x30
#define I2O_CMD_BLOCK_WRITE		0x31
#define I2O_CMD_BLOCK_CFLUSH		0x37
#define I2O_CMD_BLOCK_MLOCK		0x49
#define I2O_CMD_BLOCK_MUNLOCK		0x4B
#define I2O_CMD_BLOCK_MMOUNT		0x41
#define I2O_CMD_BLOCK_MEJECT		0x43
#define I2O_CMD_BLOCK_POWER		0x70

#define I2O_CMD_PRIVATE			0xFF

/* Command status values */

#define I2O_CMD_IN_PROGRESS	0x01
#define I2O_CMD_REJECTED	0x02
#define I2O_CMD_FAILED		0x03
#define I2O_CMD_COMPLETED	0x04

/* I2O API function return values */

#define I2O_RTN_NO_ERROR		0
#define I2O_RTN_NOT_INIT		1
#define I2O_RTN_FREE_Q_EMPTY		2
#define I2O_RTN_TCB_ERROR		3
#define I2O_RTN_TRANSACTION_ERROR	4
#define I2O_RTN_ADAPTER_ALREADY_INIT	5
#define I2O_RTN_MALLOC_ERROR		6
#define I2O_RTN_ADPTR_NOT_REGISTERED	7
#define I2O_RTN_MSG_REPLY_TIMEOUT	8
#define I2O_RTN_NO_STATUS		9
#define I2O_RTN_NO_FIRM_VER		10
#define I2O_RTN_NO_LINK_SPEED		11

/* Reply message status defines for all messages */

#define I2O_REPLY_STATUS_SUCCESS			0x00
#define I2O_REPLY_STATUS_ABORT_DIRTY			0x01
#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER		0x02
#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER		0x03
#define I2O_REPLY_STATUS_ERROR_DIRTY			0x04
#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER		0x05
#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER		0x06
#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY		0x08
#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER	0x09
#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER	0x0A
#define I2O_REPLY_STATUS_TRANSACTION_ERROR		0x0B
#define I2O_REPLY_STATUS_PROGRESS_REPORT		0x80

/* Status codes and Error Information for Parameter functions */

#define I2O_PARAMS_STATUS_SUCCESS		0x00
#define I2O_PARAMS_STATUS_BAD_KEY_ABORT		0x01
#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE	0x02
#define I2O_PARAMS_STATUS_BUFFER_FULL		0x03
#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL	0x04
#define I2O_PARAMS_STATUS_FIELD_UNREADABLE	0x05
#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE	0x06
#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS	0x07
#define I2O_PARAMS_STATUS_INVALID_GROUP_ID	0x08
#define I2O_PARAMS_STATUS_INVALID_OPERATION	0x09
#define I2O_PARAMS_STATUS_NO_KEY_FIELD		0x0A
#define I2O_PARAMS_STATUS_NO_SUCH_FIELD		0x0B
#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP	0x0C
#define I2O_PARAMS_STATUS_OPERATION_ERROR	0x0D
#define I2O_PARAMS_STATUS_SCALAR_ERROR		0x0E
#define I2O_PARAMS_STATUS_TABLE_ERROR		0x0F
#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE	0x10

/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error
 * messages: Table 3-2 Detailed Status Codes.*/

#define I2O_DSC_SUCCESS				0x0000
#define I2O_DSC_BAD_KEY				0x0002
#define I2O_DSC_TCL_ERROR			0x0003
#define I2O_DSC_REPLY_BUFFER_FULL		0x0004
#define I2O_DSC_NO_SUCH_PAGE			0x0005
#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT	0x0006
#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD	0x0007
#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE		0x0009
#define I2O_DSC_UNSUPPORTED_FUNCTION		0x000A
#define I2O_DSC_DEVICE_LOCKED			0x000B
#define I2O_DSC_DEVICE_RESET			0x000C
#define I2O_DSC_INAPPROPRIATE_FUNCTION		0x000D
#define I2O_DSC_INVALID_INITIATOR_ADDRESS	0x000E
#define I2O_DSC_INVALID_MESSAGE_FLAGS		0x000F
#define I2O_DSC_INVALID_OFFSET			0x0010
#define I2O_DSC_INVALID_PARAMETER		0x0011
#define I2O_DSC_INVALID_REQUEST			0x0012
#define I2O_DSC_INVALID_TARGET_ADDRESS		0x0013
#define I2O_DSC_MESSAGE_TOO_LARGE		0x0014
#define I2O_DSC_MESSAGE_TOO_SMALL		0x0015
#define I2O_DSC_MISSING_PARAMETER		0x0016
#define I2O_DSC_TIMEOUT				0x0017
#define I2O_DSC_UNKNOWN_ERROR			0x0018
#define I2O_DSC_UNKNOWN_FUNCTION		0x0019
#define I2O_DSC_UNSUPPORTED_VERSION		0x001A
#define I2O_DSC_DEVICE_BUSY			0x001B
#define I2O_DSC_DEVICE_NOT_AVAILABLE		0x001C

/* DetailedStatusCode defines for Block Storage Operation: Table 6-7 Detailed
   Status Codes.*/

#define I2O_BSA_DSC_SUCCESS		0x0000
#define I2O_BSA_DSC_MEDIA_ERROR		0x0001
#define I2O_BSA_DSC_ACCESS_ERROR	0x0002
#define I2O_BSA_DSC_DEVICE_FAILURE	0x0003
#define I2O_BSA_DSC_DEVICE_NOT_READY	0x0004
#define I2O_BSA_DSC_MEDIA_NOT_PRESENT	0x0005
#define I2O_BSA_DSC_MEDIA_LOCKED	0x0006
#define I2O_BSA_DSC_MEDIA_FAILURE	0x0007
#define I2O_BSA_DSC_PROTOCOL_FAILURE	0x0008
#define I2O_BSA_DSC_BUS_FAILURE		0x0009
#define I2O_BSA_DSC_ACCESS_VIOLATION	0x000A
#define I2O_BSA_DSC_WRITE_PROTECTED	0x000B
#define I2O_BSA_DSC_DEVICE_RESET	0x000C
#define I2O_BSA_DSC_VOLUME_CHANGED	0x000D
#define I2O_BSA_DSC_TIMEOUT		0x000E

/* FailureStatusCodes, Table 3-3 Message Failure Codes */

#define I2O_FSC_TRANSPORT_SERVICE_SUSPENDED		0x81
#define I2O_FSC_TRANSPORT_SERVICE_TERMINATED		0x82
#define I2O_FSC_TRANSPORT_CONGESTION			0x83
#define I2O_FSC_TRANSPORT_FAILURE			0x84
#define I2O_FSC_TRANSPORT_STATE_ERROR			0x85
#define I2O_FSC_TRANSPORT_TIME_OUT			0x86
#define I2O_FSC_TRANSPORT_ROUTING_FAILURE		0x87
#define I2O_FSC_TRANSPORT_INVALID_VERSION		0x88
#define I2O_FSC_TRANSPORT_INVALID_OFFSET		0x89
#define I2O_FSC_TRANSPORT_INVALID_MSG_FLAGS		0x8A
#define I2O_FSC_TRANSPORT_FRAME_TOO_SMALL		0x8B
#define I2O_FSC_TRANSPORT_FRAME_TOO_LARGE		0x8C
#define I2O_FSC_TRANSPORT_INVALID_TARGET_ID		0x8D
#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_ID		0x8E
#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_CONTEXT	0x8F
#define I2O_FSC_TRANSPORT_UNKNOWN_FAILURE		0xFF

/* Device Claim Types */
#define I2O_CLAIM_PRIMARY	0x01000000
#define I2O_CLAIM_MANAGEMENT	0x02000000
#define I2O_CLAIM_AUTHORIZED	0x03000000
#define I2O_CLAIM_SECONDARY	0x04000000

/* Message header defines for VersionOffset */
#define I2OVER15	0x0001
#define I2OVER20	0x0002

/* Default is 1.5 */
#define I2OVERSION	I2OVER15

#define SGL_OFFSET_0	I2OVERSION
#define SGL_OFFSET_4	(0x0040 | I2OVERSION)
#define SGL_OFFSET_5	(0x0050 | I2OVERSION)
#define SGL_OFFSET_6	(0x0060 | I2OVERSION)
#define SGL_OFFSET_7	(0x0070 | I2OVERSION)
#define SGL_OFFSET_8	(0x0080 | I2OVERSION)
#define SGL_OFFSET_9	(0x0090 | I2OVERSION)
#define SGL_OFFSET_10	(0x00A0 | I2OVERSION)
#define SGL_OFFSET_11	(0x00B0 | I2OVERSION)
#define SGL_OFFSET_12	(0x00C0 | I2OVERSION)
#define SGL_OFFSET(x)	(((x)<<4) | I2OVERSION)

/* Transaction Reply Lists (TRL) Control Word structure */
#define TRL_SINGLE_FIXED_LENGTH		0x00
#define TRL_SINGLE_VARIABLE_LENGTH	0x40
#define TRL_MULTIPLE_FIXED_LENGTH	0x80

/* msg header defines for MsgFlags */
#define MSG_STATIC	0x0100
#define MSG_64BIT_CNTXT	0x0200
#define MSG_MULTI_TRANS	0x1000
#define MSG_FAIL	0x2000
#define MSG_FINAL	0x4000
#define MSG_REPLY	0x8000

/* minimum size msg */
#define THREE_WORD_MSG_SIZE	0x00030000
#define FOUR_WORD_MSG_SIZE	0x00040000
#define FIVE_WORD_MSG_SIZE	0x00050000
#define SIX_WORD_MSG_SIZE	0x00060000
#define SEVEN_WORD_MSG_SIZE	0x00070000
#define EIGHT_WORD_MSG_SIZE	0x00080000
#define NINE_WORD_MSG_SIZE	0x00090000
#define TEN_WORD_MSG_SIZE	0x000A0000
#define ELEVEN_WORD_MSG_SIZE	0x000B0000
#define I2O_MESSAGE_SIZE(x)	((x)<<16)

/* special TID assignments */
#define ADAPTER_TID	0
#define HOST_TID	1
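
/*
 * Illustrative sketch only: the first header word of a request combines one
 * of the *_WORD_MSG_SIZE values (or I2O_MESSAGE_SIZE(x)) with an SGL_OFFSET_*
 * value, which already carries the I2O version bits.
 *
 *	u32 head0 = SEVEN_WORD_MSG_SIZE | SGL_OFFSET_5;		// 0x00070051
 *	u32 same  = I2O_MESSAGE_SIZE(7) | SGL_OFFSET(5);	// same value
 */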

/* outbound queue defines */
#define I2O_MAX_OUTBOUND_MSG_FRAMES	128
#define I2O_OUTBOUND_MSG_FRAME_SIZE	128	/* in 32-bit words */

/* inbound queue definitions */
#define I2O_MSG_INPOOL_MIN		32
#define I2O_INBOUND_MSG_FRAME_SIZE	128	/* in 32-bit words */

#define I2O_POST_WAIT_OK	0
#define I2O_POST_WAIT_TIMEOUT	-ETIMEDOUT

#define I2O_CONTEXT_LIST_MIN_LENGTH	15
#define I2O_CONTEXT_LIST_USED		0x01
#define I2O_CONTEXT_LIST_DELETED	0x02

/* timeouts */
#define I2O_TIMEOUT_INIT_OUTBOUND_QUEUE	15
#define I2O_TIMEOUT_MESSAGE_GET		5
#define I2O_TIMEOUT_RESET		30
#define I2O_TIMEOUT_STATUS_GET		5
#define I2O_TIMEOUT_LCT_GET		360
#define I2O_TIMEOUT_SCSI_SCB_ABORT	240

/* retries */
#define I2O_HRT_GET_TRIES	3
#define I2O_LCT_GET_TRIES	3

/* defines for max_sectors and max_phys_segments */
#define I2O_MAX_SECTORS		1024
#define I2O_MAX_SECTORS_LIMITED	128
#define I2O_MAX_PHYS_SEGMENTS	MAX_PHYS_SEGMENTS

/*
 * Message structures
 */
struct i2o_message {
	union {
		struct {
			u8 version_offset;
			u8 flags;
			u16 size;
			u32 target_tid:12;
			u32 init_tid:12;
			u32 function:8;
			u32 icntxt;	/* initiator context */
			u32 tcntxt;	/* transaction context */
		} s;
		u32 head[4];
	} u;
	/* List follows */
	u32 body[0];
};
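
/*
 * Illustrative sketch only: filling the header words of a request and
 * reading the function code back out of a reply. Header words are stored
 * little endian, so cpu_to_le32()/le32_to_cpu() are used on access. "tid",
 * "osm_context", "transaction_context" and "reply" are placeholders of the
 * example, not names defined by this header.
 *
 *	msg->u.head[0] = cpu_to_le32(SIX_WORD_MSG_SIZE | SGL_OFFSET_4);
 *	msg->u.head[1] = cpu_to_le32(I2O_CMD_BLOCK_READ << 24 |
 *				     HOST_TID << 12 | tid);
 *	msg->u.head[2] = cpu_to_le32(osm_context);		// icntxt
 *	msg->u.head[3] = cpu_to_le32(transaction_context);	// tcntxt
 *
 *	u8 function = (le32_to_cpu(reply->u.head[1]) >> 24) & 0xff;
 */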

/* MFA and I2O message used by mempool */
struct i2o_msg_mfa {
	u32 mfa;		/* MFA returned by the controller */
	struct i2o_message msg;	/* I2O message */
};

/*
 * Each I2O device entity has one of these.
 */
struct i2o_device {
	i2o_lct_entry lct_data;	/* Device LCT information */

	struct i2o_controller *iop;	/* Controlling IOP */
	struct list_head list;	/* node in IOP devices list */

	struct device device;

	struct semaphore lock;	/* device lock */
};

/*
 * Event structure provided to the event handling function
 */
struct i2o_event {
	struct work_struct work;
	struct i2o_device *i2o_dev;	/* I2O device pointer from which the
					   event reply was initiated */
	u16 size;		/* Size of data in 32-bit words */
	u32 tcntxt;		/* Transaction context used at
				   registration */
	u32 event_indicator;	/* Event indicator from reply */
	u32 data[0];		/* Event data from reply */
};
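
/*
 * Illustrative sketch only: an OSM event handler, wired into struct
 * i2o_driver below via its work_func_t member, can recover the surrounding
 * i2o_event with container_of(). The handler name is an assumption of the
 * example, and osm_debug() assumes OSM_NAME is defined (see the helpers at
 * the end of this header).
 *
 *	static void example_osm_event(struct work_struct *work)
 *	{
 *		struct i2o_event *evt =
 *			container_of(work, struct i2o_event, work);
 *
 *		osm_debug("event indicator 0x%08x, %u data words\n",
 *			  evt->event_indicator, evt->size);
 *		kfree(evt);
 *	}
 */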

/*
 * I2O classes which could be handled by the OSM
 */
struct i2o_class_id {
	u16 class_id:12;
};

/*
 * I2O driver structure for OSMs
 */
struct i2o_driver {
	char *name;		/* OSM name */
	int context;		/* Low 8 bits of the transaction info */
	struct i2o_class_id *classes;	/* I2O classes that this OSM handles */

	/* Message reply handler */
	int (*reply) (struct i2o_controller *, u32, struct i2o_message *);

	/* Event handler */
	work_func_t event;

	struct workqueue_struct *event_queue;	/* Event queue */

	struct device_driver driver;

	/* notification of changes */
	void (*notify_controller_add) (struct i2o_controller *);
	void (*notify_controller_remove) (struct i2o_controller *);
	void (*notify_device_add) (struct i2o_device *);
	void (*notify_device_remove) (struct i2o_device *);

	struct semaphore lock;
};
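
/*
 * Illustrative sketch only: a minimal OSM declaration built on the
 * structure above and registered with i2o_driver_register() (declared
 * further down). All names, the handlers and the class list are
 * assumptions of the example.
 *
 *	static struct i2o_class_id example_class_ids[] = {
 *		{ I2O_CLASS_RANDOM_BLOCK_STORAGE },
 *		{ I2O_CLASS_END }
 *	};
 *
 *	static struct i2o_driver example_driver = {
 *		.name		= "example-osm",
 *		.event		= example_osm_event,
 *		.reply		= example_osm_reply,
 *		.classes	= example_class_ids,
 *		.driver		= {
 *			.name	= "example-osm",
 *		},
 *	};
 *
 *	// in the module init function:
 *	return i2o_driver_register(&example_driver);
 */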

/*
 * Contains DMA mapped address information
 */
struct i2o_dma {
	void *virt;
	dma_addr_t phys;
	size_t len;
};

/*
 * Contains slab cache and mempool information
 */
struct i2o_pool {
	char *name;
	struct kmem_cache *slab;
	mempool_t *mempool;
};

/*
 * Contains IO mapped address information
 */
struct i2o_io {
	void __iomem *virt;
	unsigned long phys;
	unsigned long len;
};

/*
 * Context queue entry, used for 32-bit context on 64-bit systems
 */
struct i2o_context_list_element {
	struct list_head list;
	u32 context;
	void *ptr;
	unsigned long timestamp;
};

/*
 * Each I2O controller has one of these objects
 */
struct i2o_controller {
	char name[16];
	int unit;
	int type;

	struct pci_dev *pdev;		/* PCI device */

	unsigned int promise:1;		/* Promise controller */
	unsigned int adaptec:1;		/* DPT / Adaptec controller */
	unsigned int raptor:1;		/* split bar */
	unsigned int no_quiesce:1;	/* don't quiesce before reset */
	unsigned int short_req:1;	/* use small block sizes */
	unsigned int limit_sectors:1;	/* limit number of sectors / request */
	unsigned int pae_support:1;	/* controller has 64-bit SGL support */

	struct list_head devices;	/* list of I2O devices */
	struct list_head list;		/* Controller list */

	void __iomem *in_port;		/* Inbound port address */
	void __iomem *out_port;		/* Outbound port address */
	void __iomem *irq_status;	/* Interrupt status register address */
	void __iomem *irq_mask;		/* Interrupt mask register address */

	struct i2o_dma status;		/* IOP status block */

	struct i2o_dma hrt;		/* HW Resource Table */
	i2o_lct *lct;			/* Logical Config Table */
	struct i2o_dma dlct;		/* Temp LCT */
	struct semaphore lct_lock;	/* Lock for LCT updates */
	struct i2o_dma status_block;	/* IOP status block */

	struct i2o_io base;		/* controller messaging unit */
	struct i2o_io in_queue;		/* inbound message queue Host->IOP */
	struct i2o_dma out_queue;	/* outbound message queue IOP->Host */

	struct i2o_pool in_msg;		/* mempool for inbound messages */

	unsigned int battery:1;		/* Has a battery backup */
	unsigned int io_alloc:1;	/* An I/O resource was allocated */
	unsigned int mem_alloc:1;	/* A memory resource was allocated */

	struct resource io_resource;	/* I/O resource allocated to the IOP */
	struct resource mem_resource;	/* Mem resource allocated to the IOP */

	struct device device;
	struct i2o_device *exec;	/* Executive */
#if BITS_PER_LONG == 64
	spinlock_t context_list_lock;	/* lock for context_list */
	atomic_t context_list_counter;	/* needed for unique contexts */
	struct list_head context_list;	/* list of context id's
					   and pointers */
#endif
	spinlock_t lock;		/* lock for controller
					   configuration */

	void *driver_data[I2O_MAX_DRIVERS];	/* storage for drivers */
};

/*
 * I2O System table entry
 *
 * The system table contains information about all the IOPs in the
 * system. It is sent to all IOPs so that they can create peer2peer
 * connections between them.
 */
struct i2o_sys_tbl_entry {
	u16 org_id;
	u16 reserved1;
	u32 iop_id:12;
	u32 reserved2:20;
	u16 seg_num:12;
	u16 i2o_version:4;
	u8 iop_state;
	u8 msg_type;
	u16 frame_size;
	u16 reserved3;
	u32 last_changed;
	u32 iop_capabilities;
	u32 inbound_low;
	u32 inbound_high;
};

struct i2o_sys_tbl {
	u8 num_entries;
	u8 version;
	u16 reserved1;
	u32 change_ind;
	u32 reserved2;
	u32 reserved3;
	struct i2o_sys_tbl_entry iops[0];
};

extern struct list_head i2o_controllers;

/* Message functions */
static inline struct i2o_message *i2o_msg_get(struct i2o_controller *);
extern struct i2o_message *i2o_msg_get_wait(struct i2o_controller *, int);
static inline void i2o_msg_post(struct i2o_controller *, struct i2o_message *);
static inline int i2o_msg_post_wait(struct i2o_controller *,
				    struct i2o_message *, unsigned long);
extern int i2o_msg_post_wait_mem(struct i2o_controller *, struct i2o_message *,
				 unsigned long, struct i2o_dma *);
static inline void i2o_flush_reply(struct i2o_controller *, u32);

/* IOP functions */
extern int i2o_status_get(struct i2o_controller *);

extern int i2o_event_register(struct i2o_device *, struct i2o_driver *, int,
			      u32);
extern struct i2o_device *i2o_iop_find_device(struct i2o_controller *, u16);
extern struct i2o_controller *i2o_find_iop(int);

/* Functions needed for handling 64-bit pointers in 32-bit context */
#if BITS_PER_LONG == 64
extern u32 i2o_cntxt_list_add(struct i2o_controller *, void *);
extern void *i2o_cntxt_list_get(struct i2o_controller *, u32);
extern u32 i2o_cntxt_list_remove(struct i2o_controller *, void *);
extern u32 i2o_cntxt_list_get_ptr(struct i2o_controller *, void *);

static inline u32 i2o_ptr_low(void *ptr)
{
	return (u32) (u64) ptr;
};

static inline u32 i2o_ptr_high(void *ptr)
{
	return (u32) ((u64) ptr >> 32);
};

static inline u32 i2o_dma_low(dma_addr_t dma_addr)
{
	return (u32) (u64) dma_addr;
};

static inline u32 i2o_dma_high(dma_addr_t dma_addr)
{
	return (u32) ((u64) dma_addr >> 32);
};
#else
static inline u32 i2o_cntxt_list_add(struct i2o_controller *c, void *ptr)
{
	return (u32) ptr;
};

static inline void *i2o_cntxt_list_get(struct i2o_controller *c, u32 context)
{
	return (void *)context;
};

static inline u32 i2o_cntxt_list_remove(struct i2o_controller *c, void *ptr)
{
	return (u32) ptr;
};

static inline u32 i2o_cntxt_list_get_ptr(struct i2o_controller *c, void *ptr)
{
	return (u32) ptr;
};

static inline u32 i2o_ptr_low(void *ptr)
{
	return (u32) ptr;
};

static inline u32 i2o_ptr_high(void *ptr)
{
	return 0;
};

static inline u32 i2o_dma_low(dma_addr_t dma_addr)
{
	return (u32) dma_addr;
};

static inline u32 i2o_dma_high(dma_addr_t dma_addr)
{
	return 0;
};
#endif

/**
 * i2o_sg_tablesize - Calculate the maximum number of elements in a SGL
 * @c: I2O controller for which the calculation should be done
 * @body_size: maximum body size used for message in 32-bit words.
 *
 * Return the maximum number of SG elements in a SG list.
 */
static inline u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
{
	i2o_status_block *sb = c->status_block.virt;
	u16 sg_count =
	    (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) -
	    body_size;

	if (c->pae_support) {
		/*
		 * for 64-bit a SG attribute element must be added and each
		 * SG element needs 12 bytes instead of 8.
		 */
		sg_count -= 2;
		sg_count /= 3;
	} else
		sg_count /= 2;

	if (c->short_req && (sg_count > 8))
		sg_count = 8;

	return sg_count;
};
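
/*
 * Illustrative sketch only: a SCSI OSM could use i2o_sg_tablesize() to size
 * the sg_tablesize field of its Scsi_Host, passing the body size its
 * I2O_CMD_SCSI_EXEC messages use; the body size of 9 words is an assumption
 * of the example.
 *
 *	shost->sg_tablesize = i2o_sg_tablesize(c, 9);
 */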

/**
 * i2o_dma_map_single - Map pointer to controller and fill in I2O message.
 * @c: I2O controller
 * @ptr: pointer to the data which should be mapped
 * @size: size of data in bytes
 * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
 * @sg_ptr: pointer to the SG list inside the I2O message
 *
 * This function does all necessary DMA handling and also writes the I2O
 * SGL elements into the I2O message. For details on DMA handling see also
 * dma_map_single(). The pointer sg_ptr will only be set to the end of the
 * SG list if the allocation was successful.
 *
 * Returns DMA address which must be checked for failures using
 * dma_mapping_error().
 */
static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
					    size_t size,
					    enum dma_data_direction direction,
					    u32 ** sg_ptr)
{
	u32 sg_flags;
	u32 *mptr = *sg_ptr;
	dma_addr_t dma_addr;

	switch (direction) {
	case DMA_TO_DEVICE:
		sg_flags = 0xd4000000;
		break;
	case DMA_FROM_DEVICE:
		sg_flags = 0xd0000000;
		break;
	default:
		return 0;
	}

	dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
	if (!dma_mapping_error(dma_addr)) {
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
		if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
			*mptr++ = cpu_to_le32(0x7C020002);
			*mptr++ = cpu_to_le32(PAGE_SIZE);
		}
#endif

		*mptr++ = cpu_to_le32(sg_flags | size);
		*mptr++ = cpu_to_le32(i2o_dma_low(dma_addr));
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
		if ((sizeof(dma_addr_t) > 4) && c->pae_support)
			*mptr++ = cpu_to_le32(i2o_dma_high(dma_addr));
#endif
		*sg_ptr = mptr;
	}
	return dma_addr;
};
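
/*
 * Illustrative sketch only: mapping a flat buffer into a message that is
 * being built, with mptr pointing at the next free body word. Note that
 * dma_mapping_error() here takes only the address, matching the call used
 * inside the helper above. "buf" and "len" are placeholders of the example.
 *
 *	u32 *mptr = &msg->body[0];
 *	dma_addr_t addr;
 *
 *	addr = i2o_dma_map_single(c, buf, len, DMA_TO_DEVICE, &mptr);
 *	if (dma_mapping_error(addr))
 *		return -ENOMEM;
 */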

/**
 * i2o_dma_map_sg - Map a SG List to controller and fill in I2O message.
 * @c: I2O controller
 * @sg: SG list to be mapped
 * @sg_count: number of elements in the SG list
 * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
 * @sg_ptr: pointer to the SG list inside the I2O message
 *
 * This function does all necessary DMA handling and also writes the I2O
 * SGL elements into the I2O message. For details on DMA handling see also
 * dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG
 * list if the allocation was successful.
 *
 * Returns 0 on failure or 1 on success.
 */
static inline int i2o_dma_map_sg(struct i2o_controller *c,
				 struct scatterlist *sg, int sg_count,
				 enum dma_data_direction direction,
				 u32 ** sg_ptr)
{
	u32 sg_flags;
	u32 *mptr = *sg_ptr;

	switch (direction) {
	case DMA_TO_DEVICE:
		sg_flags = 0x14000000;
		break;
	case DMA_FROM_DEVICE:
		sg_flags = 0x10000000;
		break;
	default:
		return 0;
	}

	sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction);
	if (!sg_count)
		return 0;

#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
	if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
		*mptr++ = cpu_to_le32(0x7C020002);
		*mptr++ = cpu_to_le32(PAGE_SIZE);
	}
#endif

	while (sg_count-- > 0) {
		if (!sg_count)
			sg_flags |= 0xC0000000;
		*mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg));
		*mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg)));
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
		if ((sizeof(dma_addr_t) > 4) && c->pae_support)
			*mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
#endif
		sg++;
	}
	*sg_ptr = mptr;

	return 1;
};
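
/*
 * Illustrative sketch only: writing a scatter/gather list into the message
 * body; on success mptr is advanced past the SGL just written. "sg" and
 * "nents" are placeholders for an already built scatterlist.
 *
 *	u32 *mptr = &msg->body[0];
 *
 *	if (!i2o_dma_map_sg(c, sg, nents, DMA_FROM_DEVICE, &mptr))
 *		return -ENOMEM;
 */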

/**
 * i2o_dma_alloc - Allocate DMA memory
 * @dev: struct device pointer to the PCI device of the I2O controller
 * @addr: i2o_dma struct which should get the DMA buffer
 * @len: length of the new DMA memory
 * @gfp_mask: GFP mask
 *
 * Allocate a coherent DMA memory and write the pointers into addr.
 *
 * Returns 0 on success or -ENOMEM on failure.
 */
static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr,
				size_t len, gfp_t gfp_mask)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int dma_64 = 0;

	if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) {
		dma_64 = 1;
		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
			return -ENOMEM;
	}

	addr->virt = dma_alloc_coherent(dev, len, &addr->phys, gfp_mask);

	if ((sizeof(dma_addr_t) > 4) && dma_64)
		if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
			printk(KERN_WARNING "i2o: unable to set 64-bit DMA\n");

	if (!addr->virt)
		return -ENOMEM;

	memset(addr->virt, 0, len);
	addr->len = len;

	return 0;
};

/**
 * i2o_dma_free - Free DMA memory
 * @dev: struct device pointer to the PCI device of the I2O controller
 * @addr: i2o_dma struct which contains the DMA buffer
 *
 * Free a coherent DMA memory and set virtual address of addr to NULL.
 */
static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
{
	if (addr->virt) {
		if (addr->phys)
			dma_free_coherent(dev, addr->len, addr->virt,
					  addr->phys);
		else
			kfree(addr->virt);
		addr->virt = NULL;
	}
};

/**
 * i2o_dma_realloc - Realloc DMA memory
 * @dev: struct device pointer to the PCI device of the I2O controller
 * @addr: pointer to a i2o_dma struct DMA buffer
 * @len: new length of memory
 * @gfp_mask: GFP mask
 *
 * If there was something allocated in the addr, free it first. If len > 0
 * then try to allocate it and write the addresses back to the addr
 * structure. If len == 0 set the virtual address to NULL.
 *
 * Returns 0 on success or negative error code on failure.
 */
static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
				  size_t len, gfp_t gfp_mask)
{
	i2o_dma_free(dev, addr);

	if (len)
		return i2o_dma_alloc(dev, addr, len, gfp_mask);

	return 0;
};
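
/*
 * Illustrative sketch only: typical lifetime of an i2o_dma buffer, e.g. for
 * a temporary LCT copy; the PAGE_SIZE length is an assumption of the
 * example.
 *
 *	struct i2o_dma dlct;
 *
 *	if (i2o_dma_alloc(&c->pdev->dev, &dlct, PAGE_SIZE, GFP_KERNEL))
 *		return -ENOMEM;
 *	...
 *	i2o_dma_free(&c->pdev->dev, &dlct);
 */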

/*
 * i2o_pool_alloc - Allocate a slab cache and mempool
 * @mempool: pointer to struct i2o_pool to write data into.
 * @name: name which is used to identify cache
 * @size: size of each object
 * @min_nr: minimum number of objects
 *
 * First allocates a slab cache with name and size. Then allocates a
 * mempool which uses the slab cache for allocation and freeing.
 *
 * Returns 0 on success or negative error code on failure.
 */
static inline int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
				 size_t size, int min_nr)
{
	pool->name = kmalloc(strlen(name) + 1, GFP_KERNEL);
	if (!pool->name)
		goto exit;
	strcpy(pool->name, name);

	pool->slab =
	    kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL,
			      NULL);
	if (!pool->slab)
		goto free_name;

	pool->mempool = mempool_create_slab_pool(min_nr, pool->slab);
	if (!pool->mempool)
		goto free_slab;

	return 0;

free_slab:
	kmem_cache_destroy(pool->slab);

free_name:
	kfree(pool->name);

exit:
	return -ENOMEM;
};

/*
 * i2o_pool_free - Free slab cache and mempool again
 * @mempool: pointer to struct i2o_pool which should be freed
 *
 * Note that you have to return all objects to the mempool again before
 * calling i2o_pool_free().
 */
static inline void i2o_pool_free(struct i2o_pool *pool)
{
	mempool_destroy(pool->mempool);
	kmem_cache_destroy(pool->slab);
	kfree(pool->name);
};
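
/*
 * Illustrative sketch only: setting up a pool of inbound message frames and
 * taking one object from it; the pool name and object size are assumptions
 * of the example.
 *
 *	struct i2o_pool pool;
 *	struct i2o_msg_mfa *mmsg;
 *
 *	if (i2o_pool_alloc(&pool, "i2o:example",
 *			   I2O_INBOUND_MSG_FRAME_SIZE * 4, I2O_MSG_INPOOL_MIN))
 *		return -ENOMEM;
 *
 *	mmsg = mempool_alloc(pool.mempool, GFP_ATOMIC);
 *	...
 *	mempool_free(mmsg, pool.mempool);
 *	i2o_pool_free(&pool);
 */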

/* I2O driver (OSM) functions */
extern int i2o_driver_register(struct i2o_driver *);
extern void i2o_driver_unregister(struct i2o_driver *);

/**
 * i2o_driver_notify_controller_add - Send notification of added controller
 * @drv: I2O driver
 * @c: I2O controller
 *
 * Send notification of added controller to a single registered driver.
 */
static inline void i2o_driver_notify_controller_add(struct i2o_driver *drv,
						    struct i2o_controller *c)
{
	if (drv->notify_controller_add)
		drv->notify_controller_add(c);
};

/**
 * i2o_driver_notify_controller_remove - Send notification of removed controller
 * @drv: I2O driver
 * @c: I2O controller
 *
 * Send notification of removed controller to a single registered driver.
 */
static inline void i2o_driver_notify_controller_remove(struct i2o_driver *drv,
						       struct i2o_controller *c)
{
	if (drv->notify_controller_remove)
		drv->notify_controller_remove(c);
};

/**
 * i2o_driver_notify_device_add - Send notification of added device
 * @drv: I2O driver
 * @i2o_dev: the added i2o_device
 *
 * Send notification of added device to a single registered driver.
 */
static inline void i2o_driver_notify_device_add(struct i2o_driver *drv,
						struct i2o_device *i2o_dev)
{
	if (drv->notify_device_add)
		drv->notify_device_add(i2o_dev);
};

/**
 * i2o_driver_notify_device_remove - Send notification of removed device
 * @drv: I2O driver
 * @i2o_dev: the removed i2o_device
 *
 * Send notification of removed device to a single registered driver.
 */
static inline void i2o_driver_notify_device_remove(struct i2o_driver *drv,
						   struct i2o_device *i2o_dev)
{
	if (drv->notify_device_remove)
		drv->notify_device_remove(i2o_dev);
};

extern void i2o_driver_notify_controller_add_all(struct i2o_controller *);
extern void i2o_driver_notify_controller_remove_all(struct i2o_controller *);
extern void i2o_driver_notify_device_add_all(struct i2o_device *);
extern void i2o_driver_notify_device_remove_all(struct i2o_device *);

/* I2O device functions */
extern int i2o_device_claim(struct i2o_device *);
extern int i2o_device_claim_release(struct i2o_device *);

/* Exec OSM functions */
extern int i2o_exec_lct_get(struct i2o_controller *);

/* device / driver / kobject conversion functions */
#define to_i2o_driver(drv)	container_of(drv, struct i2o_driver, driver)
#define to_i2o_device(dev)	container_of(dev, struct i2o_device, device)
#define to_i2o_controller(dev)	container_of(dev, struct i2o_controller, device)
#define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj))

/**
 * i2o_msg_out_to_virt - Turn an I2O message to a virtual address
 * @c: controller
 * @m: message engine value
 *
 * Turn a receive message from an I2O controller bus address into
 * a Linux virtual address. The shared page frame is a linear block
 * so we simply have to shift the offset. This function does not
 * work for sender side messages as they are ioremap objects
 * provided by the I2O controller.
 */
static inline struct i2o_message *i2o_msg_out_to_virt(struct i2o_controller *c,
						      u32 m)
{
	BUG_ON(m < c->out_queue.phys
	       || m >= c->out_queue.phys + c->out_queue.len);

	return c->out_queue.virt + (m - c->out_queue.phys);
};

/**
 * i2o_msg_in_to_virt - Turn an I2O message to a virtual address
 * @c: controller
 * @m: message engine value
 *
 * Turn a send message from an I2O controller bus address into
 * a Linux virtual address. The shared page frame is a linear block
 * so we simply have to shift the offset. This function does not
 * work for receive side messages as they are kmalloc objects
 * in a different pool.
 */
static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct
							     i2o_controller *c,
							     u32 m)
{
	return c->in_queue.virt + m;
};

/**
 * i2o_msg_get - obtain an I2O message from the IOP
 * @c: I2O controller
 *
 * This function tries to get a message frame. If no message frame is
 * available, it does not wait until one becomes available (see also
 * i2o_msg_get_wait). The returned pointer to the message frame is not in
 * I/O memory, it is allocated from a mempool. But because a MFA is
 * allocated from the controller too it is guaranteed that i2o_msg_post()
 * will never fail.
 *
 * On success a pointer to the message frame is returned. If the message
 * queue is empty -EBUSY is returned and if no memory is available -ENOMEM
 * is returned.
 */
static inline struct i2o_message *i2o_msg_get(struct i2o_controller *c)
{
	struct i2o_msg_mfa *mmsg = mempool_alloc(c->in_msg.mempool, GFP_ATOMIC);
	if (!mmsg)
		return ERR_PTR(-ENOMEM);

	mmsg->mfa = readl(c->in_port);
	if (unlikely(mmsg->mfa >= c->in_queue.len)) {
		u32 mfa = mmsg->mfa;

		mempool_free(mmsg, c->in_msg.mempool);

		if (mfa == I2O_QUEUE_EMPTY)
			return ERR_PTR(-EBUSY);
		return ERR_PTR(-EFAULT);
	}

	return &mmsg->msg;
};

/**
 * i2o_msg_post - Post I2O message to I2O controller
 * @c: I2O controller to which the message should be sent
 * @msg: message returned by i2o_msg_get()
 *
 * Post the message to the I2O controller and return immediately.
 */
static inline void i2o_msg_post(struct i2o_controller *c,
				struct i2o_message *msg)
{
	struct i2o_msg_mfa *mmsg;

	mmsg = container_of(msg, struct i2o_msg_mfa, msg);
	memcpy_toio(i2o_msg_in_to_virt(c, mmsg->mfa), msg,
		    (le32_to_cpu(msg->u.head[0]) >> 16) << 2);
	writel(mmsg->mfa, c->in_port);
	mempool_free(mmsg, c->in_msg.mempool);
};

/**
 * i2o_msg_post_wait - Post a message and wait until a reply is returned
 * @c: controller
 * @msg: message to post
 * @timeout: time in seconds to wait
 *
 * This API allows an OSM to post a message and then be told whether or
 * not the system received a successful reply. If the message times out
 * then the value '-ETIMEDOUT' is returned.
 *
 * Returns 0 on success or negative error code on failure.
 */
static inline int i2o_msg_post_wait(struct i2o_controller *c,
				    struct i2o_message *msg,
				    unsigned long timeout)
{
	return i2o_msg_post_wait_mem(c, msg, timeout, NULL);
};
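
/*
 * Illustrative sketch only: the usual request pattern built from the
 * helpers above - fetch a frame, fill in the header and body, then post
 * and wait for the reply. "dev" stands for a struct i2o_device the OSM
 * already holds; the UTIL CLAIM payload and the 60 second timeout are
 * assumptions of the example.
 *
 *	struct i2o_message *msg;
 *	int rc;
 *
 *	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
 *	if (IS_ERR(msg))
 *		return PTR_ERR(msg);
 *
 *	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
 *	msg->u.head[1] = cpu_to_le32(I2O_CMD_UTIL_CLAIM << 24 |
 *				     HOST_TID << 12 | dev->lct_data.tid);
 *	msg->body[0] = cpu_to_le32(I2O_CLAIM_PRIMARY);
 *
 *	rc = i2o_msg_post_wait(c, msg, 60);	// timeout in seconds
 *	if (rc)
 *		return rc;
 */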

/**
 * i2o_msg_nop_mfa - Returns a fetched MFA back to the controller
 * @c: I2O controller from which the MFA was fetched
 * @mfa: MFA which should be returned
 *
 * This function must be used for preserved messages, because i2o_msg_nop()
 * also returns the allocated memory back to the msg_pool mempool.
 */
static inline void i2o_msg_nop_mfa(struct i2o_controller *c, u32 mfa)
{
	struct i2o_message __iomem *msg;
	u32 nop[3] = {
		THREE_WORD_MSG_SIZE | SGL_OFFSET_0,
		I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
		0x00000000
	};

	msg = i2o_msg_in_to_virt(c, mfa);
	memcpy_toio(msg, nop, sizeof(nop));
	writel(mfa, c->in_port);
};

/**
 * i2o_msg_nop - Returns a message which is not used
 * @c: I2O controller from which the message was created
 * @msg: message which should be returned
 *
 * If you fetch a message via i2o_msg_get, and can't use it, you must
 * return the message with this function. Otherwise the MFA is lost as well
 * as the allocated memory from the mempool.
 */
static inline void i2o_msg_nop(struct i2o_controller *c,
			       struct i2o_message *msg)
{
	struct i2o_msg_mfa *mmsg;
	mmsg = container_of(msg, struct i2o_msg_mfa, msg);

	i2o_msg_nop_mfa(c, mmsg->mfa);
	mempool_free(mmsg, c->in_msg.mempool);
};

/**
 * i2o_flush_reply - Flush reply from I2O controller
 * @c: I2O controller
 * @m: the message identifier
 *
 * The I2O controller must be informed that the reply message is not needed
 * anymore. If you forget to flush the reply, the message frame can't be
 * used by the controller anymore and is therefore lost.
 */
static inline void i2o_flush_reply(struct i2o_controller *c, u32 m)
{
	writel(m, c->out_port);
};

/*
 * Endian handling wrapped into the macro - keeps the core code
 * cleaner.
 */

#define i2o_raw_writel(val, mem)	__raw_writel(cpu_to_le32(val), mem)

extern int i2o_parm_field_get(struct i2o_device *, int, int, void *, int);
extern int i2o_parm_table_get(struct i2o_device *, int, int, int, void *, int,
			      void *, int);

/* debugging and troubleshooting/diagnostic helpers. */
#define osm_printk(level, format, arg...) \
	printk(level "%s: " format, OSM_NAME , ## arg)

#ifdef DEBUG
#define osm_debug(format, arg...) \
	osm_printk(KERN_DEBUG, format , ## arg)
#else
#define osm_debug(format, arg...) \
	do { } while (0)
#endif

#define osm_err(format, arg...) \
	osm_printk(KERN_ERR, format , ## arg)
#define osm_info(format, arg...) \
	osm_printk(KERN_INFO, format , ## arg)
#define osm_warn(format, arg...) \
	osm_printk(KERN_WARNING, format , ## arg)
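
/*
 * Illustrative sketch only: an OSM defines OSM_NAME before using these
 * helpers so that every log line is tagged with its name. The name and
 * messages below are assumptions of the example.
 *
 *	#define OSM_NAME	"example-osm"
 *
 *	osm_info("device added (TID %03x)\n", i2o_dev->lct_data.tid);
 *	osm_err("could not claim device\n");
 */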

/* debugging functions */
extern void i2o_report_status(const char *, const char *, struct i2o_message *);
extern void i2o_dump_message(struct i2o_message *);
extern void i2o_dump_hrt(struct i2o_controller *c);
extern void i2o_debug_state(struct i2o_controller *c);

#endif				/* __KERNEL__ */
#endif				/* _I2O_H */