/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to [email protected]
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <asm/atomic.h>
#include <linux/kthread.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "2.0.2-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
	"Allow hpsa driver to access unknown HP Smart Array hardware");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3250},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3251},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254},
#define PCI_DEVICE_ID_HP_CISSF 0x333f
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x333F},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/* board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324a103C, "Smart Array P712m", &SA5_access},
	{0x324b103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "StorageWorks P1210m", &SA5_access},
	{0x333F103C, "StorageWorks P1210m", &SA5_access},
	{0x3250103C, "Smart Array", &SA5_access},
	{0x3250113C, "Smart Array", &SA5_access},
	{0x3250123C, "Smart Array", &SA5_access},
	{0x3250133C, "Smart Array", &SA5_access},
	{0x3250143C, "Smart Array", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
	int cmd_type);

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static ssize_t raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t lunid_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t unique_id_show(struct device *dev,
	struct device_attribute *attr, char *buf);
static ssize_t host_show_firmware_revision(struct device *dev,
	struct device_attribute *attr, char *buf);
static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static ssize_t host_store_rescan(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int *bucket_map);
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h);
static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	NULL,
};

static struct scsi_host_template hpsa_driver_template = {
	.module = THIS_MODULE,
	.name = "hpsa",
	.proc_name = "hpsa",
	.queuecommand = hpsa_scsi_queue_command,
	.scan_start = hpsa_scan_start,
	.scan_finished = hpsa_scan_finished,
	.change_queue_depth = hpsa_change_queue_depth,
	.this_id = -1,
	.use_clustering = ENABLE_CLUSTERING,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl = hpsa_ioctl,
	.slave_alloc = hpsa_slave_alloc,
	.slave_destroy = hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl = hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
};

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

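/* Inspect a completed command's autosense data for a SCSI unit
 * attention condition (sense key in SenseInfo[2], ASC in SenseInfo[12])
 * and log the specific cause.  Returns 1 if a unit attention was
 * reported (the command is a candidate for retry), 0 otherwise.
 */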
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, "hpsa%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
			"detected, action required\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
			"changed, action required\n", h->ctlr);
		/*
		 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
		 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, "hpsa%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, "hpsa%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}

static ssize_t host_store_rescan(struct device *dev,
	struct device_attribute *attr,
	const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

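/* sysfs "firmware_revision" attribute: report the 4-character firmware
 * revision, which lives at bytes 32-35 of the cached controller
 * INQUIRY data.
 */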
static ssize_t host_show_firmware_revision(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct hlist_head *list, struct CommandList *c)
{
	hlist_add_head(&c->list, list);
}

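/* Fetch the tag of the next completed command.  In simple mode this
 * just asks the controller.  In performant mode the controller posts
 * completions into a reply ring in host memory; bit 0 of each entry is
 * a toggle that flips on every pass around the ring, so comparing it
 * with reply_pool_wraparound tells us whether the head entry is a
 * fresh completion or a stale one from the previous lap.
 */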
static inline u32 next_command(struct ctlr_info *h)
{
	u32 a;

	if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
		return h->access.command_completed(h);

	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
		(h->reply_pool_head)++;
		h->commands_outstanding--;
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
		h->reply_pool_head = h->reply_pool;
		h->reply_pool_wraparound ^= 1;
	}
	return a;
}

/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod == CFGTBL_Trans_Performant))
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	set_performant_mode(h, c);
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	start_io(h);
	spin_unlock_irqrestore(&h->lock, flags);
}

static inline void removeQ(struct CommandList *c)
{
	if (WARN_ON(hlist_unhashed(&c->list)))
		return;
	hlist_del_init(&c->list);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

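/* The P1210m (SCSI revision 5 firmware) maps logical drives to
 * target/lun numbers differently from older Smart Arrays, so several
 * places need to know which flavor we are talking to.  This checks
 * the ANSI version field (byte 2, low 3 bits) of the cached
 * controller INQUIRY data.
 */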
static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"UNKNOWN"
};
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
		"%02X%02X%02X%02X%02X%02X%02X%02X"
		"%02X%02X%02X%02X%02X%02X%02X%02X\n",
		sn[0], sn[1], sn[2], sn[3],
		sn[4], sn[5], sn[6], sn[7],
		sn[8], sn[9], sn[10], sn[11],
		sn[12], sn[13], sn[14], sn[15]);
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);

	memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			set_bit(h->dev[i]->target, lun_taken);
	}

	for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
		if (!test_bit(i, lun_taken)) {
			/* *bus = 1; */
			*target = i;
			*lun = 0;
			found = 1;
			break;
		}
	}
	return !found;
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *device,
	struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
			scsi_device_type(device->devtype), hostno,
			device->bus, device->target, device->lun);
	return 0;
}

/* Replace an entry in the h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;
	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}

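/* Byte-wise equality test for two 8-byte SCSI3 (LUN) addresses,
 * written out longhand so it can be used in expression context.
 */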
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

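/* Decide whether two device table entries describe the same device.
 * If both entries already have target/lun assigned (or use logical
 * addressing) and the device is not the RAID controller itself
 * (devtype 0x0C), the whole structures can simply be compared;
 * otherwise compare field by field, skipping target and lun, which
 * are not yet assigned for new physical devices.
 */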
static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	if ((is_logical_dev_addr_mode(dev1->scsi3addr) ||
		(dev1->lun != -1 && dev2->lun != -1)) &&
		dev1->devtype != 0x0C)
		return (memcmp(dev1, dev2, sizeof(*dev1)) == 0);

	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (memcmp(dev1->revision, dev2->revision, sizeof(dev1->revision)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->raid_level != dev2->raid_level)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.  If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i]))
				return DEVICE_SAME;
			else
				return DEVICE_CHANGED;
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

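/* Reconcile the driver's device table with the set of devices most
 * recently discovered, in two passes: first drop or replace h->dev[]
 * entries that disappeared or changed, then add anything in sd[] that
 * is missing from h->dev[].  Only after the table is consistent (and
 * the lock dropped) is the SCSI midlayer told about removals and
 * additions.
 */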
static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
		GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
		GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */

	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
			h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * future cmds to this device will get selection
			 * timeout as if the device was gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				" for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL)
		sdev->hostdata = sd;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}

static void hpsa_scsi_setup(struct ctlr_info *h)
{
	h->ndevices = 0;
	h->scsi_host = NULL;
	spin_lock_init(&h->devlock);
}

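/* SG chain block management.  A command holds at most
 * h->max_cmd_sg_entries scatter-gather descriptors inline; requests
 * needing more spill their remaining descriptors into a per-command
 * chain block of h->chainsize entries, preallocated here (one per
 * command slot) so no allocation happens in the I/O path.
 */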
static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
		GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
			h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}

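/* Turn the last inline SG slot of a command into a chain descriptor:
 * mark it with HPSA_SG_CHAIN and point its address/length at the
 * DMA-mapped chain block that holds the overflow SG entries.  The
 * unmap routine below reverses the mapping after completion.
 */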
static void hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = HPSA_SG_CHAIN;
	chain_sg->Len = sizeof(*chain_sg) *
		(c->Header.SGTotal - h->max_cmd_sg_entries);
	temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
		PCI_DMA_TODEVICE);
	chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
	chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;
	union u64bit temp64;

	if (c->Header.SGTotal <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	temp64.val32.lower = chain_sg->Addr.lower;
	temp64.val32.upper = chain_sg->Addr.upper;
	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}

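/* Translate a completed controller command back into a midlayer
 * scsi_cmnd result: copy sense data and residual count, map the
 * controller's CommandStatus and check-condition details onto host
 * byte codes, then hand the command to scsi_done and free the slot.
 */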
static void complete_scsi_command(struct CommandList *cp,
	int timeout, u32 tag)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;

	unsigned char sense_key;
	unsigned char asc;  /* additional sense code */
	unsigned char ascq; /* additional sense code qualifier */

	ei = cp->err_info;
	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
	h = cp->h;

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if (cp->Header.SGTotal > h->max_cmd_sg_entries)
		hpsa_unmap_sg_chain_block(h, cp);

	cmd->result = (DID_OK << 16);		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
	cmd->result |= ei->ScsiStatus;

	/* copy the sense data whether we need to or not. */
	memcpy(cmd->sense_buffer, ei->SenseInfo,
		ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
			SCSI_SENSE_BUFFERSIZE :
			ei->SenseLen);
	scsi_set_resid(cmd, ei->ResidualCnt);

	if (ei->CommandStatus == 0) {
		cmd->scsi_done(cmd);
		cmd_free(h, cp);
		return;
	}

	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		if (ei->ScsiStatus) {
			/* Get sense key */
			sense_key = 0xf & ei->SenseInfo[2];
			/* Get additional sense code */
			asc = ei->SenseInfo[12];
			/* Get additional sense code qualifier */
			ascq = ei->SenseInfo[13];
		}

		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (check_for_unit_attention(h, cp)) {
				cmd->result = DID_SOFT_ERROR << 16;
				break;
			}
			if (sense_key == ILLEGAL_REQUEST) {
				/*
				 * SCSI REPORT_LUNS is commonly unsupported on
				 * Smart Array.  Suppress noisy complaint.
				 */
				if (cp->Request.CDB[0] == REPORT_LUNS)
					break;

				/* If ASC/ASCQ indicate Logical Unit
				 * Not Supported condition,
				 */
				if ((asc == 0x25) && (ascq == 0x0)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition\n", cp);
					break;
				}
			}

			if (sense_key == NOT_READY) {
				/* If Sense is Not Ready, Logical Unit
				 * Not ready, Manual Intervention
				 * required
				 */
				if ((asc == 0x04) && (ascq == 0x03)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition: unit "
						"not ready, manual "
						"intervention required\n", cp);
					break;
				}
			}
			if (sense_key == ABORTED_COMMAND) {
				/* Aborted command is retryable */
				dev_warn(&h->pdev->dev, "cp %p "
					"has check condition: aborted command: "
					"ASC: 0x%x, ASCQ: 0x%x\n",
					cp, asc, ascq);
				cmd->result = DID_SOFT_ERROR << 16;
				break;
			}
			/* Must be some other type of check condition */
			dev_warn(&h->pdev->dev, "cp %p has check condition: "
					"unknown type: "
					"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
					"Returning result: 0x%x, "
					"cmd=[%02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x]\n",
					cp, sense_key, asc, ascq,
					cmd->result,
					cmd->cmnd[0], cmd->cmnd[1],
					cmd->cmnd[2], cmd->cmnd[3],
					cmd->cmnd[4], cmd->cmnd[5],
					cmd->cmnd[6], cmd->cmnd[7],
					cmd->cmnd[8], cmd->cmnd[9],
					cmd->cmnd[10], cmd->cmnd[11],
					cmd->cmnd[12], cmd->cmnd[13],
					cmd->cmnd[14], cmd->cmnd[15]);
			break;
		}


		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else {  /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);

			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it.  We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't.  We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			 */

			cmd->result = DID_NO_CONNECT << 16;
		}
		break;

	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev, "cp %p has"
			" completed with data overrun "
			"reported\n", cp);
		break;
	case CMD_INVALID: {
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response).  You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target. */
		cmd->result = DID_NO_CONNECT << 16;
	}
		break;
	case CMD_PROTOCOL_ERR:
		dev_warn(&h->pdev->dev, "cp %p has "
			"protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		cmd->result = DID_ABORT << 16;
		dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
			cp, ei->ScsiStatus);
		break;
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_RESET << 16;
		dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
			"abort\n", cp);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
		break;
	default:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
			cp, ei->CommandStatus);
	}
	cmd->scsi_done(cmd);
	cmd_free(h, cp);
}

static int hpsa_scsi_detect(struct ctlr_info *h)
{
	struct Scsi_Host *sh;
	int error;

	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
	if (sh == NULL)
		goto fail;

	sh->io_port = 0;
	sh->n_io_port = 0;
	sh->this_id = -1;
	sh->max_channel = 3;
	sh->max_cmd_len = MAX_COMMAND_SIZE;
	sh->max_lun = HPSA_MAX_LUN;
	sh->max_id = HPSA_MAX_LUN;
	sh->can_queue = h->nr_cmds;
	sh->cmd_per_lun = h->nr_cmds;
	sh->sg_tablesize = h->maxsgentries;
	h->scsi_host = sh;
	sh->hostdata[0] = (unsigned long) h;
	sh->irq = h->intr[PERF_MODE_INT];
	sh->unique_id = sh->irq;
	error = scsi_add_host(sh, &h->pdev->dev);
	if (error)
		goto fail_host_put;
	scsi_scan_host(sh);
	return 0;

fail_host_put:
	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
		" failed for controller %d\n", h->ctlr);
	scsi_host_put(sh);
	return error;
fail:
	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
		" failed for controller %d\n", h->ctlr);
	return -ENOMEM;
}

static void hpsa_pci_unmap(struct pci_dev *pdev,
	struct CommandList *c, int sg_used, int data_direction)
{
	int i;
	union u64bit addr64;

	for (i = 0; i < sg_used; i++) {
		addr64.val32.lower = c->SG[i].Addr.lower;
		addr64.val32.upper = c->SG[i].Addr.upper;
		pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
			data_direction);
	}
}

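/* DMA-map a single contiguous buffer and describe it in SG entry 0 of
 * the command, splitting the 64-bit bus address into the lower/upper
 * 32-bit halves the controller expects.
 */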
static void hpsa_map_one(struct pci_dev *pdev,
	struct CommandList *cp,
	unsigned char *buf,
	size_t buflen,
	int data_direction)
{
	u64 addr64;

	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = 0;
		return;
	}

	addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
	cp->SG[0].Addr.lower =
		(u32) (addr64 & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Addr.upper =
		(u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Len = buflen;
	cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
}

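/* Fire a command at the controller and sleep until the interrupt
 * handler signals the on-stack completion hung off c->waiting.
 */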
static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);
}

static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction)
{
	int retry_count = 0;

	do {
		/* clear the whole ErrorInfo struct, not just a
		 * pointer's worth of it
		 */
		memset(c->err_info, 0, sizeof(*c->err_info));
		hpsa_scsi_do_simple_cmd_core(h, c);
		retry_count++;
	} while (check_for_unit_attention(h, c) && retry_count <= 3);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}

static void hpsa_scsi_interpret_error(struct CommandList *cp)
{
	struct ErrorInfo *ei;
	struct device *d = &cp->h->pdev->dev;

	ei = cp->err_info;
	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		dev_warn(d, "cmd %p has completed with errors\n", cp);
		dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
			ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero.  "
				"(probably indicates selection timeout "
				"reported incorrectly due to a known "
				"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		dev_info(d, "UNDERRUN\n");
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(d, "cp %p has completed with data overrun\n", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		dev_warn(d, "cp %p is reported invalid (probably means "
			"target device no longer present)\n", cp);
		/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		}
		break;
	case CMD_PROTOCOL_ERR:
		dev_warn(d, "cp %p has protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		/* cmd->result = DID_ERROR << 16; */
		dev_warn(d, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		dev_warn(d, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		dev_warn(d, "cp %p was aborted\n", cp);
		break;
	case CMD_ABORT_FAILED:
		dev_warn(d, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
		break;
	case CMD_TIMEOUT:
		dev_warn(d, "cp %p timed out\n", cp);
		break;
	default:
		dev_warn(d, "cp %p returned unknown status %x\n", cp,
			ei->CommandStatus);
	}
}

static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char page, unsigned char *buf,
	unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) { /* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) { /* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
	hpsa_scsi_do_simple_cmd_core(h, c);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
	kfree(buf);
	return;
}

/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	if (buflen > 16)
		buflen = 16;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -1;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
	if (rc == 0)
		memcpy(device_id, &buf[8], buflen);
	kfree(buf);
	return rc != 0;
}

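/* Issue a REPORT LUNS (physical or logical, per the "logical" flag)
 * to the controller itself, which is addressed with the all-zeroes
 * lunid.  A nonzero extended_response value is placed in CDB[1] to
 * request the extended report format.
 */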
static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
	struct ReportLUNdata *buf, int bufsize,
	int extended_response)
{
	int rc = IO_OK;
	struct CommandList *c;
	unsigned char scsi3addr[8];
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);
	if (c == NULL) { /* trouble... */
		dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -1;
	}
	/* address the controller */
	memset(scsi3addr, 0, sizeof(scsi3addr));
	fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
		buf, bufsize, 0, scsi3addr, TYPE_CMD);
	if (extended_response)
		c->Request.CDB[1] = extended_response;
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 &&
	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
	struct ReportLUNdata *buf,
	int bufsize, int extended_response)
{
	return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
}

static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
	struct ReportLUNdata *buf, int bufsize)
{
	return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
}

static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static int hpsa_update_device_info(struct ctlr_info *h,
	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device)
{
#define OBDR_TAPE_INQ_SIZE 49
	unsigned char *inq_buff;

	inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
	if (!inq_buff)
		goto bail_out;

	/* Do an inquiry to the device to see what it is. */
	if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
		(unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
		/* Inquiry failed (msg printed already) */
		dev_err(&h->pdev->dev,
			"hpsa_update_device_info: inquiry failed\n");
		goto bail_out;
	}

	this_device->devtype = (inq_buff[0] & 0x1f);
	memcpy(this_device->scsi3addr, scsi3addr, 8);
	memcpy(this_device->vendor, &inq_buff[8],
		sizeof(this_device->vendor));
	memcpy(this_device->model, &inq_buff[16],
		sizeof(this_device->model));
	memcpy(this_device->revision, &inq_buff[32],
		sizeof(this_device->revision));
	memset(this_device->device_id, 0,
		sizeof(this_device->device_id));
	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
		sizeof(this_device->device_id));

	if (this_device->devtype == TYPE_DISK &&
		is_logical_dev_addr_mode(scsi3addr))
		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
	else
		this_device->raid_level = RAID_UNKNOWN;

	kfree(inq_buff);
	return 0;

bail_out:
	kfree(inq_buff);
	return 1;
}

1508 | static unsigned char *msa2xxx_model[] = { | |
1509 | "MSA2012", | |
1510 | "MSA2024", | |
1511 | "MSA2312", | |
1512 | "MSA2324", | |
1513 | NULL, | |
1514 | }; | |
1515 | ||
1516 | static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) | |
1517 | { | |
1518 | int i; | |
1519 | ||
1520 | for (i = 0; msa2xxx_model[i]; i++) | |
1521 | if (strncmp(device->model, msa2xxx_model[i], | |
1522 | strlen(msa2xxx_model[i])) == 0) | |
1523 | return 1; | |
1524 | return 0; | |
1525 | } | |
1526 | ||
1527 | /* Helper function to assign bus, target, lun mapping of devices. | |
1528 | * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical | |
1529 | * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3. | |
1530 | * Logical drive target and lun are assigned at this time, but | |
1531 | * physical device lun and target assignment are deferred (assigned | |
1532 | * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) | |
1533 | */ | |
1534 | static void figure_bus_target_lun(struct ctlr_info *h, | |
01a02ffc | 1535 | u8 *lunaddrbytes, int *bus, int *target, int *lun, |
edd16368 SC |
1536 | struct hpsa_scsi_dev_t *device) |
1537 | { | |
01a02ffc | 1538 | u32 lunid; |
edd16368 SC |
1539 | |
1540 | if (is_logical_dev_addr_mode(lunaddrbytes)) { | |
1541 | /* logical device */ | |
339b2b14 SC |
1542 | if (unlikely(is_scsi_rev_5(h))) { |
1543 | /* p1210m, logical drives lun assignments | |
1544 | * match SCSI REPORT LUNS data. | |
1545 | */ | |
1546 | lunid = le32_to_cpu(*((__le32 *) lunaddrbytes)); | |
edd16368 | 1547 | *bus = 0; |
339b2b14 SC |
1548 | *target = 0; |
1549 | *lun = (lunid & 0x3fff) + 1; | |
1550 | } else { | |
1551 | /* not p1210m... */ | |
1552 | lunid = le32_to_cpu(*((__le32 *) lunaddrbytes)); | |
1553 | if (is_msa2xxx(h, device)) { | |
1554 | /* msa2xxx way, put logicals on bus 1 | |
1555 | * and match target/lun numbers box | |
1556 | * reports. | |
1557 | */ | |
1558 | *bus = 1; | |
1559 | *target = (lunid >> 16) & 0x3fff; | |
1560 | *lun = lunid & 0x00ff; | |
1561 | } else { | |
1562 | /* Traditional smart array way. */ | |
1563 | *bus = 0; | |
1564 | *lun = 0; | |
1565 | *target = lunid & 0x3fff; | |
1566 | } | |
edd16368 SC |
1567 | } |
1568 | } else { | |
1569 | /* physical device */ | |
1570 | if (is_hba_lunid(lunaddrbytes)) | |
339b2b14 SC |
1571 | if (unlikely(is_scsi_rev_5(h))) { |
1572 | *bus = 0; /* put p1210m ctlr at 0,0,0 */ | |
1573 | *target = 0; | |
1574 | *lun = 0; | |
1575 | return; | |
1576 | } else | |
1577 | *bus = 3; /* traditional smartarray */ | |
edd16368 | 1578 | else |
339b2b14 | 1579 | *bus = 2; /* physical disk */ |
edd16368 SC |
1580 | *target = -1; |
1581 | *lun = -1; /* we will fill these in later. */ | |
1582 | } | |
1583 | } | |
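/*
 * A minimal illustrative sketch (not called by the driver) of the
 * MSA2xxx lunid unpacking done in figure_bus_target_lun() above: the
 * 32-bit little-endian lunid carries the target in bits 16-29 and the
 * lun in bits 0-7.  The helper name is hypothetical.
 */
static inline void example_unpack_msa2xxx_lunid(u32 lunid,
	int *target, int *lun)
{
	*target = (lunid >> 16) & 0x3fff;	/* target the box reports */
	*lun = lunid & 0x00ff;			/* lun the box reports */
}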
1584 | ||
1585 | /* | |
1586 | * If there is no lun 0 on a target, linux won't find any devices. | |
1587 | * For the MSA2xxx boxes, we have to manually detect the enclosure | |
1588 | * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report | |
1589 | * it for some reason. *tmpdevice is the target we're adding, | |
1590 | * this_device is a pointer into the current element of currentsd[] | |
1591 | * that we're building up in update_scsi_devices(), below. | |
1592 | * lunzerobits is a bitmap that tracks which targets already have a | |
1593 | * lun 0 assigned. | |
1594 | * Returns 1 if an enclosure was added, 0 if not. | |
1595 | */ | |
1596 | static int add_msa2xxx_enclosure_device(struct ctlr_info *h, | |
1597 | struct hpsa_scsi_dev_t *tmpdevice, | |
01a02ffc | 1598 | struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes, |
edd16368 SC |
1599 | int bus, int target, int lun, unsigned long lunzerobits[], |
1600 | int *nmsa2xxx_enclosures) | |
1601 | { | |
1602 | unsigned char scsi3addr[8]; | |
1603 | ||
1604 | if (test_bit(target, lunzerobits)) | |
1605 | return 0; /* There is already a lun 0 on this target. */ | |
1606 | ||
1607 | if (!is_logical_dev_addr_mode(lunaddrbytes)) | |
1608 | return 0; /* It's the logical targets that may lack lun 0. */ | |
1609 | ||
1610 | if (!is_msa2xxx(h, tmpdevice)) | |
1611 | return 0; /* It's only the MSA2xxx that have this problem. */ | |
1612 | ||
1613 | if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */ | |
1614 | return 0; | |
1615 | ||
1616 | if (is_hba_lunid(lunaddrbytes)) | |
1617 | return 0; /* Don't add the RAID controller here. */ | |
1618 | ||
339b2b14 SC |
1619 | if (is_scsi_rev_5(h)) |
1620 | return 0; /* p1210m doesn't need to do this. */ | |
1621 | ||
edd16368 SC |
1622 | #define MAX_MSA2XXX_ENCLOSURES 32 |
1623 | if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) { | |
1624 | dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX " | |
1625 | "enclosures exceeded. Check your hardware " | |
1626 | "configuration.\n"); | |
1627 | return 0; | |
1628 | } | |
1629 | ||
1630 | memset(scsi3addr, 0, 8); | |
1631 | scsi3addr[3] = target; | |
1632 | if (hpsa_update_device_info(h, scsi3addr, this_device)) | |
1633 | return 0; | |
1634 | (*nmsa2xxx_enclosures)++; | |
1635 | hpsa_set_bus_target_lun(this_device, bus, target, 0); | |
1636 | set_bit(target, lunzerobits); | |
1637 | return 1; | |
1638 | } | |
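/*
 * Illustrative sketch of the lunzerobits bookkeeping used above; only
 * the bitmap idiom is taken from the driver, the helper name is
 * hypothetical.  One bit per target records that a lun 0 has already
 * been presented, so each MSA2xxx target gains at most one synthetic
 * enclosure device.
 */
static inline int example_lun_zero_needed(unsigned long *lunzerobits,
	int target)
{
	if (test_bit(target, lunzerobits))
		return 0;	/* this target already has a lun 0 */
	set_bit(target, lunzerobits);
	return 1;	/* caller should synthesize the enclosure */
}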
1639 | ||
1640 | /* | |
1641 | * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev, | |
1642 | * logdev. The number of luns in physdev and logdev are returned in | |
1643 | * *nphysicals and *nlogicals, respectively. | |
1644 | * Returns 0 on success, -1 otherwise. | |
1645 | */ | |
1646 | static int hpsa_gather_lun_info(struct ctlr_info *h, | |
1647 | int reportlunsize, | |
01a02ffc SC |
1648 | struct ReportLUNdata *physdev, u32 *nphysicals, |
1649 | struct ReportLUNdata *logdev, u32 *nlogicals) | |
edd16368 SC |
1650 | { |
1651 | if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) { | |
1652 | dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); | |
1653 | return -1; | |
1654 | } | |
6df1e954 | 1655 | *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8; |
edd16368 SC |
1656 | if (*nphysicals > HPSA_MAX_PHYS_LUN) { |
1657 | dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded." | |
1658 | " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, | |
1659 | *nphysicals - HPSA_MAX_PHYS_LUN); | |
1660 | *nphysicals = HPSA_MAX_PHYS_LUN; | |
1661 | } | |
1662 | if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) { | |
1663 | dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); | |
1664 | return -1; | |
1665 | } | |
6df1e954 | 1666 | *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8; |
edd16368 SC |
1667 | /* Reject Logicals in excess of our max capability. */ |
1668 | if (*nlogicals > HPSA_MAX_LUN) { | |
1669 | dev_warn(&h->pdev->dev, | |
1670 | "maximum logical LUNs (%d) exceeded. " | |
1671 | "%d LUNs ignored.\n", HPSA_MAX_LUN, | |
1672 | *nlogicals - HPSA_MAX_LUN); | |
1673 | *nlogicals = HPSA_MAX_LUN; | |
1674 | } | |
1675 | if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) { | |
1676 | dev_warn(&h->pdev->dev, | |
1677 | "maximum logical + physical LUNs (%d) exceeded. " | |
1678 | "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, | |
1679 | *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN); | |
1680 | *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals; | |
1681 | } | |
1682 | return 0; | |
1683 | } | |
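/*
 * Worked example of the arithmetic above: LUNListLength is a big-endian
 * byte count and each REPORT LUNS entry is 8 bytes, so a returned
 * length of 0x00 0x00 0x00 0x18 (24 bytes) means 24 / 8 = 3 LUNs.
 */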
1684 | ||
339b2b14 SC |
1685 | static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, | |
1686 | int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list, | |
1687 | struct ReportLUNdata *logdev_list) | |
1688 | { | |
1689 | /* Helper function, figure out where the LUN ID info is coming from | |
1690 | * given index i, lists of physical and logical devices, where in | |
1691 | * the list the raid controller is supposed to appear (first or last) | |
1692 | */ | |
1693 | ||
1694 | int logicals_start = nphysicals + (raid_ctlr_position == 0); | |
1695 | int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0); | |
1696 | ||
1697 | if (i == raid_ctlr_position) | |
1698 | return RAID_CTLR_LUNID; | |
1699 | ||
1700 | if (i < logicals_start) | |
1701 | return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0]; | |
1702 | ||
1703 | if (i < last_device) | |
1704 | return &logdev_list->LUN[i - nphysicals - | |
1705 | (raid_ctlr_position == 0)][0]; | |
1706 | BUG(); | |
1707 | return NULL; | |
1708 | } | |
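/*
 * Worked example for figure_lunaddrbytes(): with 2 physicals, 3
 * logicals and raid_ctlr_position == 0 (the SCSI rev 5 case), the
 * index space i = 0..5 maps as:
 *   i == 0    -> RAID_CTLR_LUNID
 *   i == 1..2 -> physdev_list->LUN[0..1]
 *   i == 3..5 -> logdev_list->LUN[0..2]
 * With raid_ctlr_position == nphysicals + nlogicals == 5 instead, the
 * controller simply moves to the end of the same sequence.
 */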
1709 | ||
edd16368 SC |
1710 | static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) |
1711 | { | |
1712 | /* The idea here is that we could get notified | |
1713 | * that some devices have changed, so we do a report | |
1714 | * physical luns and report logical luns cmd, and adjust | |
1715 | * our list of devices accordingly. | |
1716 | * | |
1717 | * The scsi3addr's of devices won't change so long as the | |
1718 | * adapter is not reset. That means we can rescan and | |
1719 | * tell which devices we already know about, vs. new | |
1720 | * devices, vs. disappearing devices. | |
1721 | */ | |
1722 | struct ReportLUNdata *physdev_list = NULL; | |
1723 | struct ReportLUNdata *logdev_list = NULL; | |
1724 | unsigned char *inq_buff = NULL; | |
01a02ffc SC |
1725 | u32 nphysicals = 0; |
1726 | u32 nlogicals = 0; | |
1727 | u32 ndev_allocated = 0; | |
edd16368 SC |
1728 | struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; |
1729 | int ncurrent = 0; | |
1730 | int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8; | |
1731 | int i, nmsa2xxx_enclosures, ndevs_to_allocate; | |
1732 | int bus, target, lun; | |
339b2b14 | 1733 | int raid_ctlr_position; |
edd16368 SC |
1734 | DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR); |
1735 | ||
1736 | currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA, | |
1737 | GFP_KERNEL); | |
1738 | physdev_list = kzalloc(reportlunsize, GFP_KERNEL); | |
1739 | logdev_list = kzalloc(reportlunsize, GFP_KERNEL); | |
1740 | inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); | |
1741 | tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); | |
1742 | ||
1743 | if (!currentsd || !physdev_list || !logdev_list || | |
1744 | !inq_buff || !tmpdevice) { | |
1745 | dev_err(&h->pdev->dev, "out of memory\n"); | |
1746 | goto out; | |
1747 | } | |
1748 | memset(lunzerobits, 0, sizeof(lunzerobits)); | |
1749 | ||
1750 | if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals, | |
1751 | logdev_list, &nlogicals)) | |
1752 | goto out; | |
1753 | ||
1754 | /* We might see up to 32 MSA2xxx enclosures: in practice only 8 of | |
1755 | * them, but each visible 4 times through different paths.  The | |
1756 | * plus 1 is for the RAID controller. | |
1757 | */ | |
1758 | ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1; | |
1759 | ||
1760 | /* Allocate the per device structures */ | |
1761 | for (i = 0; i < ndevs_to_allocate; i++) { | |
1762 | currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL); | |
1763 | if (!currentsd[i]) { | |
1764 | dev_warn(&h->pdev->dev, "out of memory at %s:%d\n", | |
1765 | __FILE__, __LINE__); | |
1766 | goto out; | |
1767 | } | |
1768 | ndev_allocated++; | |
1769 | } | |
1770 | ||
339b2b14 SC |
1771 | if (unlikely(is_scsi_rev_5(h))) |
1772 | raid_ctlr_position = 0; | |
1773 | else | |
1774 | raid_ctlr_position = nphysicals + nlogicals; | |
1775 | ||
edd16368 SC |
1776 | /* adjust our table of devices */ |
1777 | nmsa2xxx_enclosures = 0; | |
1778 | for (i = 0; i < nphysicals + nlogicals + 1; i++) { | |
01a02ffc | 1779 | u8 *lunaddrbytes; |
edd16368 SC |
1780 | |
1781 | /* Figure out where the LUN ID info is coming from */ | |
339b2b14 SC |
1782 | lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, |
1783 | i, nphysicals, nlogicals, physdev_list, logdev_list); | |
edd16368 | 1784 | /* skip masked physical devices. */ |
339b2b14 SC |
1785 | if (lunaddrbytes[3] & 0xC0 && |
1786 | i < nphysicals + (raid_ctlr_position == 0)) | |
edd16368 SC |
1787 | continue; |
1788 | ||
1789 | /* Get device type, vendor, model, device id */ | |
1790 | if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice)) | |
1791 | continue; /* skip it if we can't talk to it. */ | |
1792 | figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun, | |
1793 | tmpdevice); | |
1794 | this_device = currentsd[ncurrent]; | |
1795 | ||
1796 | /* | |
1797 | * For the msa2xxx boxes, we have to insert a LUN 0 which | |
1798 | * doesn't show up in CCISS_REPORT_PHYSICAL data, but there | |
1799 | * is nonetheless an enclosure device there. We have to | |
1800 | * present that otherwise linux won't find anything if | |
1801 | * there is no lun 0. | |
1802 | */ | |
1803 | if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device, | |
1804 | lunaddrbytes, bus, target, lun, lunzerobits, | |
1805 | &nmsa2xxx_enclosures)) { | |
1806 | ncurrent++; | |
1807 | this_device = currentsd[ncurrent]; | |
1808 | } | |
1809 | ||
1810 | *this_device = *tmpdevice; | |
1811 | hpsa_set_bus_target_lun(this_device, bus, target, lun); | |
1812 | ||
1813 | switch (this_device->devtype) { | |
1814 | case TYPE_ROM: { | |
1815 | /* We don't *really* support actual CD-ROM devices, | |
1816 | * just "One Button Disaster Recovery" tape drive | |
1817 | * which temporarily pretends to be a CD-ROM drive. | |
1818 | * So we check that the device is really an OBDR tape | |
1819 | * device by checking for "$DR-10" in bytes 43-48 of | |
1820 | * the inquiry data. | |
1821 | */ | |
1822 | char obdr_sig[7]; | |
1823 | #define OBDR_TAPE_SIG "$DR-10" | |
1824 | /* inq_buff is not filled in by hpsa_update_device_info(),
 * so re-read the standard inquiry data here before checking
 * for the OBDR signature; skip the device if that fails. */
if (hpsa_scsi_do_inquiry(h, lunaddrbytes, 0, inq_buff,
	(unsigned char) OBDR_TAPE_INQ_SIZE) != 0)
	break;
strncpy(obdr_sig, &inq_buff[43], 6); | |
1825 | obdr_sig[6] = '\0'; | |
1826 | if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0) | |
1827 | /* Not OBDR device, ignore it. */ | |
1828 | break; | |
1829 | } | |
1830 | ncurrent++; | |
1831 | break; | |
1832 | case TYPE_DISK: | |
1833 | if (i < nphysicals) | |
1834 | break; | |
1835 | ncurrent++; | |
1836 | break; | |
1837 | case TYPE_TAPE: | |
1838 | case TYPE_MEDIUM_CHANGER: | |
1839 | ncurrent++; | |
1840 | break; | |
1841 | case TYPE_RAID: | |
1842 | /* Only present the Smartarray HBA as a RAID controller. | |
1843 | * If it's a RAID controller other than the HBA itself | |
1844 | * (an external RAID controller, MSA500 or similar) | |
1845 | * don't present it. | |
1846 | */ | |
1847 | if (!is_hba_lunid(lunaddrbytes)) | |
1848 | break; | |
1849 | ncurrent++; | |
1850 | break; | |
1851 | default: | |
1852 | break; | |
1853 | } | |
1854 | if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA) | |
1855 | break; | |
1856 | } | |
1857 | adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); | |
1858 | out: | |
1859 | kfree(tmpdevice); | |
1860 | for (i = 0; i < ndev_allocated; i++) | |
1861 | kfree(currentsd[i]); | |
1862 | kfree(currentsd); | |
1863 | kfree(inq_buff); | |
1864 | kfree(physdev_list); | |
1865 | kfree(logdev_list); | |
edd16368 SC |
1866 | } |
1867 | ||
1868 | /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci | |
1869 | * dma mapping and fills in the scatter gather entries of the | |
1870 | * hpsa command, cp. | |
1871 | */ | |
33a2ffce | 1872 | static int hpsa_scatter_gather(struct ctlr_info *h, |
edd16368 SC |
1873 | struct CommandList *cp, |
1874 | struct scsi_cmnd *cmd) | |
1875 | { | |
1876 | unsigned int len; | |
1877 | struct scatterlist *sg; | |
01a02ffc | 1878 | u64 addr64; |
33a2ffce SC |
1879 | int use_sg, i, sg_index, chained; |
1880 | struct SGDescriptor *curr_sg; | |
edd16368 | 1881 | |
33a2ffce | 1882 | BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); |
edd16368 SC |
1883 | |
1884 | use_sg = scsi_dma_map(cmd); | |
1885 | if (use_sg < 0) | |
1886 | return use_sg; | |
1887 | ||
1888 | if (!use_sg) | |
1889 | goto sglist_finished; | |
1890 | ||
33a2ffce SC |
1891 | curr_sg = cp->SG; |
1892 | chained = 0; | |
1893 | sg_index = 0; | |
edd16368 | 1894 | scsi_for_each_sg(cmd, sg, use_sg, i) { |
33a2ffce SC |
1895 | if (i == h->max_cmd_sg_entries - 1 && |
1896 | use_sg > h->max_cmd_sg_entries) { | |
1897 | chained = 1; | |
1898 | curr_sg = h->cmd_sg_list[cp->cmdindex]; | |
1899 | sg_index = 0; | |
1900 | } | |
01a02ffc | 1901 | addr64 = (u64) sg_dma_address(sg); |
edd16368 | 1902 | len = sg_dma_len(sg); |
33a2ffce SC |
1903 | curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); |
1904 | curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); | |
1905 | curr_sg->Len = len; | |
1906 | curr_sg->Ext = 0; /* we are not chaining */ | |
1907 | curr_sg++; | |
1908 | } | |
1909 | ||
1910 | if (use_sg + chained > h->maxSG) | |
1911 | h->maxSG = use_sg + chained; | |
1912 | ||
1913 | if (chained) { | |
1914 | cp->Header.SGList = h->max_cmd_sg_entries; | |
1915 | cp->Header.SGTotal = (u16) (use_sg + 1); | |
1916 | hpsa_map_sg_chain_block(h, cp); | |
1917 | return 0; | |
edd16368 SC |
1918 | } |
1919 | ||
1920 | sglist_finished: | |
1921 | ||
01a02ffc SC |
1922 | cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ |
1923 | cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */ | |
edd16368 SC |
1924 | return 0; |
1925 | } | |
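/*
 * Illustrative sketch of the address split used in the loop above: a
 * 64-bit bus address is stored as two 32-bit halves in the controller's
 * SG descriptor.  The helper name is hypothetical.
 */
static inline void example_set_sg_addr(struct SGDescriptor *sg, u64 addr64)
{
	sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
	sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
}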
1926 | ||
1927 | ||
f281233d | 1928 | static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, |
edd16368 SC |
1929 | void (*done)(struct scsi_cmnd *)) |
1930 | { | |
1931 | struct ctlr_info *h; | |
1932 | struct hpsa_scsi_dev_t *dev; | |
1933 | unsigned char scsi3addr[8]; | |
1934 | struct CommandList *c; | |
1935 | unsigned long flags; | |
1936 | ||
1937 | /* Get the ptr to our adapter structure out of cmd->host. */ | |
1938 | h = sdev_to_hba(cmd->device); | |
1939 | dev = cmd->device->hostdata; | |
1940 | if (!dev) { | |
1941 | cmd->result = DID_NO_CONNECT << 16; | |
1942 | done(cmd); | |
1943 | return 0; | |
1944 | } | |
1945 | memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); | |
1946 | ||
1947 | /* Need a lock as this is being allocated from the pool */ | |
1948 | spin_lock_irqsave(&h->lock, flags); | |
1949 | c = cmd_alloc(h); | |
1950 | spin_unlock_irqrestore(&h->lock, flags); | |
1951 | if (c == NULL) { /* trouble... */ | |
1952 | dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); | |
1953 | return SCSI_MLQUEUE_HOST_BUSY; | |
1954 | } | |
1955 | ||
1956 | /* Fill in the command list header */ | |
1957 | ||
1958 | cmd->scsi_done = done; /* save this for use by completion code */ | |
1959 | ||
1960 | /* save c in case we have to abort it */ | |
1961 | cmd->host_scribble = (unsigned char *) c; | |
1962 | ||
1963 | c->cmd_type = CMD_SCSI; | |
1964 | c->scsi_cmd = cmd; | |
1965 | c->Header.ReplyQueue = 0; /* unused in simple mode */ | |
1966 | memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); | |
303932fd DB |
1967 | c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT); |
1968 | c->Header.Tag.lower |= DIRECT_LOOKUP_BIT; | |
edd16368 SC |
1969 | |
1970 | /* Fill in the request block... */ | |
1971 | ||
1972 | c->Request.Timeout = 0; | |
1973 | memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); | |
1974 | BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); | |
1975 | c->Request.CDBLen = cmd->cmd_len; | |
1976 | memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); | |
1977 | c->Request.Type.Type = TYPE_CMD; | |
1978 | c->Request.Type.Attribute = ATTR_SIMPLE; | |
1979 | switch (cmd->sc_data_direction) { | |
1980 | case DMA_TO_DEVICE: | |
1981 | c->Request.Type.Direction = XFER_WRITE; | |
1982 | break; | |
1983 | case DMA_FROM_DEVICE: | |
1984 | c->Request.Type.Direction = XFER_READ; | |
1985 | break; | |
1986 | case DMA_NONE: | |
1987 | c->Request.Type.Direction = XFER_NONE; | |
1988 | break; | |
1989 | case DMA_BIDIRECTIONAL: | |
1990 | /* This can happen if a buggy application does a scsi passthru | |
1991 | * and sets both inlen and outlen to non-zero. (See | |
1992 | * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command().) | |
1993 | */ | |
1994 | ||
1995 | c->Request.Type.Direction = XFER_RSVD; | |
1996 | /* This is technically wrong, and hpsa controllers should | |
1997 | * reject it with CMD_INVALID, which is the most correct | |
1998 | * response, but non-fibre backends appear to let it | |
1999 | * slide by, and give the same results as if this field | |
2000 | * were set correctly. Either way is acceptable for | |
2001 | * our purposes here. | |
2002 | */ | |
2003 | ||
2004 | break; | |
2005 | ||
2006 | default: | |
2007 | dev_err(&h->pdev->dev, "unknown data direction: %d\n", | |
2008 | cmd->sc_data_direction); | |
2009 | BUG(); | |
2010 | break; | |
2011 | } | |
2012 | ||
33a2ffce | 2013 | if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ |
edd16368 SC |
2014 | cmd_free(h, c); |
2015 | return SCSI_MLQUEUE_HOST_BUSY; | |
2016 | } | |
2017 | enqueue_cmd_and_start_io(h, c); | |
2018 | /* the cmd'll come back via intr handler in complete_scsi_command() */ | |
2019 | return 0; | |
2020 | } | |
2021 | ||
f281233d JG |
2022 | static DEF_SCSI_QCMD(hpsa_scsi_queue_command) |
2023 | ||
a08a8471 SC |
2024 | static void hpsa_scan_start(struct Scsi_Host *sh) |
2025 | { | |
2026 | struct ctlr_info *h = shost_to_hba(sh); | |
2027 | unsigned long flags; | |
2028 | ||
2029 | /* wait until any scan already in progress is finished. */ | |
2030 | while (1) { | |
2031 | spin_lock_irqsave(&h->scan_lock, flags); | |
2032 | if (h->scan_finished) | |
2033 | break; | |
2034 | spin_unlock_irqrestore(&h->scan_lock, flags); | |
2035 | wait_event(h->scan_wait_queue, h->scan_finished); | |
2036 | /* Note: We don't need to worry about a race between this | |
2037 | * thread and driver unload because the midlayer will | |
2038 | * have incremented the reference count, so unload won't | |
2039 | * happen if we're in here. | |
2040 | */ | |
2041 | } | |
2042 | h->scan_finished = 0; /* mark scan as in progress */ | |
2043 | spin_unlock_irqrestore(&h->scan_lock, flags); | |
2044 | ||
2045 | hpsa_update_scsi_devices(h, h->scsi_host->host_no); | |
2046 | ||
2047 | spin_lock_irqsave(&h->scan_lock, flags); | |
2048 | h->scan_finished = 1; /* mark scan as finished. */ | |
2049 | wake_up_all(&h->scan_wait_queue); | |
2050 | spin_unlock_irqrestore(&h->scan_lock, flags); | |
2051 | } | |
2052 | ||
2053 | static int hpsa_scan_finished(struct Scsi_Host *sh, | |
2054 | unsigned long elapsed_time) | |
2055 | { | |
2056 | struct ctlr_info *h = shost_to_hba(sh); | |
2057 | unsigned long flags; | |
2058 | int finished; | |
2059 | ||
2060 | spin_lock_irqsave(&h->scan_lock, flags); | |
2061 | finished = h->scan_finished; | |
2062 | spin_unlock_irqrestore(&h->scan_lock, flags); | |
2063 | return finished; | |
2064 | } | |
2065 | ||
667e23d4 SC |
2066 | static int hpsa_change_queue_depth(struct scsi_device *sdev, |
2067 | int qdepth, int reason) | |
2068 | { | |
2069 | struct ctlr_info *h = sdev_to_hba(sdev); | |
2070 | ||
2071 | if (reason != SCSI_QDEPTH_DEFAULT) | |
2072 | return -ENOTSUPP; | |
2073 | ||
2074 | if (qdepth < 1) | |
2075 | qdepth = 1; | |
2076 | else | |
2077 | if (qdepth > h->nr_cmds) | |
2078 | qdepth = h->nr_cmds; | |
2079 | scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); | |
2080 | return sdev->queue_depth; | |
2081 | } | |
2082 | ||
edd16368 SC |
2083 | static void hpsa_unregister_scsi(struct ctlr_info *h) |
2084 | { | |
2085 | /* we are being forcibly unloaded, and may not refuse. */ | |
2086 | scsi_remove_host(h->scsi_host); | |
2087 | scsi_host_put(h->scsi_host); | |
2088 | h->scsi_host = NULL; | |
2089 | } | |
2090 | ||
2091 | static int hpsa_register_scsi(struct ctlr_info *h) | |
2092 | { | |
2093 | int rc; | |
2094 | ||
edd16368 SC |
2095 | rc = hpsa_scsi_detect(h); |
2096 | if (rc != 0) | |
2097 | dev_err(&h->pdev->dev, "hpsa_register_scsi: failed" | |
2098 | " hpsa_scsi_detect(), rc is %d\n", rc); | |
2099 | return rc; | |
2100 | } | |
2101 | ||
2102 | static int wait_for_device_to_become_ready(struct ctlr_info *h, | |
2103 | unsigned char lunaddr[]) | |
2104 | { | |
2105 | int rc = 0; | |
2106 | int count = 0; | |
2107 | int waittime = 1; /* seconds */ | |
2108 | struct CommandList *c; | |
2109 | ||
2110 | c = cmd_special_alloc(h); | |
2111 | if (!c) { | |
2112 | dev_warn(&h->pdev->dev, "out of memory in " | |
2113 | "wait_for_device_to_become_ready.\n"); | |
2114 | return IO_ERROR; | |
2115 | } | |
2116 | ||
2117 | /* Send test unit ready until device ready, or give up. */ | |
2118 | while (count < HPSA_TUR_RETRY_LIMIT) { | |
2119 | ||
2120 | /* Wait for a bit. Do this first, because if we send | |
2121 | * the TUR right away, the reset will just abort it. | |
2122 | */ | |
2123 | msleep(1000 * waittime); | |
2124 | count++; | |
2125 | ||
2126 | /* Increase wait time with each try, up to a point. */ | |
2127 | if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) | |
2128 | waittime = waittime * 2; | |
2129 | ||
2130 | /* Send the Test Unit Ready */ | |
2131 | fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD); | |
2132 | hpsa_scsi_do_simple_cmd_core(h, c); | |
2133 | /* no unmap needed here because no data xfer. */ | |
2134 | ||
2135 | if (c->err_info->CommandStatus == CMD_SUCCESS) | |
2136 | break; | |
2137 | ||
2138 | if (c->err_info->CommandStatus == CMD_TARGET_STATUS && | |
2139 | c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && | |
2140 | (c->err_info->SenseInfo[2] == NO_SENSE || | |
2141 | c->err_info->SenseInfo[2] == UNIT_ATTENTION)) | |
2142 | break; | |
2143 | ||
2144 | dev_warn(&h->pdev->dev, "waiting %d secs " | |
2145 | "for device to become ready.\n", waittime); | |
2146 | rc = 1; /* device not ready. */ | |
2147 | } | |
2148 | ||
2149 | if (rc) | |
2150 | dev_warn(&h->pdev->dev, "giving up on device.\n"); | |
2151 | else | |
2152 | dev_warn(&h->pdev->dev, "device is ready.\n"); | |
2153 | ||
2154 | cmd_special_free(h, c); | |
2155 | return rc; | |
2156 | } | |
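/*
 * Worked example of the backoff above: the first sleep is 1 second and
 * the interval doubles on each retry (1s, 2s, 4s, ...).  It stops
 * doubling once it reaches HPSA_MAX_WAIT_INTERVAL_SECS, so every later
 * retry sleeps for that final interval.
 */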
2157 | ||
2158 | /* Need at least one of these error handlers to keep ../scsi/hosts.c from | |
2159 | * complaining. Doing a host- or bus-reset can't do anything good here. | |
2160 | */ | |
2161 | static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) | |
2162 | { | |
2163 | int rc; | |
2164 | struct ctlr_info *h; | |
2165 | struct hpsa_scsi_dev_t *dev; | |
2166 | ||
2167 | /* find the controller to which the command to be aborted was sent */ | |
2168 | h = sdev_to_hba(scsicmd->device); | |
2169 | if (h == NULL) /* paranoia */ | |
2170 | return FAILED; | |
edd16368 SC |
2171 | dev = scsicmd->device->hostdata; |
2172 | if (!dev) { | |
2173 | dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " | |
2174 | "device lookup failed.\n"); | |
2175 | return FAILED; | |
2176 | } | |
d416b0c7 SC |
2177 | dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", |
2178 | h->scsi_host->host_no, dev->bus, dev->target, dev->lun); | |
edd16368 SC |
2179 | /* send a reset to the SCSI LUN which the command was sent to */ |
2180 | rc = hpsa_send_reset(h, dev->scsi3addr); | |
2181 | if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) | |
2182 | return SUCCESS; | |
2183 | ||
2184 | dev_warn(&h->pdev->dev, "resetting device failed.\n"); | |
2185 | return FAILED; | |
2186 | } | |
2187 | ||
2188 | /* | |
2189 | * For operations that cannot sleep, a command block is allocated at init, | |
2190 | * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track | |
2191 | * which ones are free or in use. Lock must be held when calling this. | |
2192 | * cmd_free() is the complement. | |
2193 | */ | |
2194 | static struct CommandList *cmd_alloc(struct ctlr_info *h) | |
2195 | { | |
2196 | struct CommandList *c; | |
2197 | int i; | |
2198 | union u64bit temp64; | |
2199 | dma_addr_t cmd_dma_handle, err_dma_handle; | |
2200 | ||
2201 | do { | |
2202 | i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); | |
2203 | if (i == h->nr_cmds) | |
2204 | return NULL; | |
2205 | } while (test_and_set_bit | |
2206 | (i & (BITS_PER_LONG - 1), | |
2207 | h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); | |
2208 | c = h->cmd_pool + i; | |
2209 | memset(c, 0, sizeof(*c)); | |
2210 | cmd_dma_handle = h->cmd_pool_dhandle | |
2211 | + i * sizeof(*c); | |
2212 | c->err_info = h->errinfo_pool + i; | |
2213 | memset(c->err_info, 0, sizeof(*c->err_info)); | |
2214 | err_dma_handle = h->errinfo_pool_dhandle | |
2215 | + i * sizeof(*c->err_info); | |
2216 | h->nr_allocs++; | |
2217 | ||
2218 | c->cmdindex = i; | |
2219 | ||
2220 | INIT_HLIST_NODE(&c->list); | |
01a02ffc SC |
2221 | c->busaddr = (u32) cmd_dma_handle; |
2222 | temp64.val = (u64) err_dma_handle; | |
edd16368 SC |
2223 | c->ErrDesc.Addr.lower = temp64.val32.lower; |
2224 | c->ErrDesc.Addr.upper = temp64.val32.upper; | |
2225 | c->ErrDesc.Len = sizeof(*c->err_info); | |
2226 | ||
2227 | c->h = h; | |
2228 | return c; | |
2229 | } | |
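/*
 * Illustrative stand-alone sketch of the allocation idiom in
 * cmd_alloc() above: scan for a clear bit, then atomically claim it,
 * retrying if another caller won the race.  The helper name is
 * hypothetical; the driver's version also carves the command and error
 * buffers out of its DMA pools.
 */
static inline int example_claim_slot(unsigned long *bits, int nr_slots)
{
	int i;

	do {
		i = find_first_zero_bit(bits, nr_slots);
		if (i == nr_slots)
			return -1;	/* pool exhausted */
	} while (test_and_set_bit(i & (BITS_PER_LONG - 1),
				bits + (i / BITS_PER_LONG)) != 0);
	return i;
}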
2230 | ||
2231 | /* For operations where the caller can tolerate kmalloc possibly | |
2232 | * sleeping, this routine can be called. Lock need not be held to call | |
2233 | * cmd_special_alloc. cmd_special_free() is the complement. | |
2234 | */ | |
2235 | static struct CommandList *cmd_special_alloc(struct ctlr_info *h) | |
2236 | { | |
2237 | struct CommandList *c; | |
2238 | union u64bit temp64; | |
2239 | dma_addr_t cmd_dma_handle, err_dma_handle; | |
2240 | ||
2241 | c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); | |
2242 | if (c == NULL) | |
2243 | return NULL; | |
2244 | memset(c, 0, sizeof(*c)); | |
2245 | ||
2246 | c->cmdindex = -1; | |
2247 | ||
2248 | c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info), | |
2249 | &err_dma_handle); | |
2250 | ||
2251 | if (c->err_info == NULL) { | |
2252 | pci_free_consistent(h->pdev, | |
2253 | sizeof(*c), c, cmd_dma_handle); | |
2254 | return NULL; | |
2255 | } | |
2256 | memset(c->err_info, 0, sizeof(*c->err_info)); | |
2257 | ||
2258 | INIT_HLIST_NODE(&c->list); | |
01a02ffc SC |
2259 | c->busaddr = (u32) cmd_dma_handle; |
2260 | temp64.val = (u64) err_dma_handle; | |
edd16368 SC |
2261 | c->ErrDesc.Addr.lower = temp64.val32.lower; |
2262 | c->ErrDesc.Addr.upper = temp64.val32.upper; | |
2263 | c->ErrDesc.Len = sizeof(*c->err_info); | |
2264 | ||
2265 | c->h = h; | |
2266 | return c; | |
2267 | } | |
2268 | ||
2269 | static void cmd_free(struct ctlr_info *h, struct CommandList *c) | |
2270 | { | |
2271 | int i; | |
2272 | ||
2273 | i = c - h->cmd_pool; | |
2274 | clear_bit(i & (BITS_PER_LONG - 1), | |
2275 | h->cmd_pool_bits + (i / BITS_PER_LONG)); | |
2276 | h->nr_frees++; | |
2277 | } | |
2278 | ||
2279 | static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) | |
2280 | { | |
2281 | union u64bit temp64; | |
2282 | ||
2283 | temp64.val32.lower = c->ErrDesc.Addr.lower; | |
2284 | temp64.val32.upper = c->ErrDesc.Addr.upper; | |
2285 | pci_free_consistent(h->pdev, sizeof(*c->err_info), | |
2286 | c->err_info, (dma_addr_t) temp64.val); | |
2287 | pci_free_consistent(h->pdev, sizeof(*c), | |
2288 | c, (dma_addr_t) c->busaddr); | |
2289 | } | |
2290 | ||
2291 | #ifdef CONFIG_COMPAT | |
2292 | ||
edd16368 SC |
2293 | static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) |
2294 | { | |
2295 | IOCTL32_Command_struct __user *arg32 = | |
2296 | (IOCTL32_Command_struct __user *) arg; | |
2297 | IOCTL_Command_struct arg64; | |
2298 | IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); | |
2299 | int err; | |
2300 | u32 cp; | |
2301 | ||
2302 | err = 0; | |
2303 | err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, | |
2304 | sizeof(arg64.LUN_info)); | |
2305 | err |= copy_from_user(&arg64.Request, &arg32->Request, | |
2306 | sizeof(arg64.Request)); | |
2307 | err |= copy_from_user(&arg64.error_info, &arg32->error_info, | |
2308 | sizeof(arg64.error_info)); | |
2309 | err |= get_user(arg64.buf_size, &arg32->buf_size); | |
2310 | err |= get_user(cp, &arg32->buf); | |
2311 | arg64.buf = compat_ptr(cp); | |
2312 | err |= copy_to_user(p, &arg64, sizeof(arg64)); | |
2313 | ||
2314 | if (err) | |
2315 | return -EFAULT; | |
2316 | ||
e39eeaed | 2317 | err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p); |
edd16368 SC |
2318 | if (err) |
2319 | return err; | |
2320 | err |= copy_in_user(&arg32->error_info, &p->error_info, | |
2321 | sizeof(arg32->error_info)); | |
2322 | if (err) | |
2323 | return -EFAULT; | |
2324 | return err; | |
2325 | } | |
2326 | ||
2327 | static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, | |
2328 | int cmd, void *arg) | |
2329 | { | |
2330 | BIG_IOCTL32_Command_struct __user *arg32 = | |
2331 | (BIG_IOCTL32_Command_struct __user *) arg; | |
2332 | BIG_IOCTL_Command_struct arg64; | |
2333 | BIG_IOCTL_Command_struct __user *p = | |
2334 | compat_alloc_user_space(sizeof(arg64)); | |
2335 | int err; | |
2336 | u32 cp; | |
2337 | ||
2338 | err = 0; | |
2339 | err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, | |
2340 | sizeof(arg64.LUN_info)); | |
2341 | err |= copy_from_user(&arg64.Request, &arg32->Request, | |
2342 | sizeof(arg64.Request)); | |
2343 | err |= copy_from_user(&arg64.error_info, &arg32->error_info, | |
2344 | sizeof(arg64.error_info)); | |
2345 | err |= get_user(arg64.buf_size, &arg32->buf_size); | |
2346 | err |= get_user(arg64.malloc_size, &arg32->malloc_size); | |
2347 | err |= get_user(cp, &arg32->buf); | |
2348 | arg64.buf = compat_ptr(cp); | |
2349 | err |= copy_to_user(p, &arg64, sizeof(arg64)); | |
2350 | ||
2351 | if (err) | |
2352 | return -EFAULT; | |
2353 | ||
e39eeaed | 2354 | err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p); |
edd16368 SC |
2355 | if (err) |
2356 | return err; | |
2357 | err |= copy_in_user(&arg32->error_info, &p->error_info, | |
2358 | sizeof(arg32->error_info)); | |
2359 | if (err) | |
2360 | return -EFAULT; | |
2361 | return err; | |
2362 | } | |
71fe75a7 SC |
2363 | |
2364 | static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg) | |
2365 | { | |
2366 | switch (cmd) { | |
2367 | case CCISS_GETPCIINFO: | |
2368 | case CCISS_GETINTINFO: | |
2369 | case CCISS_SETINTINFO: | |
2370 | case CCISS_GETNODENAME: | |
2371 | case CCISS_SETNODENAME: | |
2372 | case CCISS_GETHEARTBEAT: | |
2373 | case CCISS_GETBUSTYPES: | |
2374 | case CCISS_GETFIRMVER: | |
2375 | case CCISS_GETDRIVVER: | |
2376 | case CCISS_REVALIDVOLS: | |
2377 | case CCISS_DEREGDISK: | |
2378 | case CCISS_REGNEWDISK: | |
2379 | case CCISS_REGNEWD: | |
2380 | case CCISS_RESCANDISK: | |
2381 | case CCISS_GETLUNINFO: | |
2382 | return hpsa_ioctl(dev, cmd, arg); | |
2383 | ||
2384 | case CCISS_PASSTHRU32: | |
2385 | return hpsa_ioctl32_passthru(dev, cmd, arg); | |
2386 | case CCISS_BIG_PASSTHRU32: | |
2387 | return hpsa_ioctl32_big_passthru(dev, cmd, arg); | |
2388 | ||
2389 | default: | |
2390 | return -ENOIOCTLCMD; | |
2391 | } | |
2392 | } | |
edd16368 SC |
2393 | #endif |
2394 | ||
2395 | static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) | |
2396 | { | |
2397 | struct hpsa_pci_info pciinfo; | |
2398 | ||
2399 | if (!argp) | |
2400 | return -EINVAL; | |
2401 | pciinfo.domain = pci_domain_nr(h->pdev->bus); | |
2402 | pciinfo.bus = h->pdev->bus->number; | |
2403 | pciinfo.dev_fn = h->pdev->devfn; | |
2404 | pciinfo.board_id = h->board_id; | |
2405 | if (copy_to_user(argp, &pciinfo, sizeof(pciinfo))) | |
2406 | return -EFAULT; | |
2407 | return 0; | |
2408 | } | |
2409 | ||
2410 | static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) | |
2411 | { | |
2412 | DriverVer_type DriverVer; | |
2413 | unsigned char vmaj, vmin, vsubmin; | |
2414 | int rc; | |
2415 | ||
2416 | rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu", | |
2417 | &vmaj, &vmin, &vsubmin); | |
2418 | if (rc != 3) { | |
2419 | dev_info(&h->pdev->dev, "driver version string '%s' " | |
2420 | "unrecognized.\n", HPSA_DRIVER_VERSION); | |
2421 | vmaj = 0; | |
2422 | vmin = 0; | |
2423 | vsubmin = 0; | |
2424 | } | |
2425 | DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin; | |
2426 | if (!argp) | |
2427 | return -EINVAL; | |
2428 | if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type))) | |
2429 | return -EFAULT; | |
2430 | return 0; | |
2431 | } | |
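/*
 * Worked example of the packing above, using a hypothetical version
 * string "2.0.2": sscanf() yields vmaj = 2, vmin = 0, vsubmin = 2,
 * which packs to (2 << 16) | (0 << 8) | 2 == 0x020002.
 */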
2432 | ||
2433 | static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |
2434 | { | |
2435 | IOCTL_Command_struct iocommand; | |
2436 | struct CommandList *c; | |
2437 | char *buff = NULL; | |
2438 | union u64bit temp64; | |
2439 | ||
2440 | if (!argp) | |
2441 | return -EINVAL; | |
2442 | if (!capable(CAP_SYS_RAWIO)) | |
2443 | return -EPERM; | |
2444 | if (copy_from_user(&iocommand, argp, sizeof(iocommand))) | |
2445 | return -EFAULT; | |
2446 | if ((iocommand.buf_size < 1) && | |
2447 | (iocommand.Request.Type.Direction != XFER_NONE)) { | |
2448 | return -EINVAL; | |
2449 | } | |
2450 | if (iocommand.buf_size > 0) { | |
2451 | buff = kmalloc(iocommand.buf_size, GFP_KERNEL); | |
2452 | if (buff == NULL) | |
2453 | return -ENOMEM; | |
2454 | } | |
2455 | if (iocommand.Request.Type.Direction == XFER_WRITE) { | |
2456 | /* Copy the data into the buffer we created */ | |
2457 | if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) { | |
2458 | kfree(buff); | |
2459 | return -EFAULT; | |
2460 | } | |
2461 | } else | |
2462 | memset(buff, 0, iocommand.buf_size); | |
2463 | c = cmd_special_alloc(h); | |
2464 | if (c == NULL) { | |
2465 | kfree(buff); | |
2466 | return -ENOMEM; | |
2467 | } | |
2468 | /* Fill in the command type */ | |
2469 | c->cmd_type = CMD_IOCTL_PEND; | |
2470 | /* Fill in Command Header */ | |
2471 | c->Header.ReplyQueue = 0; /* unused in simple mode */ | |
2472 | if (iocommand.buf_size > 0) { /* buffer to fill */ | |
2473 | c->Header.SGList = 1; | |
2474 | c->Header.SGTotal = 1; | |
2475 | } else { /* no buffers to fill */ | |
2476 | c->Header.SGList = 0; | |
2477 | c->Header.SGTotal = 0; | |
2478 | } | |
2479 | memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); | |
2480 | /* use the kernel address of the cmd block for tag */ | |
2481 | c->Header.Tag.lower = c->busaddr; | |
2482 | ||
2483 | /* Fill in Request block */ | |
2484 | memcpy(&c->Request, &iocommand.Request, | |
2485 | sizeof(c->Request)); | |
2486 | ||
2487 | /* Fill in the scatter gather information */ | |
2488 | if (iocommand.buf_size > 0) { | |
2489 | temp64.val = pci_map_single(h->pdev, buff, | |
2490 | iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); | |
2491 | c->SG[0].Addr.lower = temp64.val32.lower; | |
2492 | c->SG[0].Addr.upper = temp64.val32.upper; | |
2493 | c->SG[0].Len = iocommand.buf_size; | |
2494 | c->SG[0].Ext = 0; /* we are not chaining*/ | |
2495 | } | |
2496 | hpsa_scsi_do_simple_cmd_core(h, c); | |
2497 | hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); | |
2498 | check_ioctl_unit_attention(h, c); | |
2499 | ||
2500 | /* Copy the error information out */ | |
2501 | memcpy(&iocommand.error_info, c->err_info, | |
2502 | sizeof(iocommand.error_info)); | |
2503 | if (copy_to_user(argp, &iocommand, sizeof(iocommand))) { | |
2504 | kfree(buff); | |
2505 | cmd_special_free(h, c); | |
2506 | return -EFAULT; | |
2507 | } | |
2508 | ||
2509 | if (iocommand.Request.Type.Direction == XFER_READ) { | |
2510 | /* Copy the data out of the buffer we created */ | |
2511 | if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { | |
2512 | kfree(buff); | |
2513 | cmd_special_free(h, c); | |
2514 | return -EFAULT; | |
2515 | } | |
2516 | } | |
2517 | kfree(buff); | |
2518 | cmd_special_free(h, c); | |
2519 | return 0; | |
2520 | } | |
2521 | ||
2522 | static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |
2523 | { | |
2524 | BIG_IOCTL_Command_struct *ioc; | |
2525 | struct CommandList *c; | |
2526 | unsigned char **buff = NULL; | |
2527 | int *buff_size = NULL; | |
2528 | union u64bit temp64; | |
2529 | BYTE sg_used = 0; | |
2530 | int status = 0; | |
2531 | int i; | |
01a02ffc SC |
2532 | u32 left; |
2533 | u32 sz; | |
edd16368 SC |
2534 | BYTE __user *data_ptr; |
2535 | ||
2536 | if (!argp) | |
2537 | return -EINVAL; | |
2538 | if (!capable(CAP_SYS_RAWIO)) | |
2539 | return -EPERM; | |
2540 | ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); | |
2542 | if (!ioc) { | |
2543 | status = -ENOMEM; | |
2544 | goto cleanup1; | |
2545 | } | |
2546 | if (copy_from_user(ioc, argp, sizeof(*ioc))) { | |
2547 | status = -EFAULT; | |
2548 | goto cleanup1; | |
2549 | } | |
2550 | if ((ioc->buf_size < 1) && | |
2551 | (ioc->Request.Type.Direction != XFER_NONE)) { | |
2552 | status = -EINVAL; | |
2553 | goto cleanup1; | |
2554 | } | |
2555 | /* Check kmalloc limits using all SGs */ | |
2556 | if (ioc->malloc_size > MAX_KMALLOC_SIZE) { | |
2557 | status = -EINVAL; | |
2558 | goto cleanup1; | |
2559 | } | |
2560 | if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) { | |
2561 | status = -EINVAL; | |
2562 | goto cleanup1; | |
2563 | } | |
2564 | buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL); | |
2565 | if (!buff) { | |
2566 | status = -ENOMEM; | |
2567 | goto cleanup1; | |
2568 | } | |
2569 | buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL); | |
2570 | if (!buff_size) { | |
2571 | status = -ENOMEM; | |
2572 | goto cleanup1; | |
2573 | } | |
2574 | left = ioc->buf_size; | |
2575 | data_ptr = ioc->buf; | |
2576 | while (left) { | |
2577 | sz = (left > ioc->malloc_size) ? ioc->malloc_size : left; | |
2578 | buff_size[sg_used] = sz; | |
2579 | buff[sg_used] = kmalloc(sz, GFP_KERNEL); | |
2580 | if (buff[sg_used] == NULL) { | |
2581 | status = -ENOMEM; | |
2582 | goto cleanup1; | |
2583 | } | |
2584 | if (ioc->Request.Type.Direction == XFER_WRITE) { | |
2585 | if (copy_from_user(buff[sg_used], data_ptr, sz)) { | |
2586 | status = -EFAULT; | |
2587 | goto cleanup1; | |
2588 | } | |
2589 | } else | |
2590 | memset(buff[sg_used], 0, sz); | |
2591 | left -= sz; | |
2592 | data_ptr += sz; | |
2593 | sg_used++; | |
2594 | } | |
2595 | c = cmd_special_alloc(h); | |
2596 | if (c == NULL) { | |
2597 | status = -ENOMEM; | |
2598 | goto cleanup1; | |
2599 | } | |
2600 | c->cmd_type = CMD_IOCTL_PEND; | |
2601 | c->Header.ReplyQueue = 0; | |
2602 | ||
2603 | if (ioc->buf_size > 0) { | |
2604 | c->Header.SGList = sg_used; | |
2605 | c->Header.SGTotal = sg_used; | |
2606 | } else { | |
2607 | c->Header.SGList = 0; | |
2608 | c->Header.SGTotal = 0; | |
2609 | } | |
2610 | memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); | |
2611 | c->Header.Tag.lower = c->busaddr; | |
2612 | memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); | |
2613 | if (ioc->buf_size > 0) { | |
2614 | int i; | |
2615 | for (i = 0; i < sg_used; i++) { | |
2616 | temp64.val = pci_map_single(h->pdev, buff[i], | |
2617 | buff_size[i], PCI_DMA_BIDIRECTIONAL); | |
2618 | c->SG[i].Addr.lower = temp64.val32.lower; | |
2619 | c->SG[i].Addr.upper = temp64.val32.upper; | |
2620 | c->SG[i].Len = buff_size[i]; | |
2621 | /* we are not chaining */ | |
2622 | c->SG[i].Ext = 0; | |
2623 | } | |
2624 | } | |
2625 | hpsa_scsi_do_simple_cmd_core(h, c); | |
2626 | hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); | |
2627 | check_ioctl_unit_attention(h, c); | |
2628 | /* Copy the error information out */ | |
2629 | memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); | |
2630 | if (copy_to_user(argp, ioc, sizeof(*ioc))) { | |
2631 | cmd_special_free(h, c); | |
2632 | status = -EFAULT; | |
2633 | goto cleanup1; | |
2634 | } | |
2635 | if (ioc->Request.Type.Direction == XFER_READ) { | |
2636 | /* Copy the data out of the buffer we created */ | |
2637 | BYTE __user *ptr = ioc->buf; | |
2638 | for (i = 0; i < sg_used; i++) { | |
2639 | if (copy_to_user(ptr, buff[i], buff_size[i])) { | |
2640 | cmd_special_free(h, c); | |
2641 | status = -EFAULT; | |
2642 | goto cleanup1; | |
2643 | } | |
2644 | ptr += buff_size[i]; | |
2645 | } | |
2646 | } | |
2647 | cmd_special_free(h, c); | |
2648 | status = 0; | |
2649 | cleanup1: | |
2650 | if (buff) { | |
2651 | for (i = 0; i < sg_used; i++) | |
2652 | kfree(buff[i]); | |
2653 | kfree(buff); | |
2654 | } | |
2655 | kfree(buff_size); | |
2656 | kfree(ioc); | |
2657 | return status; | |
2658 | } | |
2659 | ||
2660 | static void check_ioctl_unit_attention(struct ctlr_info *h, | |
2661 | struct CommandList *c) | |
2662 | { | |
2663 | if (c->err_info->CommandStatus == CMD_TARGET_STATUS && | |
2664 | c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) | |
2665 | (void) check_for_unit_attention(h, c); | |
2666 | } | |
2667 | /* | |
2668 | * ioctl | |
2669 | */ | |
2670 | static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg) | |
2671 | { | |
2672 | struct ctlr_info *h; | |
2673 | void __user *argp = (void __user *)arg; | |
2674 | ||
2675 | h = sdev_to_hba(dev); | |
2676 | ||
2677 | switch (cmd) { | |
2678 | case CCISS_DEREGDISK: | |
2679 | case CCISS_REGNEWDISK: | |
2680 | case CCISS_REGNEWD: | |
a08a8471 | 2681 | hpsa_scan_start(h->scsi_host); |
edd16368 SC |
2682 | return 0; |
2683 | case CCISS_GETPCIINFO: | |
2684 | return hpsa_getpciinfo_ioctl(h, argp); | |
2685 | case CCISS_GETDRIVVER: | |
2686 | return hpsa_getdrivver_ioctl(h, argp); | |
2687 | case CCISS_PASSTHRU: | |
2688 | return hpsa_passthru_ioctl(h, argp); | |
2689 | case CCISS_BIG_PASSTHRU: | |
2690 | return hpsa_big_passthru_ioctl(h, argp); | |
2691 | default: | |
2692 | return -ENOTTY; | |
2693 | } | |
2694 | } | |
2695 | ||
01a02ffc SC |
2696 | static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, |
2697 | void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, | |
edd16368 SC |
2698 | int cmd_type) |
2699 | { | |
2700 | int pci_dir = XFER_NONE; | |
2701 | ||
2702 | c->cmd_type = CMD_IOCTL_PEND; | |
2703 | c->Header.ReplyQueue = 0; | |
2704 | if (buff != NULL && size > 0) { | |
2705 | c->Header.SGList = 1; | |
2706 | c->Header.SGTotal = 1; | |
2707 | } else { | |
2708 | c->Header.SGList = 0; | |
2709 | c->Header.SGTotal = 0; | |
2710 | } | |
2711 | c->Header.Tag.lower = c->busaddr; | |
2712 | memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); | |
2713 | ||
2714 | c->Request.Type.Type = cmd_type; | |
2715 | if (cmd_type == TYPE_CMD) { | |
2716 | switch (cmd) { | |
2717 | case HPSA_INQUIRY: | |
2718 | /* are we trying to read a vital product page */ | |
2719 | if (page_code != 0) { | |
2720 | c->Request.CDB[1] = 0x01; | |
2721 | c->Request.CDB[2] = page_code; | |
2722 | } | |
2723 | c->Request.CDBLen = 6; | |
2724 | c->Request.Type.Attribute = ATTR_SIMPLE; | |
2725 | c->Request.Type.Direction = XFER_READ; | |
2726 | c->Request.Timeout = 0; | |
2727 | c->Request.CDB[0] = HPSA_INQUIRY; | |
2728 | c->Request.CDB[4] = size & 0xFF; | |
2729 | break; | |
2730 | case HPSA_REPORT_LOG: | |
2731 | case HPSA_REPORT_PHYS: | |
2732 | /* Talking to the controller, so it's a physical command: | |
2733 | mode = 00, target = 0. Nothing to write. | |
2734 | */ | |
2735 | c->Request.CDBLen = 12; | |
2736 | c->Request.Type.Attribute = ATTR_SIMPLE; | |
2737 | c->Request.Type.Direction = XFER_READ; | |
2738 | c->Request.Timeout = 0; | |
2739 | c->Request.CDB[0] = cmd; | |
2740 | c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ | |
2741 | c->Request.CDB[7] = (size >> 16) & 0xFF; | |
2742 | c->Request.CDB[8] = (size >> 8) & 0xFF; | |
2743 | c->Request.CDB[9] = size & 0xFF; | |
2744 | break; | |
edd16368 SC |
2745 | case HPSA_CACHE_FLUSH: |
2746 | c->Request.CDBLen = 12; | |
2747 | c->Request.Type.Attribute = ATTR_SIMPLE; | |
2748 | c->Request.Type.Direction = XFER_WRITE; | |
2749 | c->Request.Timeout = 0; | |
2750 | c->Request.CDB[0] = BMIC_WRITE; | |
2751 | c->Request.CDB[6] = BMIC_CACHE_FLUSH; | |
2752 | break; | |
2753 | case TEST_UNIT_READY: | |
2754 | c->Request.CDBLen = 6; | |
2755 | c->Request.Type.Attribute = ATTR_SIMPLE; | |
2756 | c->Request.Type.Direction = XFER_NONE; | |
2757 | c->Request.Timeout = 0; | |
2758 | break; | |
2759 | default: | |
2760 | dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd); | |
2761 | BUG(); | |
2762 | return; | |
2763 | } | |
2764 | } else if (cmd_type == TYPE_MSG) { | |
2765 | switch (cmd) { | |
2766 | ||
2767 | case HPSA_DEVICE_RESET_MSG: | |
2768 | c->Request.CDBLen = 16; | |
2769 | c->Request.Type.Type = 1; /* It is a MSG not a CMD */ | |
2770 | c->Request.Type.Attribute = ATTR_SIMPLE; | |
2771 | c->Request.Type.Direction = XFER_NONE; | |
2772 | c->Request.Timeout = 0; /* Don't time out */ | |
2773 | c->Request.CDB[0] = 0x01; /* RESET_MSG is 0x01 */ | |
2774 | c->Request.CDB[1] = 0x03; /* Reset target above */ | |
2775 | /* If bytes 4-7 are zero, it means reset the */ | |
2776 | /* LunID device */ | |
2777 | c->Request.CDB[4] = 0x00; | |
2778 | c->Request.CDB[5] = 0x00; | |
2779 | c->Request.CDB[6] = 0x00; | |
2780 | c->Request.CDB[7] = 0x00; | |
2781 | break; | |
2782 | ||
2783 | default: | |
2784 | dev_warn(&h->pdev->dev, "unknown message type %d\n", | |
2785 | cmd); | |
2786 | BUG(); | |
2787 | } | |
2788 | } else { | |
2789 | dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); | |
2790 | BUG(); | |
2791 | } | |
2792 | ||
2793 | switch (c->Request.Type.Direction) { | |
2794 | case XFER_READ: | |
2795 | pci_dir = PCI_DMA_FROMDEVICE; | |
2796 | break; | |
2797 | case XFER_WRITE: | |
2798 | pci_dir = PCI_DMA_TODEVICE; | |
2799 | break; | |
2800 | case XFER_NONE: | |
2801 | pci_dir = PCI_DMA_NONE; | |
2802 | break; | |
2803 | default: | |
2804 | pci_dir = PCI_DMA_BIDIRECTIONAL; | |
2805 | } | |
2806 | ||
2807 | hpsa_map_one(h->pdev, c, buff, size, pci_dir); | |
2808 | ||
2809 | return; | |
2810 | } | |
2811 | ||
2812 | /* | |
2813 | * Map (physical) PCI mem into (virtual) kernel space | |
2814 | */ | |
2815 | static void __iomem *remap_pci_mem(ulong base, ulong size) | |
2816 | { | |
2817 | ulong page_base = ((ulong) base) & PAGE_MASK; | |
2818 | ulong page_offs = ((ulong) base) - page_base; | |
2819 | void __iomem *page_remapped = ioremap(page_base, page_offs + size); | |
2820 | ||
2821 | return page_remapped ? (page_remapped + page_offs) : NULL; | |
2822 | } | |
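/*
 * Illustrative use of remap_pci_mem(), with hypothetical values: map
 * 4KB of BAR 0 and read the first 32-bit register.
 *
 *	void __iomem *vaddr;
 *
 *	vaddr = remap_pci_mem(pci_resource_start(pdev, 0), 0x1000);
 *	if (vaddr)
 *		reg = readl(vaddr);
 */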
2823 | ||
2824 | /* Takes cmds off the submission queue and sends them to the hardware, | |
2825 | * then puts them on the queue of cmds waiting for completion. | |
2826 | */ | |
2827 | static void start_io(struct ctlr_info *h) | |
2828 | { | |
2829 | struct CommandList *c; | |
2830 | ||
2831 | while (!hlist_empty(&h->reqQ)) { | |
2832 | c = hlist_entry(h->reqQ.first, struct CommandList, list); | |
2833 | /* can't do anything if fifo is full */ | |
2834 | if (h->access.fifo_full(h)) { | |
2835 | dev_warn(&h->pdev->dev, "fifo full\n"); | |
2836 | break; | |
2837 | } | |
2838 | ||
2839 | /* Get the first entry from the Request Q */ | |
2840 | removeQ(c); | |
2841 | h->Qdepth--; | |
2842 | ||
2843 | /* Tell the controller to execute the command */ | |
2844 | h->access.submit_command(h, c); | |
2845 | ||
2846 | /* Put job onto the completed Q */ | |
2847 | addQ(&h->cmpQ, c); | |
2848 | } | |
2849 | } | |
2850 | ||
2851 | static inline unsigned long get_next_completion(struct ctlr_info *h) | |
2852 | { | |
2853 | return h->access.command_completed(h); | |
2854 | } | |
2855 | ||
900c5440 | 2856 | static inline bool interrupt_pending(struct ctlr_info *h) |
edd16368 SC |
2857 | { |
2858 | return h->access.intr_pending(h); | |
2859 | } | |
2860 | ||
2861 | static inline long interrupt_not_for_us(struct ctlr_info *h) | |
2862 | { | |
10f66018 SC |
2863 | return (h->access.intr_pending(h) == 0) || |
2864 | (h->interrupts_enabled == 0); | |
edd16368 SC |
2865 | } |
2866 | ||
01a02ffc SC |
2867 | static inline int bad_tag(struct ctlr_info *h, u32 tag_index, |
2868 | u32 raw_tag) | |
edd16368 SC |
2869 | { |
2870 | if (unlikely(tag_index >= h->nr_cmds)) { | |
2871 | dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); | |
2872 | return 1; | |
2873 | } | |
2874 | return 0; | |
2875 | } | |
2876 | ||
01a02ffc | 2877 | static inline void finish_cmd(struct CommandList *c, u32 raw_tag) |
edd16368 SC |
2878 | { |
2879 | removeQ(c); | |
2880 | if (likely(c->cmd_type == CMD_SCSI)) | |
2881 | complete_scsi_command(c, 0, raw_tag); | |
2882 | else if (c->cmd_type == CMD_IOCTL_PEND) | |
2883 | complete(c->waiting); | |
2884 | } | |
2885 | ||
a104c99f SC |
2886 | static inline u32 hpsa_tag_contains_index(u32 tag) |
2887 | { | |
303932fd | 2888 | #define DIRECT_LOOKUP_BIT 0x10 |
a104c99f SC |
2889 | return tag & DIRECT_LOOKUP_BIT; |
2890 | } | |
2891 | ||
2892 | static inline u32 hpsa_tag_to_index(u32 tag) | |
2893 | { | |
303932fd | 2894 | #define DIRECT_LOOKUP_SHIFT 5 |
a104c99f SC |
2895 | return tag >> DIRECT_LOOKUP_SHIFT; |
2896 | } | |
2897 | ||
2898 | static inline u32 hpsa_tag_discard_error_bits(u32 tag) | |
2899 | { | |
2900 | #define HPSA_ERROR_BITS 0x03 | |
2901 | return tag & ~HPSA_ERROR_BITS; | |
2902 | } | |
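/*
 * Worked example tying these helpers to the tag built in
 * hpsa_scsi_queue_command_lck() above: a command with cmdindex 7 gets
 * tag (7 << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT == 0xf0.
 * hpsa_tag_contains_index(0xf0) is nonzero, hpsa_tag_to_index(0xf0)
 * recovers 7, and the two low error bits are masked off by
 * hpsa_tag_discard_error_bits().
 */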
2903 | ||
303932fd DB |
2904 | /* process completion of an indexed ("direct lookup") command */ |
2905 | static inline u32 process_indexed_cmd(struct ctlr_info *h, | |
2906 | u32 raw_tag) | |
2907 | { | |
2908 | u32 tag_index; | |
2909 | struct CommandList *c; | |
2910 | ||
2911 | tag_index = hpsa_tag_to_index(raw_tag); | |
2912 | if (bad_tag(h, tag_index, raw_tag)) | |
2913 | return next_command(h); | |
2914 | c = h->cmd_pool + tag_index; | |
2915 | finish_cmd(c, raw_tag); | |
2916 | return next_command(h); | |
2917 | } | |
2918 | ||
2919 | /* process completion of a non-indexed command */ | |
2920 | static inline u32 process_nonindexed_cmd(struct ctlr_info *h, | |
2921 | u32 raw_tag) | |
2922 | { | |
2923 | u32 tag; | |
2924 | struct CommandList *c = NULL; | |
2925 | struct hlist_node *tmp; | |
2926 | ||
2927 | tag = hpsa_tag_discard_error_bits(raw_tag); | |
2928 | hlist_for_each_entry(c, tmp, &h->cmpQ, list) { | |
2929 | if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) { | |
2930 | finish_cmd(c, raw_tag); | |
2931 | return next_command(h); | |
2932 | } | |
2933 | } | |
2934 | bad_tag(h, h->nr_cmds + 1, raw_tag); | |
2935 | return next_command(h); | |
2936 | } | |
2937 | ||
10f66018 | 2938 | static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id) |
edd16368 SC |
2939 | { |
2940 | struct ctlr_info *h = dev_id; | |
edd16368 | 2941 | unsigned long flags; |
303932fd | 2942 | u32 raw_tag; |
edd16368 SC |
2943 | |
2944 | if (interrupt_not_for_us(h)) | |
2945 | return IRQ_NONE; | |
10f66018 SC |
2946 | spin_lock_irqsave(&h->lock, flags); |
2947 | while (interrupt_pending(h)) { | |
2948 | raw_tag = get_next_completion(h); | |
2949 | while (raw_tag != FIFO_EMPTY) { | |
2950 | if (hpsa_tag_contains_index(raw_tag)) | |
2951 | raw_tag = process_indexed_cmd(h, raw_tag); | |
2952 | else | |
2953 | raw_tag = process_nonindexed_cmd(h, raw_tag); | |
2954 | } | |
2955 | } | |
2956 | spin_unlock_irqrestore(&h->lock, flags); | |
2957 | return IRQ_HANDLED; | |
2958 | } | |
2959 | ||
2960 | static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id) | |
2961 | { | |
2962 | struct ctlr_info *h = dev_id; | |
2963 | unsigned long flags; | |
2964 | u32 raw_tag; | |
2965 | ||
edd16368 | 2966 | spin_lock_irqsave(&h->lock, flags); |
303932fd DB |
2967 | raw_tag = get_next_completion(h); |
2968 | while (raw_tag != FIFO_EMPTY) { | |
2969 | if (hpsa_tag_contains_index(raw_tag)) | |
2970 | raw_tag = process_indexed_cmd(h, raw_tag); | |
2971 | else | |
2972 | raw_tag = process_nonindexed_cmd(h, raw_tag); | |
edd16368 SC |
2973 | } |
2974 | spin_unlock_irqrestore(&h->lock, flags); | |
2975 | return IRQ_HANDLED; | |
2976 | } | |
2977 | ||
f0edafc6 | 2978 | /* Send a message CDB to the firmware. */ |
edd16368 SC |
2979 | static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, |
2980 | unsigned char type) | |
2981 | { | |
2982 | struct Command { | |
2983 | struct CommandListHeader CommandHeader; | |
2984 | struct RequestBlock Request; | |
2985 | struct ErrDescriptor ErrorDescriptor; | |
2986 | }; | |
2987 | struct Command *cmd; | |
2988 | static const size_t cmd_sz = sizeof(*cmd) + | |
2989 | sizeof(struct ErrorInfo); | |
2990 | dma_addr_t paddr64; | |
2991 | uint32_t paddr32, tag; | |
2992 | void __iomem *vaddr; | |
2993 | int i, err; | |
2994 | ||
2995 | vaddr = pci_ioremap_bar(pdev, 0); | |
2996 | if (vaddr == NULL) | |
2997 | return -ENOMEM; | |
2998 | ||
2999 | /* The Inbound Post Queue only accepts 32-bit physical addresses for the | |
3000 | * CCISS commands, so they must be allocated from the lower 4GiB of | |
3001 | * memory. | |
3002 | */ | |
3003 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | |
3004 | if (err) { | |
3005 | iounmap(vaddr); | |
3006 | return -ENOMEM; | |
3007 | } | |
3008 | ||
3009 | cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); | |
3010 | if (cmd == NULL) { | |
3011 | iounmap(vaddr); | |
3012 | return -ENOMEM; | |
3013 | } | |
3014 | ||
3015 | /* This must fit, because of the 32-bit consistent DMA mask. Also, | |
3016 | * although there's no guarantee, we assume that the address is at | |
3017 | * least 4-byte aligned (most likely, it's page-aligned). | |
3018 | */ | |
3019 | paddr32 = paddr64; | |
3020 | ||
3021 | cmd->CommandHeader.ReplyQueue = 0; | |
3022 | cmd->CommandHeader.SGList = 0; | |
3023 | cmd->CommandHeader.SGTotal = 0; | |
3024 | cmd->CommandHeader.Tag.lower = paddr32; | |
3025 | cmd->CommandHeader.Tag.upper = 0; | |
3026 | memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); | |
3027 | ||
3028 | cmd->Request.CDBLen = 16; | |
3029 | cmd->Request.Type.Type = TYPE_MSG; | |
3030 | cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE; | |
3031 | cmd->Request.Type.Direction = XFER_NONE; | |
3032 | cmd->Request.Timeout = 0; /* Don't time out */ | |
3033 | cmd->Request.CDB[0] = opcode; | |
3034 | cmd->Request.CDB[1] = type; | |
3035 | memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ | |
3036 | cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd); | |
3037 | cmd->ErrorDescriptor.Addr.upper = 0; | |
3038 | cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo); | |
3039 | ||
3040 | writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET); | |
3041 | ||
3042 | for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { | |
3043 | tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); | |
a104c99f | 3044 | if (hpsa_tag_discard_error_bits(tag) == paddr32) |
edd16368 SC |
3045 | break; |
3046 | msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); | |
3047 | } | |
3048 | ||
3049 | iounmap(vaddr); | |
3050 | ||
3051 | /* we leak the DMA buffer here ... no choice since the controller could | |
3052 | * still complete the command. | |
3053 | */ | |
3054 | if (i == HPSA_MSG_SEND_RETRY_LIMIT) { | |
3055 | dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", | |
3056 | opcode, type); | |
3057 | return -ETIMEDOUT; | |
3058 | } | |
3059 | ||
3060 | pci_free_consistent(pdev, cmd_sz, cmd, paddr64); | |
3061 | ||
3062 | if (tag & HPSA_ERROR_BIT) { | |
3063 | dev_err(&pdev->dev, "controller message %02x:%02x failed\n", | |
3064 | opcode, type); | |
3065 | return -EIO; | |
3066 | } | |
3067 | ||
3068 | dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", | |
3069 | opcode, type); | |
3070 | return 0; | |
3071 | } | |
3072 | ||
3073 | #define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0) | |
3074 | #define hpsa_noop(p) hpsa_message(p, 3, 0) | |
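/* Usage sketch: hpsa_init_reset_devices() below calls hpsa_noop(pdev)
 * in a retry loop to check that the controller responds after a
 * reset. Per the macros above, that sends controller message 03:00;
 * a soft reset would be message 01:00.
 */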
3075 | ||
3076 | static __devinit int hpsa_reset_msi(struct pci_dev *pdev) | |
3077 | { | |
3078 | /* the #defines are stolen from drivers/pci/msi.h. */ | |
3079 | #define msi_control_reg(base) (base + PCI_MSI_FLAGS) | |
3080 | #define PCI_MSIX_FLAGS_ENABLE (1 << 15) | |
3081 | ||
3082 | int pos; | |
3083 | u16 control = 0; | |
3084 | ||
3085 | pos = pci_find_capability(pdev, PCI_CAP_ID_MSI); | |
3086 | if (pos) { | |
3087 | pci_read_config_word(pdev, msi_control_reg(pos), &control); | |
3088 | if (control & PCI_MSI_FLAGS_ENABLE) { | |
3089 | dev_info(&pdev->dev, "resetting MSI\n"); | |
3090 | pci_write_config_word(pdev, msi_control_reg(pos), | |
3091 | control & ~PCI_MSI_FLAGS_ENABLE); | |
3092 | } | |
3093 | } | |
3094 | ||
3095 | pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); | |
3096 | if (pos) { | |
3097 | pci_read_config_word(pdev, msi_control_reg(pos), &control); | |
3098 | if (control & PCI_MSIX_FLAGS_ENABLE) { | |
3099 | dev_info(&pdev->dev, "resetting MSI-X\n"); | |
3100 | pci_write_config_word(pdev, msi_control_reg(pos), | |
3101 | control & ~PCI_MSIX_FLAGS_ENABLE); | |
3102 | } | |
3103 | } | |
3104 | ||
3105 | return 0; | |
3106 | } | |
3107 | ||
1df8552a SC |
3108 | static int hpsa_controller_hard_reset(struct pci_dev *pdev, |
3109 | void __iomem *vaddr, bool use_doorbell) | |
3110 | { | |
3111 | u16 pmcsr; | |
3112 | int pos; | |
3113 | ||
3114 | if (use_doorbell) { | |
3115 | /* For everything after the P600, the PCI power state method | |
3116 | * of resetting the controller doesn't work, so we have this | |
3117 | * other way using the doorbell register. | |
3118 | */ | |
3119 | dev_info(&pdev->dev, "using doorbell to reset controller\n"); | |
3120 | writel(DOORBELL_CTLR_RESET, vaddr + SA5_DOORBELL); | |
3121 | msleep(1000); | |
3122 | } else { /* Try to do it the PCI power state way */ | |
3123 | ||
3124 | /* Quoting from the Open CISS Specification: "The Power | |
3125 | * Management Control/Status Register (CSR) controls the power | |
3126 | * state of the device. The normal operating state is D0, | |
3127 | * CSR=00h. The software off state is D3, CSR=03h. To reset | |
3128 | * the controller, place the interface device in D3 then to D0, | |
3129 | * this causes a secondary PCI reset which will reset the | |
3130 | * controller." */ | |
3131 | ||
3132 | pos = pci_find_capability(pdev, PCI_CAP_ID_PM); | |
3133 | if (pos == 0) { | |
3134 | dev_err(&pdev->dev, | |
3135 | "hpsa_reset_controller: " | |
3136 | "PCI PM not supported\n"); | |
3137 | return -ENODEV; | |
3138 | } | |
3139 | dev_info(&pdev->dev, "using PCI PM to reset controller\n"); | |
3140 | /* enter the D3hot power management state */ | |
3141 | pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); | |
3142 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; | |
3143 | pmcsr |= PCI_D3hot; | |
3144 | pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); | |
3145 | ||
3146 | msleep(500); | |
3147 | ||
3148 | /* enter the D0 power management state */ | |
3149 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; | |
3150 | pmcsr |= PCI_D0; | |
3151 | pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); | |
3152 | ||
3153 | msleep(500); | |
3154 | } | |
3155 | return 0; | |
3156 | } | |
3157 | ||
edd16368 | 3158 | /* This does a hard reset of the controller using PCI power management |
1df8552a | 3159 | * states or the doorbell register. |
edd16368 | 3160 | */ |
1df8552a | 3161 | static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) |
edd16368 | 3162 | { |
1df8552a SC |
3163 | u16 saved_config_space[32]; |
3164 | u64 cfg_offset; | |
3165 | u32 cfg_base_addr; | |
3166 | u64 cfg_base_addr_index; | |
3167 | void __iomem *vaddr; | |
3168 | unsigned long paddr; | |
3169 | u32 misc_fw_support, active_transport; | |
3170 | int rc, i; | |
3171 | struct CfgTable __iomem *cfgtable; | |
3172 | bool use_doorbell; | |
18867659 | 3173 | u32 board_id; |
edd16368 | 3174 | |
1df8552a SC |
3175 | /* For controllers as old as the P600, this is very nearly |
3176 | * the same thing as | |
edd16368 SC |
3177 | * |
3178 | * pci_save_state(pci_dev); | |
3179 | * pci_set_power_state(pci_dev, PCI_D3hot); | |
3180 | * pci_set_power_state(pci_dev, PCI_D0); | |
3181 | * pci_restore_state(pci_dev); | |
3182 | * | |
3183 | * but we can't use these nice canned kernel routines on | |
3184 | * kexec, because they also check the MSI/MSI-X state in PCI | |
3185 | * configuration space and do the wrong thing when it is | |
3186 | * set/cleared. Also, the pci_save/restore_state functions | |
3187 | * violate the ordering requirements for restoring the | |
3188 | * configuration space from the CCISS document (see the | |
3189 | * comment below). So we roll our own .... | |
1df8552a SC |
3190 | * |
3191 | * For controllers newer than the P600, the pci power state | |
3192 | * method of resetting doesn't work so we have another way | |
3193 | * using the doorbell register. | |
edd16368 | 3194 | */ |
18867659 SC |
3195 | |
3196 | /* Exclude 640x boards. These are two pci devices in one slot | |
3197 | * which share a battery backed cache module. One controls the | |
3198 | * cache, the other accesses the cache through the one that controls | |
3199 | * it. If we reset the one controlling the cache, the other will | |
3200 | * likely not be happy. Just forbid resetting this conjoined mess. | |
3201 | * The 640x isn't really supported by hpsa anyway. | |
3202 | */ | |
3203 | hpsa_lookup_board_id(pdev, &board_id); | |
3204 | if (board_id == 0x409C0E11 || board_id == 0x409D0E11) | |
3205 | return -ENOTSUPP; | |
3206 | ||
edd16368 SC |
3207 | for (i = 0; i < 32; i++) |
3208 | pci_read_config_word(pdev, 2*i, &saved_config_space[i]); | |
3209 | ||
edd16368 | 3210 | |
1df8552a SC |
3211 | /* find the first memory BAR, so we can find the cfg table */ |
3212 | rc = hpsa_pci_find_memory_BAR(pdev, &paddr); | |
3213 | if (rc) | |
3214 | return rc; | |
3215 | vaddr = remap_pci_mem(paddr, 0x250); | |
3216 | if (!vaddr) | |
3217 | return -ENOMEM; | |
edd16368 | 3218 | |
1df8552a SC |
3219 | /* find cfgtable in order to check if reset via doorbell is supported */ |
3220 | rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, | |
3221 | &cfg_base_addr_index, &cfg_offset); | |
3222 | if (rc) | |
3223 | goto unmap_vaddr; | |
3224 | cfgtable = remap_pci_mem(pci_resource_start(pdev, | |
3225 | cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); | |
3226 | if (!cfgtable) { | |
3227 | rc = -ENOMEM; | |
3228 | goto unmap_vaddr; | |
3229 | } | |
edd16368 | 3230 | |
1df8552a SC |
3231 | /* If reset via doorbell register is supported, use that. */ |
3232 | misc_fw_support = readl(&cfgtable->misc_fw_support); | |
3233 | use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; | |
edd16368 | 3234 | |
36ed2176 SC |
3235 | /* The doorbell reset seems to cause lockups on some Smart |
3236 | * Arrays (e.g. P410, P410i, maybe others). Until this is | |
3237 | * fixed or at least isolated, avoid the doorbell reset. | |
3238 | */ | |
3239 | use_doorbell = 0; | |
3240 | ||
1df8552a SC |
3241 | rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); |
3242 | if (rc) | |
3243 | goto unmap_cfgtable; | |
edd16368 SC |
3244 | |
3245 | /* Restore the PCI configuration space. The Open CISS | |
3246 | * Specification says, "Restore the PCI Configuration | |
3247 | * Registers, offsets 00h through 60h. It is important to | |
3248 | * restore the command register, 16-bits at offset 04h, | |
3249 | * last. Do not restore the configuration status register, | |
3250 | * 16-bits at offset 06h." Note that the offset is 2*i. | |
3251 | */ | |
3252 | for (i = 0; i < 32; i++) { | |
3253 | if (i == 2 || i == 3) | |
3254 | continue; | |
3255 | pci_write_config_word(pdev, 2*i, saved_config_space[i]); | |
3256 | } | |
3257 | wmb(); | |
3258 | pci_write_config_word(pdev, 4, saved_config_space[2]); | |
3259 | ||
1df8552a SC |
3260 | /* Some devices (notably the HP Smart Array 5i Controller) |
3261 | need a little pause here */ | |
3262 | msleep(HPSA_POST_RESET_PAUSE_MSECS); | |
3263 | ||
3264 | /* Controller should be in simple mode at this point. If it's not, | |
3265 | * it means we're on one of those controllers which doesn't support | |
3266 | * the doorbell reset method and on which the PCI power management reset | |
3267 | * method doesn't work (the P800, for example). | |
3268 | * In those cases, pretend the reset worked and hope for the best. | |
3269 | */ | |
3270 | active_transport = readl(&cfgtable->TransportActive); | |
3271 | if (active_transport & PERFORMANT_MODE) { | |
3272 | dev_warn(&pdev->dev, "Unable to successfully reset controller," | |
3273 | " proceeding anyway.\n"); | |
3274 | rc = -ENOTSUPP; | |
3275 | } | |
3276 | ||
3277 | unmap_cfgtable: | |
3278 | iounmap(cfgtable); | |
3279 | ||
3280 | unmap_vaddr: | |
3281 | iounmap(vaddr); | |
3282 | return rc; | |
edd16368 SC |
3283 | } |
3284 | ||
3285 | /* | |
3286 | * We cannot read the structure directly; for portability we must use | |
3287 | * the I/O accessor functions. | |
3288 | * This is for debug only. | |
3289 | */ | |
edd16368 SC |
3290 | static void print_cfg_table(struct device *dev, struct CfgTable *tb) |
3291 | { | |
58f8665c | 3292 | #ifdef HPSA_DEBUG |
edd16368 SC |
3293 | int i; |
3294 | char temp_name[17]; | |
3295 | ||
3296 | dev_info(dev, "Controller Configuration information\n"); | |
3297 | dev_info(dev, "------------------------------------\n"); | |
3298 | for (i = 0; i < 4; i++) | |
3299 | temp_name[i] = readb(&(tb->Signature[i])); | |
3300 | temp_name[4] = '\0'; | |
3301 | dev_info(dev, " Signature = %s\n", temp_name); | |
3302 | dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence))); | |
3303 | dev_info(dev, " Transport methods supported = 0x%x\n", | |
3304 | readl(&(tb->TransportSupport))); | |
3305 | dev_info(dev, " Transport methods active = 0x%x\n", | |
3306 | readl(&(tb->TransportActive))); | |
3307 | dev_info(dev, " Requested transport Method = 0x%x\n", | |
3308 | readl(&(tb->HostWrite.TransportRequest))); | |
3309 | dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n", | |
3310 | readl(&(tb->HostWrite.CoalIntDelay))); | |
3311 | dev_info(dev, " Coalesce Interrupt Count = 0x%x\n", | |
3312 | readl(&(tb->HostWrite.CoalIntCount))); | |
3313 | dev_info(dev, " Max outstanding commands = %d\n", | |
3314 | readl(&(tb->CmdsOutMax))); | |
3315 | dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); | |
3316 | for (i = 0; i < 16; i++) | |
3317 | temp_name[i] = readb(&(tb->ServerName[i])); | |
3318 | temp_name[16] = '\0'; | |
3319 | dev_info(dev, " Server Name = %s\n", temp_name); | |
3320 | dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n", | |
3321 | readl(&(tb->HeartBeat))); | |
edd16368 | 3322 | #endif /* HPSA_DEBUG */ |
58f8665c | 3323 | } |
edd16368 SC |
3324 | |
3325 | static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) | |
3326 | { | |
3327 | int i, offset, mem_type, bar_type; | |
3328 | ||
3329 | if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */ | |
3330 | return 0; | |
3331 | offset = 0; | |
3332 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | |
3333 | bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE; | |
3334 | if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) | |
3335 | offset += 4; | |
3336 | else { | |
3337 | mem_type = pci_resource_flags(pdev, i) & | |
3338 | PCI_BASE_ADDRESS_MEM_TYPE_MASK; | |
3339 | switch (mem_type) { | |
3340 | case PCI_BASE_ADDRESS_MEM_TYPE_32: | |
3341 | case PCI_BASE_ADDRESS_MEM_TYPE_1M: | |
3342 | offset += 4; /* 32 bit */ | |
3343 | break; | |
3344 | case PCI_BASE_ADDRESS_MEM_TYPE_64: | |
3345 | offset += 8; | |
3346 | break; | |
3347 | default: /* reserved in PCI 2.2 */ | |
3348 | dev_warn(&pdev->dev, | |
3349 | "base address is invalid\n"); | |
3350 | return -1; | |
3352 | } | |
3353 | } | |
3354 | if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) | |
3355 | return i + 1; | |
3356 | } | |
3357 | return -1; | |
3358 | } | |
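/* Worked example with a hypothetical BAR layout: if BAR 0 is a 64-bit
 * memory BAR, it consumes 8 bytes of config space, so looking up
 * pci_bar_addr = PCI_BASE_ADDRESS_0 + 8 adds 8 at i = 0, matches the
 * requested offset, and returns i + 1 = 1, the next resource index.
 */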
3359 | ||
3360 | /* If MSI/MSI-X is supported by the kernel we will try to enable it on | |
3361 | * controllers that are capable. If not, we use IO-APIC mode. | |
3362 | */ | |
3363 | ||
6b3f4c52 | 3364 | static void __devinit hpsa_interrupt_mode(struct ctlr_info *h) |
edd16368 SC |
3365 | { |
3366 | #ifdef CONFIG_PCI_MSI | |
3367 | int err; | |
3368 | struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1}, | |
3369 | {0, 2}, {0, 3} | |
3370 | }; | |
3371 | ||
3372 | /* Some boards advertise MSI but don't really support it */ | |
6b3f4c52 SC |
3373 | if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || |
3374 | (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) | |
edd16368 | 3375 | goto default_int_mode; |
55c06c71 SC |
3376 | if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { |
3377 | dev_info(&h->pdev->dev, "MSIX\n"); | |
3378 | err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4); | |
edd16368 SC |
3379 | if (!err) { |
3380 | h->intr[0] = hpsa_msix_entries[0].vector; | |
3381 | h->intr[1] = hpsa_msix_entries[1].vector; | |
3382 | h->intr[2] = hpsa_msix_entries[2].vector; | |
3383 | h->intr[3] = hpsa_msix_entries[3].vector; | |
3384 | h->msix_vector = 1; | |
3385 | return; | |
3386 | } | |
3387 | if (err > 0) { | |
55c06c71 | 3388 | dev_warn(&h->pdev->dev, "only %d MSI-X vectors " |
edd16368 SC |
3389 | "available\n", err); |
3390 | goto default_int_mode; | |
3391 | } else { | |
55c06c71 | 3392 | dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", |
edd16368 SC |
3393 | err); |
3394 | goto default_int_mode; | |
3395 | } | |
3396 | } | |
55c06c71 SC |
3397 | if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { |
3398 | dev_info(&h->pdev->dev, "MSI\n"); | |
3399 | if (!pci_enable_msi(h->pdev)) | |
edd16368 SC |
3400 | h->msi_vector = 1; |
3401 | else | |
55c06c71 | 3402 | dev_warn(&h->pdev->dev, "MSI init failed\n"); |
edd16368 SC |
3403 | } |
3404 | default_int_mode: | |
3405 | #endif /* CONFIG_PCI_MSI */ | |
3406 | /* if we get here we're going to use the default interrupt mode */ | |
55c06c71 | 3407 | h->intr[PERF_MODE_INT] = h->pdev->irq; |
edd16368 SC |
3408 | } |
3409 | ||
e5c880d1 SC |
3410 | static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) |
3411 | { | |
3412 | int i; | |
3413 | u32 subsystem_vendor_id, subsystem_device_id; | |
3414 | ||
3415 | subsystem_vendor_id = pdev->subsystem_vendor; | |
3416 | subsystem_device_id = pdev->subsystem_device; | |
3417 | *board_id = ((subsystem_device_id << 16) & 0xffff0000) | | |
3418 | subsystem_vendor_id; | |
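/* Worked example: the P600 quirk in hpsa_p600_dma_prefetch_quirk()
 * below checks board_id 0x3225103C, i.e. subsystem device ID 0x3225
 * shifted into the high 16 bits, OR'd with HP's subsystem vendor ID
 * 0x103C in the low 16 bits.
 */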
3419 | ||
3420 | for (i = 0; i < ARRAY_SIZE(products); i++) | |
3421 | if (*board_id == products[i].board_id) | |
3422 | return i; | |
3423 | ||
6798cc0a SC |
3424 | if ((subsystem_vendor_id != PCI_VENDOR_ID_HP && |
3425 | subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) || | |
3426 | !hpsa_allow_any) { | |
e5c880d1 SC |
3427 | dev_warn(&pdev->dev, "unrecognized board ID: " |
3428 | "0x%08x, ignoring.\n", *board_id); | |
3429 | return -ENODEV; | |
3430 | } | |
3431 | return ARRAY_SIZE(products) - 1; /* generic unknown smart array */ | |
3432 | } | |
3433 | ||
85bdbabb SC |
3434 | static inline bool hpsa_board_disabled(struct pci_dev *pdev) |
3435 | { | |
3436 | u16 command; | |
3437 | ||
3438 | (void) pci_read_config_word(pdev, PCI_COMMAND, &command); | |
3439 | return ((command & PCI_COMMAND_MEMORY) == 0); | |
3440 | } | |
3441 | ||
12d2cd47 | 3442 | static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev, |
3a7774ce SC |
3443 | unsigned long *memory_bar) |
3444 | { | |
3445 | int i; | |
3446 | ||
3447 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) | |
12d2cd47 | 3448 | if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { |
3a7774ce | 3449 | /* addressing mode bits already removed */ |
12d2cd47 SC |
3450 | *memory_bar = pci_resource_start(pdev, i); |
3451 | dev_dbg(&pdev->dev, "memory BAR = %lx\n", | |
3a7774ce SC |
3452 | *memory_bar); |
3453 | return 0; | |
3454 | } | |
12d2cd47 | 3455 | dev_warn(&pdev->dev, "no memory BAR found\n"); |
3a7774ce SC |
3456 | return -ENODEV; |
3457 | } | |
3458 | ||
2c4c8c8b SC |
3459 | static int __devinit hpsa_wait_for_board_ready(struct ctlr_info *h) |
3460 | { | |
3461 | int i; | |
3462 | u32 scratchpad; | |
3463 | ||
3464 | for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) { | |
3465 | scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); | |
3466 | if (scratchpad == HPSA_FIRMWARE_READY) | |
3467 | return 0; | |
3468 | msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS); | |
3469 | } | |
3470 | dev_warn(&h->pdev->dev, "board not ready, timed out.\n"); | |
3471 | return -ENODEV; | |
3472 | } | |
3473 | ||
a51fd47f SC |
3474 | static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev, |
3475 | void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index, | |
3476 | u64 *cfg_offset) | |
3477 | { | |
3478 | *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET); | |
3479 | *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET); | |
3480 | *cfg_base_addr &= (u32) 0x0000ffff; | |
3481 | *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr); | |
3482 | if (*cfg_base_addr_index == -1) { | |
3483 | dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n"); | |
3484 | return -ENODEV; | |
3485 | } | |
3486 | return 0; | |
3487 | } | |
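/* A sketch with hypothetical register contents: if the controller
 * reports cfg_base_addr 0x14 and cfg_offset 0x1000, the config table
 * lives 0x1000 bytes into whichever PCI resource
 * find_PCI_BAR_index() maps config-space offset 0x14 to.
 */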
3488 | ||
77c4495c | 3489 | static int __devinit hpsa_find_cfgtables(struct ctlr_info *h) |
edd16368 | 3490 | { |
01a02ffc SC |
3491 | u64 cfg_offset; |
3492 | u32 cfg_base_addr; | |
3493 | u64 cfg_base_addr_index; | |
303932fd | 3494 | u32 trans_offset; |
a51fd47f | 3495 | int rc; |
77c4495c | 3496 | |
a51fd47f SC |
3497 | rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, |
3498 | &cfg_base_addr_index, &cfg_offset); | |
3499 | if (rc) | |
3500 | return rc; | |
77c4495c | 3501 | h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, |
a51fd47f | 3502 | cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); |
77c4495c SC |
3503 | if (!h->cfgtable) |
3504 | return -ENOMEM; | |
3505 | /* Find performant mode table. */ | |
a51fd47f | 3506 | trans_offset = readl(&h->cfgtable->TransMethodOffset); |
77c4495c SC |
3507 | h->transtable = remap_pci_mem(pci_resource_start(h->pdev, |
3508 | cfg_base_addr_index)+cfg_offset+trans_offset, | |
3509 | sizeof(*h->transtable)); | |
3510 | if (!h->transtable) | |
3511 | return -ENOMEM; | |
3512 | return 0; | |
3513 | } | |
3514 | ||
cba3d38b SC |
3515 | static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) |
3516 | { | |
3517 | h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); | |
3518 | if (h->max_commands < 16) { | |
3519 | dev_warn(&h->pdev->dev, "Controller reports " | |
3520 | "max supported commands of %d, an obvious lie. " | |
3521 | "Using 16. Ensure that firmware is up to date.\n", | |
3522 | h->max_commands); | |
3523 | h->max_commands = 16; | |
3524 | } | |
3525 | } | |
3526 | ||
b93d7536 SC |
3527 | /* Interrogate the hardware for some limits: |
3528 | * max commands, max SG elements without chaining, and with chaining, | |
3529 | * SG chain block size, etc. | |
3530 | */ | |
3531 | static void __devinit hpsa_find_board_params(struct ctlr_info *h) | |
3532 | { | |
cba3d38b | 3533 | hpsa_get_max_perf_mode_cmds(h); |
b93d7536 SC |
3534 | h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */ |
3535 | h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); | |
3536 | /* | |
3537 | * Limit in-command s/g elements to 32 to save DMA'able memory. | |
3538 | * However, the spec says if 0, use 31. | |
3539 | */ | |
3540 | h->max_cmd_sg_entries = 31; | |
3541 | if (h->maxsgentries > 512) { | |
3542 | h->max_cmd_sg_entries = 32; | |
3543 | h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1; | |
3544 | h->maxsgentries--; /* save one for chain pointer */ | |
3545 | } else { | |
3546 | h->maxsgentries = 31; /* default to traditional values */ | |
3547 | h->chainsize = 0; | |
3548 | } | |
3549 | } | |
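/* A worked example with hypothetical firmware values: a controller
 * reporting MaxScatterGatherElements = 2048 (> 512) keeps 32
 * in-command SG entries, gets chainsize = 2048 - 32 + 1 = 2017, and
 * reserves one entry for the chain pointer (maxsgentries = 2047);
 * one reporting 128 falls back to 31 in-command entries, no chaining.
 */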
3550 | ||
76c46e49 SC |
3551 | static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) |
3552 | { | |
3553 | if ((readb(&h->cfgtable->Signature[0]) != 'C') || | |
3554 | (readb(&h->cfgtable->Signature[1]) != 'I') || | |
3555 | (readb(&h->cfgtable->Signature[2]) != 'S') || | |
3556 | (readb(&h->cfgtable->Signature[3]) != 'S')) { | |
3557 | dev_warn(&h->pdev->dev, "not a valid CISS config table\n"); | |
3558 | return false; | |
3559 | } | |
3560 | return true; | |
3561 | } | |
3562 | ||
f7c39101 SC |
3563 | /* Need to enable prefetch in the SCSI core for 6400 in x86 */ |
3564 | static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h) | |
3565 | { | |
3566 | #ifdef CONFIG_X86 | |
3567 | u32 prefetch; | |
3568 | ||
3569 | prefetch = readl(&(h->cfgtable->SCSI_Prefetch)); | |
3570 | prefetch |= 0x100; | |
3571 | writel(prefetch, &(h->cfgtable->SCSI_Prefetch)); | |
3572 | #endif | |
3573 | } | |
3574 | ||
3d0eab67 SC |
3575 | /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result |
3576 | * in a prefetch beyond physical memory. | |
3577 | */ | |
3578 | static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h) | |
3579 | { | |
3580 | u32 dma_prefetch; | |
3581 | ||
3582 | if (h->board_id != 0x3225103C) | |
3583 | return; | |
3584 | dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); | |
3585 | dma_prefetch |= 0x8000; | |
3586 | writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); | |
3587 | } | |
3588 | ||
3f4336f3 | 3589 | static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h) |
eb6b2ae9 SC |
3590 | { |
3591 | int i; | |
eb6b2ae9 SC |
3592 | |
3593 | /* under certain very rare conditions, this can take a while. | |
3594 | * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right | |
3595 | * as we enter this code.) | |
3596 | */ | |
3597 | for (i = 0; i < MAX_CONFIG_WAIT; i++) { | |
3598 | if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) | |
3599 | break; | |
3600 | /* delay and try again */ | |
3601 | msleep(10); | |
3602 | } | |
3f4336f3 SC |
3603 | } |
3604 | ||
3605 | static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h) | |
3606 | { | |
3607 | u32 trans_support; | |
3608 | ||
3609 | trans_support = readl(&(h->cfgtable->TransportSupport)); | |
3610 | if (!(trans_support & SIMPLE_MODE)) | |
3611 | return -ENOTSUPP; | |
3612 | ||
3613 | h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); | |
3614 | /* Update the field, and then ring the doorbell */ | |
3615 | writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); | |
3616 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); | |
3617 | hpsa_wait_for_mode_change_ack(h); | |
eb6b2ae9 | 3618 | print_cfg_table(&h->pdev->dev, h->cfgtable); |
eb6b2ae9 SC |
3619 | if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) { |
3620 | dev_warn(&h->pdev->dev, | |
3621 | "unable to get board into simple mode\n"); | |
3622 | return -ENODEV; | |
3623 | } | |
3624 | return 0; | |
3625 | } | |
3626 | ||
77c4495c SC |
3627 | static int __devinit hpsa_pci_init(struct ctlr_info *h) |
3628 | { | |
eb6b2ae9 | 3629 | int prod_index, err; |
edd16368 | 3630 | |
e5c880d1 SC |
3631 | prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); |
3632 | if (prod_index < 0) | |
3633 | return -ENODEV; | |
3634 | h->product_name = products[prod_index].product_name; | |
3635 | h->access = *(products[prod_index].access); | |
edd16368 | 3636 | |
85bdbabb | 3637 | if (hpsa_board_disabled(h->pdev)) { |
55c06c71 | 3638 | dev_warn(&h->pdev->dev, "controller appears to be disabled\n"); |
edd16368 SC |
3639 | return -ENODEV; |
3640 | } | |
55c06c71 | 3641 | err = pci_enable_device(h->pdev); |
edd16368 | 3642 | if (err) { |
55c06c71 | 3643 | dev_warn(&h->pdev->dev, "unable to enable PCI device\n"); |
edd16368 SC |
3644 | return err; |
3645 | } | |
3646 | ||
55c06c71 | 3647 | err = pci_request_regions(h->pdev, "hpsa"); |
edd16368 | 3648 | if (err) { |
55c06c71 SC |
3649 | dev_err(&h->pdev->dev, |
3650 | "cannot obtain PCI resources, aborting\n"); | |
edd16368 SC |
3651 | return err; |
3652 | } | |
6b3f4c52 | 3653 | hpsa_interrupt_mode(h); |
12d2cd47 | 3654 | err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); |
3a7774ce | 3655 | if (err) |
edd16368 | 3656 | goto err_out_free_res; |
edd16368 | 3657 | h->vaddr = remap_pci_mem(h->paddr, 0x250); |
204892e9 SC |
3658 | if (!h->vaddr) { |
3659 | err = -ENOMEM; | |
3660 | goto err_out_free_res; | |
3661 | } | |
2c4c8c8b SC |
3662 | err = hpsa_wait_for_board_ready(h); |
3663 | if (err) | |
edd16368 | 3664 | goto err_out_free_res; |
77c4495c SC |
3665 | err = hpsa_find_cfgtables(h); |
3666 | if (err) | |
edd16368 | 3667 | goto err_out_free_res; |
b93d7536 | 3668 | hpsa_find_board_params(h); |
edd16368 | 3669 | |
76c46e49 | 3670 | if (!hpsa_CISS_signature_present(h)) { |
edd16368 SC |
3671 | err = -ENODEV; |
3672 | goto err_out_free_res; | |
3673 | } | |
f7c39101 | 3674 | hpsa_enable_scsi_prefetch(h); |
3d0eab67 | 3675 | hpsa_p600_dma_prefetch_quirk(h); |
eb6b2ae9 SC |
3676 | err = hpsa_enter_simple_mode(h); |
3677 | if (err) | |
edd16368 | 3678 | goto err_out_free_res; |
edd16368 SC |
3679 | return 0; |
3680 | ||
3681 | err_out_free_res: | |
204892e9 SC |
3682 | if (h->transtable) |
3683 | iounmap(h->transtable); | |
3684 | if (h->cfgtable) | |
3685 | iounmap(h->cfgtable); | |
3686 | if (h->vaddr) | |
3687 | iounmap(h->vaddr); | |
edd16368 SC |
3688 | /* |
3689 | * Deliberately omit pci_disable_device(): it does something nasty to | |
3690 | * Smart Array controllers that pci_enable_device does not undo | |
3691 | */ | |
55c06c71 | 3692 | pci_release_regions(h->pdev); |
edd16368 SC |
3693 | return err; |
3694 | } | |
3695 | ||
339b2b14 SC |
3696 | static void __devinit hpsa_hba_inquiry(struct ctlr_info *h) |
3697 | { | |
3698 | int rc; | |
3699 | ||
3700 | #define HBA_INQUIRY_BYTE_COUNT 64 | |
3701 | h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL); | |
3702 | if (!h->hba_inquiry_data) | |
3703 | return; | |
3704 | rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0, | |
3705 | h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT); | |
3706 | if (rc != 0) { | |
3707 | kfree(h->hba_inquiry_data); | |
3708 | h->hba_inquiry_data = NULL; | |
3709 | } | |
3710 | } | |
3711 | ||
4c2a8c40 SC |
3712 | static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev) |
3713 | { | |
1df8552a | 3714 | int rc, i; |
4c2a8c40 SC |
3715 | |
3716 | if (!reset_devices) | |
3717 | return 0; | |
3718 | ||
1df8552a SC |
3719 | /* Reset the controller with a PCI power-cycle or via doorbell */ |
3720 | rc = hpsa_kdump_hard_reset_controller(pdev); | |
4c2a8c40 | 3721 | |
1df8552a SC |
3722 | /* -ENOTSUPP here means we cannot reset the controller |
3723 | * but it's already (and still) up and running in | |
18867659 SC |
3724 | * "performant mode". Or, it might be 640x, which can't reset |
3725 | * due to concerns about shared bbwc between 6402/6404 pair. | |
1df8552a SC |
3726 | */ |
3727 | if (rc == -ENOTSUPP) | |
3728 | return 0; /* just try to do the kdump anyhow. */ | |
3729 | if (rc) | |
3730 | return -ENODEV; | |
3731 | if (hpsa_reset_msi(pdev)) | |
3732 | return -ENODEV; | |
4c2a8c40 SC |
3733 | |
3734 | /* Now try to get the controller to respond to a no-op */ | |
3735 | for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { | |
3736 | if (hpsa_noop(pdev) == 0) | |
3737 | break; | |
3738 | else | |
3739 | dev_warn(&pdev->dev, "no-op failed%s\n", | |
3740 | (i < HPSA_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : "")); | |
3741 | } | |
3742 | return 0; | |
3743 | } | |
3744 | ||
edd16368 SC |
3745 | static int __devinit hpsa_init_one(struct pci_dev *pdev, |
3746 | const struct pci_device_id *ent) | |
3747 | { | |
4c2a8c40 | 3748 | int dac, rc; |
edd16368 SC |
3749 | struct ctlr_info *h; |
3750 | ||
3751 | if (number_of_controllers == 0) | |
3752 | printk(KERN_INFO DRIVER_NAME "\n"); | |
edd16368 | 3753 | |
4c2a8c40 SC |
3754 | rc = hpsa_init_reset_devices(pdev); |
3755 | if (rc) | |
3756 | return rc; | |
edd16368 | 3757 | |
303932fd DB |
3758 | /* Command structures must be aligned on a 32-byte boundary because |
3759 | * the 5 lower bits of the address are used by the hardware and by | |
3760 | * the driver. See comments in hpsa.h for more info. | |
3761 | */ | |
3762 | #define COMMANDLIST_ALIGNMENT 32 | |
3763 | BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); | |
edd16368 SC |
3764 | h = kzalloc(sizeof(*h), GFP_KERNEL); |
3765 | if (!h) | |
ecd9aad4 | 3766 | return -ENOMEM; |
edd16368 | 3767 | |
55c06c71 | 3768 | h->pdev = pdev; |
edd16368 SC |
3769 | h->busy_initializing = 1; |
3770 | INIT_HLIST_HEAD(&h->cmpQ); | |
3771 | INIT_HLIST_HEAD(&h->reqQ); | |
55c06c71 | 3772 | rc = hpsa_pci_init(h); |
ecd9aad4 | 3773 | if (rc != 0) |
edd16368 SC |
3774 | goto clean1; |
3775 | ||
3776 | sprintf(h->devname, "hpsa%d", number_of_controllers); | |
3777 | h->ctlr = number_of_controllers; | |
3778 | number_of_controllers++; | |
edd16368 SC |
3779 | |
3780 | /* configure PCI DMA stuff */ | |
ecd9aad4 SC |
3781 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); |
3782 | if (rc == 0) { | |
edd16368 | 3783 | dac = 1; |
ecd9aad4 SC |
3784 | } else { |
3785 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | |
3786 | if (rc == 0) { | |
3787 | dac = 0; | |
3788 | } else { | |
3789 | dev_err(&pdev->dev, "no suitable DMA available\n"); | |
3790 | goto clean1; | |
3791 | } | |
edd16368 SC |
3792 | } |
3793 | ||
3794 | /* make sure the board interrupts are off */ | |
3795 | h->access.set_intr_mask(h, HPSA_INTR_OFF); | |
10f66018 SC |
3796 | |
3797 | if (h->msix_vector || h->msi_vector) | |
3798 | rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_msi, | |
3799 | IRQF_DISABLED, h->devname, h); | |
3800 | else | |
3801 | rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_intx, | |
3802 | IRQF_DISABLED, h->devname, h); | |
ecd9aad4 | 3803 | if (rc) { |
edd16368 | 3804 | dev_err(&pdev->dev, "unable to get irq %d for %s\n", |
303932fd | 3805 | h->intr[PERF_MODE_INT], h->devname); |
edd16368 SC |
3806 | goto clean2; |
3807 | } | |
3808 | ||
303932fd DB |
3809 | dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", |
3810 | h->devname, pdev->device, | |
3811 | h->intr[PERF_MODE_INT], dac ? "" : " not"); | |
edd16368 SC |
3812 | |
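/* A small arithmetic sketch (the nr_cmds value is hypothetical): the
 * bitmap needs one bit per command slot, rounded up to whole longs,
 * so nr_cmds = 250 on a 64-bit machine needs (250 + 63) / 64 = 4
 * unsigned longs.
 */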
3813 | h->cmd_pool_bits = | |
3814 | kmalloc(((h->nr_cmds + BITS_PER_LONG - | |
3815 | 1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL); | |
3816 | h->cmd_pool = pci_alloc_consistent(h->pdev, | |
3817 | h->nr_cmds * sizeof(*h->cmd_pool), | |
3818 | &(h->cmd_pool_dhandle)); | |
3819 | h->errinfo_pool = pci_alloc_consistent(h->pdev, | |
3820 | h->nr_cmds * sizeof(*h->errinfo_pool), | |
3821 | &(h->errinfo_pool_dhandle)); | |
3822 | if ((h->cmd_pool_bits == NULL) | |
3823 | || (h->cmd_pool == NULL) | |
3824 | || (h->errinfo_pool == NULL)) { | |
3825 | dev_err(&pdev->dev, "out of memory\n"); | |
ecd9aad4 | 3826 | rc = -ENOMEM; |
edd16368 SC |
3827 | goto clean4; |
3828 | } | |
33a2ffce SC |
3829 | if (hpsa_allocate_sg_chain_blocks(h)) |
3830 | goto clean4; | |
edd16368 | 3831 | spin_lock_init(&h->lock); |
a08a8471 SC |
3832 | spin_lock_init(&h->scan_lock); |
3833 | init_waitqueue_head(&h->scan_wait_queue); | |
3834 | h->scan_finished = 1; /* no scan currently in progress */ | |
edd16368 SC |
3835 | |
3836 | pci_set_drvdata(pdev, h); | |
3837 | memset(h->cmd_pool_bits, 0, | |
3838 | ((h->nr_cmds + BITS_PER_LONG - | |
3839 | 1) / BITS_PER_LONG) * sizeof(unsigned long)); | |
3840 | ||
3841 | hpsa_scsi_setup(h); | |
3842 | ||
3843 | /* Turn the interrupts on so we can service requests */ | |
3844 | h->access.set_intr_mask(h, HPSA_INTR_ON); | |
3845 | ||
303932fd | 3846 | hpsa_put_ctlr_into_performant_mode(h); |
339b2b14 | 3847 | hpsa_hba_inquiry(h); |
edd16368 SC |
3848 | hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ |
3849 | h->busy_initializing = 0; | |
3850 | return 0; | |
3851 | ||
3852 | clean4: | |
33a2ffce | 3853 | hpsa_free_sg_chain_blocks(h); |
edd16368 SC |
3854 | kfree(h->cmd_pool_bits); |
3855 | if (h->cmd_pool) | |
3856 | pci_free_consistent(h->pdev, | |
3857 | h->nr_cmds * sizeof(struct CommandList), | |
3858 | h->cmd_pool, h->cmd_pool_dhandle); | |
3859 | if (h->errinfo_pool) | |
3860 | pci_free_consistent(h->pdev, | |
3861 | h->nr_cmds * sizeof(struct ErrorInfo), | |
3862 | h->errinfo_pool, | |
3863 | h->errinfo_pool_dhandle); | |
303932fd | 3864 | free_irq(h->intr[PERF_MODE_INT], h); |
edd16368 SC |
3865 | clean2: |
3866 | clean1: | |
3867 | h->busy_initializing = 0; | |
3868 | kfree(h); | |
ecd9aad4 | 3869 | return rc; |
edd16368 SC |
3870 | } |
3871 | ||
3872 | static void hpsa_flush_cache(struct ctlr_info *h) | |
3873 | { | |
3874 | char *flush_buf; | |
3875 | struct CommandList *c; | |
3876 | ||
3877 | flush_buf = kzalloc(4, GFP_KERNEL); | |
3878 | if (!flush_buf) | |
3879 | return; | |
3880 | ||
3881 | c = cmd_special_alloc(h); | |
3882 | if (!c) { | |
3883 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); | |
3884 | goto out_of_memory; | |
3885 | } | |
3886 | fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, | |
3887 | RAID_CTLR_LUNID, TYPE_CMD); | |
3888 | hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE); | |
3889 | if (c->err_info->CommandStatus != 0) | |
3890 | dev_warn(&h->pdev->dev, | |
3891 | "error flushing cache on controller\n"); | |
3892 | cmd_special_free(h, c); | |
3893 | out_of_memory: | |
3894 | kfree(flush_buf); | |
3895 | } | |
3896 | ||
3897 | static void hpsa_shutdown(struct pci_dev *pdev) | |
3898 | { | |
3899 | struct ctlr_info *h; | |
3900 | ||
3901 | h = pci_get_drvdata(pdev); | |
3902 | /* Turn the board interrupts off and send the flush cache command | |
3903 | * so that all data in the battery-backed cache is written out | |
3904 | * to disk before the controller shuts down. | |
3905 | */ | |
3906 | hpsa_flush_cache(h); | |
3907 | h->access.set_intr_mask(h, HPSA_INTR_OFF); | |
303932fd | 3908 | free_irq(h->intr[PERF_MODE_INT], h); |
edd16368 SC |
3909 | #ifdef CONFIG_PCI_MSI |
3910 | if (h->msix_vector) | |
3911 | pci_disable_msix(h->pdev); | |
3912 | else if (h->msi_vector) | |
3913 | pci_disable_msi(h->pdev); | |
3914 | #endif /* CONFIG_PCI_MSI */ | |
3915 | } | |
3916 | ||
3917 | static void __devexit hpsa_remove_one(struct pci_dev *pdev) | |
3918 | { | |
3919 | struct ctlr_info *h; | |
3920 | ||
3921 | if (pci_get_drvdata(pdev) == NULL) { | |
3922 | dev_err(&pdev->dev, "unable to remove device\n"); | |
3923 | return; | |
3924 | } | |
3925 | h = pci_get_drvdata(pdev); | |
edd16368 SC |
3926 | hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ |
3927 | hpsa_shutdown(pdev); | |
3928 | iounmap(h->vaddr); | |
204892e9 SC |
3929 | iounmap(h->transtable); |
3930 | iounmap(h->cfgtable); | |
33a2ffce | 3931 | hpsa_free_sg_chain_blocks(h); |
edd16368 SC |
3932 | pci_free_consistent(h->pdev, |
3933 | h->nr_cmds * sizeof(struct CommandList), | |
3934 | h->cmd_pool, h->cmd_pool_dhandle); | |
3935 | pci_free_consistent(h->pdev, | |
3936 | h->nr_cmds * sizeof(struct ErrorInfo), | |
3937 | h->errinfo_pool, h->errinfo_pool_dhandle); | |
303932fd DB |
3938 | pci_free_consistent(h->pdev, h->reply_pool_size, |
3939 | h->reply_pool, h->reply_pool_dhandle); | |
edd16368 | 3940 | kfree(h->cmd_pool_bits); |
303932fd | 3941 | kfree(h->blockFetchTable); |
339b2b14 | 3942 | kfree(h->hba_inquiry_data); |
edd16368 SC |
3943 | /* |
3944 | * Deliberately omit pci_disable_device(): it does something nasty to | |
3945 | * Smart Array controllers that pci_enable_device does not undo | |
3946 | */ | |
3947 | pci_release_regions(pdev); | |
3948 | pci_set_drvdata(pdev, NULL); | |
edd16368 SC |
3949 | kfree(h); |
3950 | } | |
3951 | ||
3952 | static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, | |
3953 | __attribute__((unused)) pm_message_t state) | |
3954 | { | |
3955 | return -ENOSYS; | |
3956 | } | |
3957 | ||
3958 | static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev) | |
3959 | { | |
3960 | return -ENOSYS; | |
3961 | } | |
3962 | ||
3963 | static struct pci_driver hpsa_pci_driver = { | |
3964 | .name = "hpsa", | |
3965 | .probe = hpsa_init_one, | |
3966 | .remove = __devexit_p(hpsa_remove_one), | |
3967 | .id_table = hpsa_pci_device_id, /* id_table */ | |
3968 | .shutdown = hpsa_shutdown, | |
3969 | .suspend = hpsa_suspend, | |
3970 | .resume = hpsa_resume, | |
3971 | }; | |
3972 | ||
303932fd DB |
3973 | /* Fill in bucket_map[], given nsgs (the max number of |
3974 | * scatter gather elements supported) and bucket[], | |
3975 | * which is an array of 8 integers. The bucket[] array | |
3976 | * contains 8 different DMA transfer sizes (in 16 | |
3977 | * byte increments) which the controller uses to fetch | |
3978 | * commands. This function fills in bucket_map[], which | |
3979 | * maps a given number of scatter gather elements to one of | |
3980 | * the 8 DMA transfer sizes. The point of it is to allow the | |
3981 | * controller to only do as much DMA as needed to fetch the | |
3982 | * command, with the DMA transfer size encoded in the lower | |
3983 | * bits of the command address. | |
3984 | */ | |
3985 | static void calc_bucket_map(int bucket[], int num_buckets, | |
3986 | int nsgs, int *bucket_map) | |
3987 | { | |
3988 | int i, j, b, size; | |
3989 | ||
3990 | /* even a command with 0 SGs requires 4 blocks */ | |
3991 | #define MINIMUM_TRANSFER_BLOCKS 4 | |
3992 | #define NUM_BUCKETS 8 | |
3993 | /* Note, bucket_map must have nsgs+1 entries. */ | |
3994 | for (i = 0; i <= nsgs; i++) { | |
3995 | /* Compute size of a command with i SG entries */ | |
3996 | size = i + MINIMUM_TRANSFER_BLOCKS; | |
3997 | b = num_buckets; /* Assume the biggest bucket */ | |
3998 | /* Find the bucket that is just big enough */ | |
3999 | for (j = 0; j < num_buckets; j++) { | |
4000 | if (bucket[j] >= size) { | |
4001 | b = j; | |
4002 | break; | |
4003 | } | |
4004 | } | |
4005 | /* for a command with i SG entries, use bucket b. */ | |
4006 | bucket_map[i] = b; | |
4007 | } | |
4008 | } | |
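/* Worked example of the mapping this builds, using the bft[] values
 * from hpsa_enter_performant_mode() below: with bucket[] =
 * {5, 6, 8, 10, 12, 20, 28, 36} and nsgs = 32, a command with i = 7
 * SG entries needs 7 + MINIMUM_TRANSFER_BLOCKS = 11 sixteen-byte
 * blocks; the first bucket >= 11 is bucket 4 (size 12), so
 * bucket_map[7] = 4.
 */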
4009 | ||
6c311b57 | 4010 | static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h) |
303932fd | 4011 | { |
6c311b57 SC |
4012 | int i; |
4013 | unsigned long register_value; | |
def342bd SC |
4014 | |
4015 | /* This is a bit complicated. There are 8 registers on | |
4016 | * the controller which we write to, to tell it the 8 different |
4017 | * sizes of commands there may be. It's a way of |
4018 | * reducing the DMA done to fetch each command. Encoded into | |
4019 | * each command's tag are 3 bits which communicate to the controller | |
4020 | * which of the eight sizes that command fits within. The size of | |
4021 | * each command depends on how many scatter gather entries there are. | |
4022 | * Each SG entry requires 16 bytes. The eight registers are programmed | |
4023 | * with the number of 16-byte blocks a command of that size requires. | |
4024 | * The smallest command possible requires 5 such 16 byte blocks. | |
4025 | * The largest command possible requires MAXSGENTRIES + 4 16-byte |
4026 | * blocks. Note, this only extends to the SG entries contained | |
4027 | * within the command block, and does not extend to chained blocks | |
4028 | * of SG elements. bft[] contains the eight values we write to | |
4029 | * the registers. They are not evenly distributed, but have more | |
4030 | * sizes for small commands, and fewer sizes for larger commands. | |
4031 | */ | |
4032 | int bft[8] = {5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4}; | |
4033 | BUILD_BUG_ON(28 > MAXSGENTRIES + 4); | |
303932fd DB |
4034 | /* 5 = 1 s/g entry or 4k |
4035 | * 6 = 2 s/g entries or 8k | |
4036 | * 8 = 4 s/g entries or 16k | |
4037 | * 10 = 6 s/g entries or 24k | |
4038 | */ | |
303932fd DB |
4039 | |
4040 | h->reply_pool_wraparound = 1; /* spec: init to 1 */ | |
4041 | ||
4042 | /* Controller spec: zero out this buffer. */ | |
4043 | memset(h->reply_pool, 0, h->reply_pool_size); | |
4044 | h->reply_pool_head = h->reply_pool; | |
4045 | ||
303932fd DB |
4046 | bft[7] = h->max_sg_entries + 4; |
4047 | calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable); | |
4048 | for (i = 0; i < 8; i++) | |
4049 | writel(bft[i], &h->transtable->BlockFetch[i]); | |
4050 | ||
4051 | /* size of controller ring buffer */ | |
4052 | writel(h->max_commands, &h->transtable->RepQSize); | |
4053 | writel(1, &h->transtable->RepQCount); | |
4054 | writel(0, &h->transtable->RepQCtrAddrLow32); | |
4055 | writel(0, &h->transtable->RepQCtrAddrHigh32); | |
4056 | writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32); | |
4057 | writel(0, &h->transtable->RepQAddr0High32); | |
4058 | writel(CFGTBL_Trans_Performant, | |
4059 | &(h->cfgtable->HostWrite.TransportRequest)); | |
4060 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); | |
3f4336f3 | 4061 | hpsa_wait_for_mode_change_ack(h); |
303932fd DB |
4062 | register_value = readl(&(h->cfgtable->TransportActive)); |
4063 | if (!(register_value & CFGTBL_Trans_Performant)) { | |
4064 | dev_warn(&h->pdev->dev, "unable to get board into" | |
4065 | " performant mode\n"); | |
4066 | return; | |
4067 | } | |
6c311b57 SC |
4068 | } |
4069 | ||
4070 | static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) | |
4071 | { | |
4072 | u32 trans_support; | |
4073 | ||
4074 | trans_support = readl(&(h->cfgtable->TransportSupport)); | |
4075 | if (!(trans_support & PERFORMANT_MODE)) | |
4076 | return; | |
4077 | ||
cba3d38b | 4078 | hpsa_get_max_perf_mode_cmds(h); |
6c311b57 SC |
4079 | h->max_sg_entries = 32; |
4080 | /* Performant mode ring buffer and supporting data structures */ | |
4081 | h->reply_pool_size = h->max_commands * sizeof(u64); | |
4082 | h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size, | |
4083 | &(h->reply_pool_dhandle)); | |
4084 | ||
4085 | /* Need a block fetch table for performant mode */ | |
4086 | h->blockFetchTable = kmalloc(((h->max_sg_entries+1) * | |
4087 | sizeof(u32)), GFP_KERNEL); | |
4088 | ||
4089 | if ((h->reply_pool == NULL) | |
4090 | || (h->blockFetchTable == NULL)) | |
4091 | goto clean_up; | |
4092 | ||
4093 | hpsa_enter_performant_mode(h); | |
303932fd DB |
4094 | |
4095 | /* Change the access methods to the performant access methods */ | |
4096 | h->access = SA5_performant_access; | |
4097 | h->transMethod = CFGTBL_Trans_Performant; | |
4098 | ||
4099 | return; | |
4100 | ||
4101 | clean_up: | |
4102 | if (h->reply_pool) | |
4103 | pci_free_consistent(h->pdev, h->reply_pool_size, | |
4104 | h->reply_pool, h->reply_pool_dhandle); | |
4105 | kfree(h->blockFetchTable); | |
4106 | } | |
4107 | ||
edd16368 SC |
4108 | /* |
4109 | * This is it. Register the PCI driver information for the cards we control; | |
4110 | * the OS will call our registered routines when it finds one of our cards. | |
4111 | */ | |
4112 | static int __init hpsa_init(void) | |
4113 | { | |
31468401 | 4114 | return pci_register_driver(&hpsa_pci_driver); |
edd16368 SC |
4115 | } |
4116 | ||
4117 | static void __exit hpsa_cleanup(void) | |
4118 | { | |
4119 | pci_unregister_driver(&hpsa_pci_driver); | |
edd16368 SC |
4120 | } |
4121 | ||
4122 | module_init(hpsa_init); | |
4123 | module_exit(hpsa_cleanup); |