/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to [email protected]
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "2.0.2-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};
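
/*
 * The PCI_ANY_ID entry above is a class match: it catches any HP device
 * whose PCI class is PCI_CLASS_STORAGE_RAID.  Boards found only through
 * this wildcard are attached solely when the hpsa_allow_any module
 * parameter is set.
 */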

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324a103C, "Smart Array P712m", &SA5_access},
	{0x324b103C, "Smart Array P711m", &SA5_access},
	{0x3350103C, "Smart Array", &SA5_access},
	{0x3351103C, "Smart Array", &SA5_access},
	{0x3352103C, "Smart Array", &SA5_access},
	{0x3353103C, "Smart Array", &SA5_access},
	{0x3354103C, "Smart Array", &SA5_access},
	{0x3355103C, "Smart Array", &SA5_access},
	{0x3356103C, "Smart Array", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
	int cmd_type);

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int *bucket_map);
static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h);
static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
	void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
	void __iomem *vaddr, int wait_for_ready);
#define BOARD_NOT_READY 0
#define BOARD_READY 1

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

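/*
 * Fixed-format sense data layout assumed below (per SPC): the sense key
 * is the low nibble of byte 2, the additional sense code (ASC) is byte
 * 12, and the additional sense code qualifier (ASCQ) is byte 13.
 */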
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, "hpsa%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev, "hpsa%d: LUN failure "
			"detected, action required\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
			"changed, action required\n", h->ctlr);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the
	 * MSA2012.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, "hpsa%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, "hpsa%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, "hpsa%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

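/*
 * In the controller's 8-byte LUN addresses the top two bits of byte 3
 * carry the address mode; mode 01 (0x40) denotes a logical volume, as
 * opposed to a physical or masked device address.
 */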
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"UNKNOWN"
};
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	NULL,
};

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= "hpsa",
	.proc_name		= "hpsa",
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs		= hpsa_sdev_attrs,
	.shost_attrs		= hpsa_shost_attrs,
};


/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
	list_add_tail(&c->list, list);
}

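/*
 * next_command() pulls completions off the performant-mode reply ring.
 * The controller posts each completed tag with a "cycle" bit in bit 0;
 * an entry is fresh only while that bit equals h->reply_pool_wraparound,
 * and the expected value toggles every time the head wraps back to the
 * start of the ring, so stale entries from the previous pass read as
 * FIFO_EMPTY.
 */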
static inline u32 next_command(struct ctlr_info *h)
{
	u32 a;

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h);

	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
		(h->reply_pool_head)++;
		h->commands_outstanding--;
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
		h->reply_pool_head = h->reply_pool;
		h->reply_pool_wraparound ^= 1;
	}
	return a;
}

/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
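/*
 * Illustrative example (values hypothetical): a command whose inline SG
 * count maps to blockFetchTable[] entry 2 gets
 * busaddr |= 1 | (2 << 1) = 0x5; bit 0 selects the performant "pull"
 * path and bits 3-1 tell the hardware how much of the command to fetch.
 */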
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant))
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	set_performant_mode(h, c);
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	start_io(h);
	spin_unlock_irqrestore(&h->lock, flags);
}

static inline void removeQ(struct CommandList *c)
{
	if (WARN_ON(list_empty(&c->list)))
		return;
	list_del_init(&c->list);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

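/*
 * hpsa_find_target_lun() below allocates target numbers from a bitmap
 * of targets already in use; HPSA_MAX_SCSI_DEVS_PER_HBA counts bits, so
 * the byte-wise memset clears HPSA_MAX_SCSI_DEVS_PER_HBA >> 3 bytes.
 */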
static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA);

	memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			set_bit(h->dev[i]->target, lun_taken);
	}

	for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) {
		if (!test_bit(i, lun_taken)) {
			/* *bus = 1; */
			*target = i;
			*lun = 0;
			found = 1;
			break;
		}
	}
	return !found;
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
			scsi_device_type(device->devtype), hostno,
			device->bus, device->target, device->lun);
	return 0;
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;
	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.  If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i]))
				return DEVICE_SAME;
			else
				return DEVICE_CHANGED;
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

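/*
 * adjust_hpsa_scsi_table() is a two-pass diff against the fresh scan in
 * sd[]: pass 1 walks h->dev[] and removes or replaces anything missing
 * from or changed in sd[]; pass 2 walks sd[] and adds anything not yet
 * in h->dev[].  Only after h->devlock is dropped is the SCSI midlayer
 * told about the removals and additions.
 */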
static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA,
		GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA,
		GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */

	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * future cmds to this device will get selection
			 * timeout as if the device was gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				" for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL)
		sdev->hostdata = sd;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}

static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
						h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}

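/*
 * SG chaining in brief: a command's inline SG table holds at most
 * h->max_cmd_sg_entries descriptors.  For larger transfers the excess
 * descriptors live in the per-command chain block allocated above, and
 * the last inline descriptor is repurposed (HPSA_SG_CHAIN) to point at
 * that block rather than at data.
 */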
static void hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = HPSA_SG_CHAIN;
	chain_sg->Len = sizeof(*chain_sg) *
		(c->Header.SGTotal - h->max_cmd_sg_entries);
	temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
				PCI_DMA_TODEVICE);
	chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
	chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;
	union u64bit temp64;

	if (c->Header.SGTotal <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	temp64.val32.lower = chain_sg->Addr.lower;
	temp64.val32.upper = chain_sg->Addr.upper;
	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}

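/*
 * cmd->result layout used by complete_scsi_command(): the host byte
 * (DID_*) occupies bits 23-16, the message byte (COMMAND_COMPLETE)
 * bits 15-8, and the SCSI status byte bits 7-0.
 */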
static void complete_scsi_command(struct CommandList *cp)
{
	struct scsi_cmnd *cmd;
	struct ctlr_info *h;
	struct ErrorInfo *ei;

	unsigned char sense_key;
	unsigned char asc;      /* additional sense code */
	unsigned char ascq;     /* additional sense code qualifier */
	unsigned long sense_data_size;

	ei = cp->err_info;
	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
	h = cp->h;

	scsi_dma_unmap(cmd); /* undo the DMA mappings */
	if (cp->Header.SGTotal > h->max_cmd_sg_entries)
		hpsa_unmap_sg_chain_block(h, cp);

	cmd->result = (DID_OK << 16); 		/* host byte */
	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
	cmd->result |= ei->ScsiStatus;

	/* copy the sense data whether we need to or not. */
	if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
		sense_data_size = SCSI_SENSE_BUFFERSIZE;
	else
		sense_data_size = sizeof(ei->SenseInfo);
	if (ei->SenseLen < sense_data_size)
		sense_data_size = ei->SenseLen;

	memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
	scsi_set_resid(cmd, ei->ResidualCnt);

	if (ei->CommandStatus == 0) {
		cmd->scsi_done(cmd);
		cmd_free(h, cp);
		return;
	}

	/* an error has occurred */
	switch (ei->CommandStatus) {

	case CMD_TARGET_STATUS:
		if (ei->ScsiStatus) {
			/* Get sense key */
			sense_key = 0xf & ei->SenseInfo[2];
			/* Get additional sense code */
			asc = ei->SenseInfo[12];
			/* Get additional sense code qualifier */
			ascq = ei->SenseInfo[13];
		}

		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
			if (check_for_unit_attention(h, cp)) {
				cmd->result = DID_SOFT_ERROR << 16;
				break;
			}
			if (sense_key == ILLEGAL_REQUEST) {
				/*
				 * SCSI REPORT_LUNS is commonly unsupported on
				 * Smart Array.  Suppress noisy complaint.
				 */
				if (cp->Request.CDB[0] == REPORT_LUNS)
					break;

				/* If ASC/ASCQ indicate Logical Unit
				 * Not Supported condition,
				 */
				if ((asc == 0x25) && (ascq == 0x0)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition\n", cp);
					break;
				}
			}

			if (sense_key == NOT_READY) {
				/* If Sense is Not Ready, Logical Unit
				 * Not ready, Manual Intervention
				 * required
				 */
				if ((asc == 0x04) && (ascq == 0x03)) {
					dev_warn(&h->pdev->dev, "cp %p "
						"has check condition: unit "
						"not ready, manual "
						"intervention required\n", cp);
					break;
				}
			}
			if (sense_key == ABORTED_COMMAND) {
				/* Aborted command is retryable */
				dev_warn(&h->pdev->dev, "cp %p "
					"has check condition: aborted command: "
					"ASC: 0x%x, ASCQ: 0x%x\n",
					cp, asc, ascq);
				cmd->result = DID_SOFT_ERROR << 16;
				break;
			}
			/* Must be some other type of check condition */
			dev_warn(&h->pdev->dev, "cp %p has check condition: "
					"unknown type: "
					"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
					"Returning result: 0x%x, "
					"cmd=[%02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x %02x "
					"%02x %02x %02x %02x %02x]\n",
					cp, sense_key, asc, ascq,
					cmd->result,
					cmd->cmnd[0], cmd->cmnd[1],
					cmd->cmnd[2], cmd->cmnd[3],
					cmd->cmnd[4], cmd->cmnd[5],
					cmd->cmnd[6], cmd->cmnd[7],
					cmd->cmnd[8], cmd->cmnd[9],
					cmd->cmnd[10], cmd->cmnd[11],
					cmd->cmnd[12], cmd->cmnd[13],
					cmd->cmnd[14], cmd->cmnd[15]);
			break;
		}


		/* Problem was not a check condition
		 * Pass it up to the upper layers...
		 */
		if (ei->ScsiStatus) {
			dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
				"Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
				"Returning result: 0x%x\n",
				cp, ei->ScsiStatus,
				sense_key, asc, ascq,
				cmd->result);
		} else {  /* scsi status is zero??? How??? */
			dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
				"Returning no connection.\n", cp);

			/* Ordinarily, this case should never happen,
			 * but there is a bug in some released firmware
			 * revisions that allows it to happen if, for
			 * example, a 4100 backplane loses power and
			 * the tape drive is in it.  We assume that
			 * it's a fatal error of some kind because we
			 * can't show that it wasn't.  We will make it
			 * look like selection timeout since that is
			 * the most common reason for this to occur,
			 * and it's severe enough.
			 */

			cmd->result = DID_NO_CONNECT << 16;
		}
		break;

	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(&h->pdev->dev, "cp %p has"
			" completed with data overrun "
			"reported\n", cp);
		break;
	case CMD_INVALID: {
		/* print_bytes(cp, sizeof(*cp), 1, 0);
		print_cmd(cp); */
		/* We get CMD_INVALID if you address a non-existent device
		 * instead of a selection timeout (no response).  You will
		 * see this if you yank out a drive, then try to access it.
		 * This is kind of a shame because it means that any other
		 * CMD_INVALID (e.g. driver bug) will get interpreted as a
		 * missing target. */
		cmd->result = DID_NO_CONNECT << 16;
	}
		break;
	case CMD_PROTOCOL_ERR:
		dev_warn(&h->pdev->dev, "cp %p has "
			"protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		cmd->result = DID_ABORT << 16;
		dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
			cp, ei->ScsiStatus);
		break;
	case CMD_ABORT_FAILED:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		cmd->result = DID_RESET << 16;
		dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
			"abort\n", cp);
		break;
	case CMD_TIMEOUT:
		cmd->result = DID_TIME_OUT << 16;
		dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "Command unabortable\n");
		break;
	default:
		cmd->result = DID_ERROR << 16;
		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
				cp, ei->CommandStatus);
	}
	cmd->scsi_done(cmd);
	cmd_free(h, cp);
}

static int hpsa_scsi_detect(struct ctlr_info *h)
{
	struct Scsi_Host *sh;
	int error;

	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
	if (sh == NULL)
		goto fail;

	sh->io_port = 0;
	sh->n_io_port = 0;
	sh->this_id = -1;
	sh->max_channel = 3;
	sh->max_cmd_len = MAX_COMMAND_SIZE;
	sh->max_lun = HPSA_MAX_LUN;
	sh->max_id = HPSA_MAX_LUN;
	sh->can_queue = h->nr_cmds;
	sh->cmd_per_lun = h->nr_cmds;
	sh->sg_tablesize = h->maxsgentries;
	h->scsi_host = sh;
	sh->hostdata[0] = (unsigned long) h;
	sh->irq = h->intr[h->intr_mode];
	sh->unique_id = sh->irq;
	error = scsi_add_host(sh, &h->pdev->dev);
	if (error)
		goto fail_host_put;
	scsi_scan_host(sh);
	return 0;

fail_host_put:
	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host"
		" failed for controller %d\n", h->ctlr);
	scsi_host_put(sh);
	return error;
fail:
	dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc"
		" failed for controller %d\n", h->ctlr);
	return -ENOMEM;
}

static void hpsa_pci_unmap(struct pci_dev *pdev,
	struct CommandList *c, int sg_used, int data_direction)
{
	int i;
	union u64bit addr64;

	for (i = 0; i < sg_used; i++) {
		addr64.val32.lower = c->SG[i].Addr.lower;
		addr64.val32.upper = c->SG[i].Addr.upper;
		pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
			data_direction);
	}
}

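/*
 * hpsa_map_one() below splits the 64-bit DMA address across two 32-bit
 * descriptor fields.  Worked example (address hypothetical):
 * addr64 = 0x0000001234abcd00 yields Addr.lower = 0x34abcd00 and
 * Addr.upper = 0x00000012.
 */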
static void hpsa_map_one(struct pci_dev *pdev,
		struct CommandList *cp,
		unsigned char *buf,
		size_t buflen,
		int data_direction)
{
	u64 addr64;

	if (buflen == 0 || data_direction == PCI_DMA_NONE) {
		cp->Header.SGList = 0;
		cp->Header.SGTotal = 0;
		return;
	}

	addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
	cp->SG[0].Addr.lower =
	  (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Addr.upper =
	  (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
	cp->SG[0].Len = buflen;
	cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */
	cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
}

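/*
 * hpsa_scsi_do_simple_cmd_core() issues a command and sleeps on an
 * on-stack completion; the driver's command-completion path signals
 * c->waiting once the controller posts the tag, so synchronous internal
 * commands block rather than poll.
 */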
static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
	struct CommandList *c)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);
}

static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction)
{
	int retry_count = 0;

	do {
		memset(c->err_info, 0, sizeof(*c->err_info));
		hpsa_scsi_do_simple_cmd_core(h, c);
		retry_count++;
	} while (check_for_unit_attention(h, c) && retry_count <= 3);
	hpsa_pci_unmap(h->pdev, c, 1, data_direction);
}

static void hpsa_scsi_interpret_error(struct CommandList *cp)
{
	struct ErrorInfo *ei;
	struct device *d = &cp->h->pdev->dev;

	ei = cp->err_info;
	switch (ei->CommandStatus) {
	case CMD_TARGET_STATUS:
		dev_warn(d, "cmd %p has completed with errors\n", cp);
		dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
				ei->ScsiStatus);
		if (ei->ScsiStatus == 0)
			dev_warn(d, "SCSI status is abnormally zero.  "
			"(probably indicates selection timeout "
			"reported incorrectly due to a known "
			"firmware bug, circa July, 2001.)\n");
		break;
	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
		dev_info(d, "UNDERRUN\n");
		break;
	case CMD_DATA_OVERRUN:
		dev_warn(d, "cp %p has completed with data overrun\n", cp);
		break;
	case CMD_INVALID: {
		/* controller unfortunately reports SCSI passthru's
		 * to non-existent targets as invalid commands.
		 */
		dev_warn(d, "cp %p is reported invalid (probably means "
			"target device no longer present)\n", cp);
		/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
		print_cmd(cp);  */
		}
		break;
	case CMD_PROTOCOL_ERR:
		dev_warn(d, "cp %p has protocol error\n", cp);
		break;
	case CMD_HARDWARE_ERR:
		/* cmd->result = DID_ERROR << 16; */
		dev_warn(d, "cp %p had hardware error\n", cp);
		break;
	case CMD_CONNECTION_LOST:
		dev_warn(d, "cp %p had connection lost\n", cp);
		break;
	case CMD_ABORTED:
		dev_warn(d, "cp %p was aborted\n", cp);
		break;
	case CMD_ABORT_FAILED:
		dev_warn(d, "cp %p reports abort failed\n", cp);
		break;
	case CMD_UNSOLICITED_ABORT:
		dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
		break;
	case CMD_TIMEOUT:
		dev_warn(d, "cp %p timed out\n", cp);
		break;
	case CMD_UNABORTABLE:
		dev_warn(d, "Command unabortable\n");
		break;
	default:
		dev_warn(d, "cp %p returned unknown status %x\n", cp,
				ei->CommandStatus);
	}
}

static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char page, unsigned char *buf,
	unsigned char bufsize)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
	ei = c->err_info;
	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;

	c = cmd_special_alloc(h);

	if (c == NULL) {			/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
	hpsa_scsi_do_simple_cmd_core(h, c);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	if (ei->CommandStatus != 0) {
		hpsa_scsi_interpret_error(c);
		rc = -1;
	}
	cmd_special_free(h, c);
	return rc;
}

static void hpsa_get_raid_level(struct ctlr_info *h,
	unsigned char *scsi3addr, unsigned char *raid_level)
{
	int rc;
	unsigned char *buf;

	*raid_level = RAID_UNKNOWN;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
	if (rc == 0)
		*raid_level = buf[8];
	if (*raid_level > RAID_UNKNOWN)
		*raid_level = RAID_UNKNOWN;
	kfree(buf);
	return;
}

/* Get the device id from inquiry page 0x83 */
static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
	unsigned char *device_id, int buflen)
{
	int rc;
	unsigned char *buf;

	if (buflen > 16)
		buflen = 16;
	buf = kzalloc(64, GFP_KERNEL);
	if (!buf)
		return -1;
	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
	if (rc == 0)
		memcpy(device_id, &buf[8], buflen);
	kfree(buf);
	return rc != 0;
}

1498 | static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, | |
1499 | struct ReportLUNdata *buf, int bufsize, | |
1500 | int extended_response) | |
1501 | { | |
1502 | int rc = IO_OK; | |
1503 | struct CommandList *c; | |
1504 | unsigned char scsi3addr[8]; | |
1505 | struct ErrorInfo *ei; | |
1506 | ||
1507 | c = cmd_special_alloc(h); | |
1508 | if (c == NULL) { /* trouble... */ | |
1509 | dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); | |
1510 | return -ENOMEM; | |
1511 | } | |
e89c0ae7 SC |
1512 | /* address the controller */ |
1513 | memset(scsi3addr, 0, sizeof(scsi3addr)); | |
edd16368 SC |
1514 | fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, |
1515 | buf, bufsize, 0, scsi3addr, TYPE_CMD); | |
1516 | if (extended_response) | |
1517 | c->Request.CDB[1] = extended_response; | |
1518 | hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); | |
1519 | ei = c->err_info; | |
1520 | if (ei->CommandStatus != 0 && | |
1521 | ei->CommandStatus != CMD_DATA_UNDERRUN) { | |
1522 | hpsa_scsi_interpret_error(c); | |
1523 | rc = -1; | |
1524 | } | |
1525 | cmd_special_free(h, c); | |
1526 | return rc; | |
1527 | } | |
1528 | ||
1529 | static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, | |
1530 | struct ReportLUNdata *buf, | |
1531 | int bufsize, int extended_response) | |
1532 | { | |
1533 | return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response); | |
1534 | } | |
1535 | ||
1536 | static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h, | |
1537 | struct ReportLUNdata *buf, int bufsize) | |
1538 | { | |
1539 | return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0); | |
1540 | } | |
1541 | ||
1542 | static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device, | |
1543 | int bus, int target, int lun) | |
1544 | { | |
1545 | device->bus = bus; | |
1546 | device->target = target; | |
1547 | device->lun = lun; | |
1548 | } | |
1549 | ||
1550 | static int hpsa_update_device_info(struct ctlr_info *h, | |
1551 | unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device) | |
1552 | { | |
1553 | #define OBDR_TAPE_INQ_SIZE 49 | |
ea6d3bc3 | 1554 | unsigned char *inq_buff; |
edd16368 | 1555 | |
ea6d3bc3 | 1556 | inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); |
edd16368 SC |
1557 | if (!inq_buff) |
1558 | goto bail_out; | |
1559 | ||
edd16368 SC |
1560 | /* Do an inquiry to the device to see what it is. */ |
1561 | if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, | |
1562 | (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { | |
1563 | /* Inquiry failed (msg printed already) */ | |
1564 | dev_err(&h->pdev->dev, | |
1565 | "hpsa_update_device_info: inquiry failed\n"); | |
1566 | goto bail_out; | |
1567 | } | |
1568 | ||
edd16368 SC |
1569 | this_device->devtype = (inq_buff[0] & 0x1f); |
1570 | memcpy(this_device->scsi3addr, scsi3addr, 8); | |
1571 | memcpy(this_device->vendor, &inq_buff[8], | |
1572 | sizeof(this_device->vendor)); | |
1573 | memcpy(this_device->model, &inq_buff[16], | |
1574 | sizeof(this_device->model)); | |
edd16368 SC |
1575 | memset(this_device->device_id, 0, |
1576 | sizeof(this_device->device_id)); | |
1577 | hpsa_get_device_id(h, scsi3addr, this_device->device_id, | |
1578 | sizeof(this_device->device_id)); | |
1579 | ||
1580 | if (this_device->devtype == TYPE_DISK && | |
1581 | is_logical_dev_addr_mode(scsi3addr)) | |
1582 | hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); | |
1583 | else | |
1584 | this_device->raid_level = RAID_UNKNOWN; | |
1585 | ||
1586 | kfree(inq_buff); | |
1587 | return 0; | |
1588 | ||
1589 | bail_out: | |
1590 | kfree(inq_buff); | |
1591 | return 1; | |
1592 | } | |
1593 | ||
1594 | static unsigned char *msa2xxx_model[] = { | |
1595 | "MSA2012", | |
1596 | "MSA2024", | |
1597 | "MSA2312", | |
1598 | "MSA2324", | |
fda38518 | 1599 | "P2000 G3 SAS", |
edd16368 SC |
1600 | NULL, |
1601 | }; | |
1602 | ||
1603 | static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) | |
1604 | { | |
1605 | int i; | |
1606 | ||
1607 | for (i = 0; msa2xxx_model[i]; i++) | |
1608 | if (strncmp(device->model, msa2xxx_model[i], | |
1609 | strlen(msa2xxx_model[i])) == 0) | |
1610 | return 1; | |
1611 | return 0; | |
1612 | } | |
1613 | ||
1614 | /* Helper function to assign bus, target, lun mapping of devices. | |
1615 | * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical | |
1616 | * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3. | |
1617 | * Logical drive target and lun are assigned at this time, but | |
1618 | * physical device lun and target assignment are deferred (assigned | |
1619 | * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) | |
1620 | */ | |
1621 | static void figure_bus_target_lun(struct ctlr_info *h, | |
01a02ffc | 1622 | u8 *lunaddrbytes, int *bus, int *target, int *lun, |
edd16368 SC |
1623 | struct hpsa_scsi_dev_t *device) |
1624 | { | |
01a02ffc | 1625 | u32 lunid; |
edd16368 SC |
1626 | |
1627 | if (is_logical_dev_addr_mode(lunaddrbytes)) { | |
1628 | /* logical device */ | |
339b2b14 SC |
1629 | if (unlikely(is_scsi_rev_5(h))) { |
1630 | /* p1210m, logical drives lun assignments | |
1631 | * match SCSI REPORT LUNS data. | |
1632 | */ | |
1633 | lunid = le32_to_cpu(*((__le32 *) lunaddrbytes)); | |
edd16368 | 1634 | *bus = 0; |
339b2b14 SC |
1635 | *target = 0; |
1636 | *lun = (lunid & 0x3fff) + 1; | |
1637 | } else { | |
1638 | /* not p1210m... */ | |
1639 | lunid = le32_to_cpu(*((__le32 *) lunaddrbytes)); | |
1640 | if (is_msa2xxx(h, device)) { | |
1641 | /* msa2xxx way, put logicals on bus 1 | |
1642 | * and match the target/lun numbers the | |
1643 | * box reports. | |
1644 | */ | |
1645 | *bus = 1; | |
1646 | *target = (lunid >> 16) & 0x3fff; | |
1647 | *lun = lunid & 0x00ff; | |
1648 | } else { | |
1649 | /* Traditional smart array way. */ | |
1650 | *bus = 0; | |
1651 | *lun = 0; | |
1652 | *target = lunid & 0x3fff; | |
1653 | } | |
edd16368 SC |
1654 | } |
1655 | } else { | |
1656 | /* physical device */ | |
1657 | if (is_hba_lunid(lunaddrbytes)) | |
339b2b14 SC |
1658 | if (unlikely(is_scsi_rev_5(h))) { |
1659 | *bus = 0; /* put p1210m ctlr at 0,0,0 */ | |
1660 | *target = 0; | |
1661 | *lun = 0; | |
1662 | return; | |
1663 | } else | |
1664 | *bus = 3; /* traditional smartarray */ | |
edd16368 | 1665 | else |
339b2b14 | 1666 | *bus = 2; /* physical disk */ |
edd16368 SC |
1667 | *target = -1; |
1668 | *lun = -1; /* we will fill these in later. */ | |
1669 | } | |
1670 | } | |
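/*
 * Worked example for the MSA2xxx branch above (illustrative value,
 * not taken from real hardware): a REPORT LUNS entry that decodes to
 * lunid == 0x00050002 yields
 *
 *	*bus    = 1
 *	*target = (0x00050002 >> 16) & 0x3fff == 5
 *	*lun    = 0x00050002 & 0x00ff         == 2
 *
 * so the volume shows up at 1:5:2, matching the target/lun numbers
 * the box itself reports.
 */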
1671 | ||
1672 | /* | |
1673 | * If there is no LUN 0 on a target, Linux won't find any devices. | |
1674 | * For the MSA2xxx boxes, we have to manually detect the enclosure | |
1675 | * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report | |
1676 | * it for some reason. *tmpdevice is the target we're adding, | |
1677 | * this_device is a pointer into the current element of currentsd[] | |
1678 | * that we're building up in update_scsi_devices(), below. | |
1679 | * lunzerobits is a bitmap that tracks which targets already have a | |
1680 | * lun 0 assigned. | |
1681 | * Returns 1 if an enclosure was added, 0 if not. | |
1682 | */ | |
1683 | static int add_msa2xxx_enclosure_device(struct ctlr_info *h, | |
1684 | struct hpsa_scsi_dev_t *tmpdevice, | |
01a02ffc | 1685 | struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes, |
edd16368 SC |
1686 | int bus, int target, int lun, unsigned long lunzerobits[], |
1687 | int *nmsa2xxx_enclosures) | |
1688 | { | |
1689 | unsigned char scsi3addr[8]; | |
1690 | ||
1691 | if (test_bit(target, lunzerobits)) | |
1692 | return 0; /* There is already a lun 0 on this target. */ | |
1693 | ||
1694 | if (!is_logical_dev_addr_mode(lunaddrbytes)) | |
1695 | return 0; /* It's the logical targets that may lack lun 0. */ | |
1696 | ||
1697 | if (!is_msa2xxx(h, tmpdevice)) | |
1698 | return 0; /* It's only the MSA2xxx that have this problem. */ | |
1699 | ||
1700 | if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */ | |
1701 | return 0; | |
1702 | ||
c4f8a299 SC |
1703 | memset(scsi3addr, 0, 8); |
1704 | scsi3addr[3] = target; | |
edd16368 SC |
1705 | if (is_hba_lunid(scsi3addr)) |
1706 | return 0; /* Don't add the RAID controller here. */ | |
1707 | ||
339b2b14 SC |
1708 | if (is_scsi_rev_5(h)) |
1709 | return 0; /* p1210m doesn't need to do this. */ | |
1710 | ||
edd16368 SC |
1711 | #define MAX_MSA2XXX_ENCLOSURES 32 |
1712 | if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) { | |
1713 | dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX " | |
1714 | "enclosures exceeded. Check your hardware " | |
1715 | "configuration."); | |
1716 | return 0; | |
1717 | } | |
1718 | ||
edd16368 SC |
1719 | if (hpsa_update_device_info(h, scsi3addr, this_device)) |
1720 | return 0; | |
1721 | (*nmsa2xxx_enclosures)++; | |
1722 | hpsa_set_bus_target_lun(this_device, bus, target, 0); | |
1723 | set_bit(target, lunzerobits); | |
1724 | return 1; | |
1725 | } | |
1726 | ||
1727 | /* | |
1728 | * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev, | |
1729 | * logdev. The numbers of LUNs in physdev and logdev are returned | |
1730 | * in *nphysicals and *nlogicals, respectively. | |
1731 | * Returns 0 on success, -1 otherwise. | |
1732 | */ | |
1733 | static int hpsa_gather_lun_info(struct ctlr_info *h, | |
1734 | int reportlunsize, | |
01a02ffc SC |
1735 | struct ReportLUNdata *physdev, u32 *nphysicals, |
1736 | struct ReportLUNdata *logdev, u32 *nlogicals) | |
edd16368 SC |
1737 | { |
1738 | if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) { | |
1739 | dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); | |
1740 | return -1; | |
1741 | } | |
6df1e954 | 1742 | *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8; |
edd16368 SC |
1743 | if (*nphysicals > HPSA_MAX_PHYS_LUN) { |
1744 | dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded." | |
1745 | " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, | |
1746 | *nphysicals - HPSA_MAX_PHYS_LUN); | |
1747 | *nphysicals = HPSA_MAX_PHYS_LUN; | |
1748 | } | |
1749 | if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) { | |
1750 | dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); | |
1751 | return -1; | |
1752 | } | |
6df1e954 | 1753 | *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8; |
edd16368 SC |
1754 | /* Reject Logicals in excess of our max capability. */ |
1755 | if (*nlogicals > HPSA_MAX_LUN) { | |
1756 | dev_warn(&h->pdev->dev, | |
1757 | "maximum logical LUNs (%d) exceeded. " | |
1758 | "%d LUNs ignored.\n", HPSA_MAX_LUN, | |
1759 | *nlogicals - HPSA_MAX_LUN); | |
1760 | *nlogicals = HPSA_MAX_LUN; | |
1761 | } | |
1762 | if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) { | |
1763 | dev_warn(&h->pdev->dev, | |
1764 | "maximum logical + physical LUNs (%d) exceeded. " | |
1765 | "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, | |
1766 | *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN); | |
1767 | *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals; | |
1768 | } | |
1769 | return 0; | |
1770 | } | |
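/*
 * Note on the arithmetic above (an assumption about the CISS report
 * format, consistent with the be32_to_cpu()/8 math): LUNListLength is
 * a big-endian byte count and each LUN entry is 8 bytes, so e.g. a
 * returned length of 24 means 24 / 8 == 3 LUNs.
 */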
1771 | ||
339b2b14 SC |
1772 | static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, | |
1773 | int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list, | |
1774 | struct ReportLUNdata *logdev_list) | |
1775 | { | |
1776 | /* Helper function: figure out where the LUN ID info is coming from, | |
1777 | * given index i, the lists of physical and logical devices, and where | |
1778 | * in the list the RAID controller is supposed to appear (first or last). | |
1779 | */ | |
1780 | ||
1781 | int logicals_start = nphysicals + (raid_ctlr_position == 0); | |
1782 | int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0); | |
1783 | ||
1784 | if (i == raid_ctlr_position) | |
1785 | return RAID_CTLR_LUNID; | |
1786 | ||
1787 | if (i < logicals_start) | |
1788 | return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0]; | |
1789 | ||
1790 | if (i < last_device) | |
1791 | return &logdev_list->LUN[i - nphysicals - | |
1792 | (raid_ctlr_position == 0)][0]; | |
1793 | BUG(); | |
1794 | return NULL; | |
1795 | } | |
1796 | ||
edd16368 SC |
1797 | static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) |
1798 | { | |
1799 | /* the idea here is that we could get notified | |
1800 | * that some devices have changed, so we do report | |
1801 | * physical LUNs and report logical LUNs commands, and adjust | |
1802 | * our list of devices accordingly. | |
1803 | * | |
1804 | * The scsi3addr's of devices won't change so long as the | |
1805 | * adapter is not reset. That means we can rescan and | |
1806 | * tell which devices we already know about, vs. new | |
1807 | * devices, vs. disappearing devices. | |
1808 | */ | |
1809 | struct ReportLUNdata *physdev_list = NULL; | |
1810 | struct ReportLUNdata *logdev_list = NULL; | |
1811 | unsigned char *inq_buff = NULL; | |
01a02ffc SC |
1812 | u32 nphysicals = 0; |
1813 | u32 nlogicals = 0; | |
1814 | u32 ndev_allocated = 0; | |
edd16368 SC |
1815 | struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; |
1816 | int ncurrent = 0; | |
1817 | int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8; | |
1818 | int i, nmsa2xxx_enclosures, ndevs_to_allocate; | |
1819 | int bus, target, lun; | |
339b2b14 | 1820 | int raid_ctlr_position; |
edd16368 SC |
1821 | DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR); |
1822 | ||
1823 | currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA, | |
1824 | GFP_KERNEL); | |
1825 | physdev_list = kzalloc(reportlunsize, GFP_KERNEL); | |
1826 | logdev_list = kzalloc(reportlunsize, GFP_KERNEL); | |
1827 | inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); | |
1828 | tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); | |
1829 | ||
1830 | if (!currentsd || !physdev_list || !logdev_list || | |
1831 | !inq_buff || !tmpdevice) { | |
1832 | dev_err(&h->pdev->dev, "out of memory\n"); | |
1833 | goto out; | |
1834 | } | |
1835 | memset(lunzerobits, 0, sizeof(lunzerobits)); | |
1836 | ||
1837 | if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals, | |
1838 | logdev_list, &nlogicals)) | |
1839 | goto out; | |
1840 | ||
1841 | /* We might see up to 32 MSA2xxx enclosures; actually only 8 of them, | |
1842 | * but each shows up 4 times through different paths. The plus 1 | |
1843 | * is for the RAID controller. | |
1844 | */ | |
1845 | ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1; | |
1846 | ||
1847 | /* Allocate the per device structures */ | |
1848 | for (i = 0; i < ndevs_to_allocate; i++) { | |
1849 | currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL); | |
1850 | if (!currentsd[i]) { | |
1851 | dev_warn(&h->pdev->dev, "out of memory at %s:%d\n", | |
1852 | __FILE__, __LINE__); | |
1853 | goto out; | |
1854 | } | |
1855 | ndev_allocated++; | |
1856 | } | |
1857 | ||
339b2b14 SC |
1858 | if (unlikely(is_scsi_rev_5(h))) |
1859 | raid_ctlr_position = 0; | |
1860 | else | |
1861 | raid_ctlr_position = nphysicals + nlogicals; | |
1862 | ||
edd16368 SC |
1863 | /* adjust our table of devices */ |
1864 | nmsa2xxx_enclosures = 0; | |
1865 | for (i = 0; i < nphysicals + nlogicals + 1; i++) { | |
01a02ffc | 1866 | u8 *lunaddrbytes; |
edd16368 SC |
1867 | |
1868 | /* Figure out where the LUN ID info is coming from */ | |
339b2b14 SC |
1869 | lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, |
1870 | i, nphysicals, nlogicals, physdev_list, logdev_list); | |
edd16368 | 1871 | /* skip masked physical devices. */ |
339b2b14 SC |
1872 | if (lunaddrbytes[3] & 0xC0 && |
1873 | i < nphysicals + (raid_ctlr_position == 0)) | |
edd16368 SC |
1874 | continue; |
1875 | ||
1876 | /* Get device type, vendor, model, device id */ | |
1877 | if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice)) | |
1878 | continue; /* skip it if we can't talk to it. */ | |
1879 | figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun, | |
1880 | tmpdevice); | |
1881 | this_device = currentsd[ncurrent]; | |
1882 | ||
1883 | /* | |
1884 | * For the msa2xxx boxes, we have to insert a LUN 0 which | |
1885 | * doesn't show up in CCISS_REPORT_PHYSICAL data, but there | |
1886 | * is nonetheless an enclosure device there. We have to | |
1887 | * present it, otherwise Linux won't find anything if | |
1888 | * there is no LUN 0. | |
1889 | */ | |
1890 | if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device, | |
1891 | lunaddrbytes, bus, target, lun, lunzerobits, | |
1892 | &nmsa2xxx_enclosures)) { | |
1893 | ncurrent++; | |
1894 | this_device = currentsd[ncurrent]; | |
1895 | } | |
1896 | ||
1897 | *this_device = *tmpdevice; | |
1898 | hpsa_set_bus_target_lun(this_device, bus, target, lun); | |
1899 | ||
1900 | switch (this_device->devtype) { | |
1901 | case TYPE_ROM: { | |
1902 | /* We don't *really* support actual CD-ROM devices, | |
1903 | * just "One Button Disaster Recovery" tape drive | |
1904 | * which temporarily pretends to be a CD-ROM drive. | |
1905 | * So we check that the device is really an OBDR tape | |
1906 | * device by checking for "$DR-10" in bytes 43-48 of | |
1907 | * the inquiry data. | |
1908 | */ | |
1909 | char obdr_sig[7]; | |
1910 | #define OBDR_TAPE_SIG "$DR-10" | |
1911 | strncpy(obdr_sig, &inq_buff[43], 6); | |
1912 | obdr_sig[6] = '\0'; | |
1913 | if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0) | |
1914 | /* Not OBDR device, ignore it. */ | |
1915 | break; | |
1916 | } | |
1917 | ncurrent++; | |
1918 | break; | |
1919 | case TYPE_DISK: | |
1920 | if (i < nphysicals) | |
1921 | break; | |
1922 | ncurrent++; | |
1923 | break; | |
1924 | case TYPE_TAPE: | |
1925 | case TYPE_MEDIUM_CHANGER: | |
1926 | ncurrent++; | |
1927 | break; | |
1928 | case TYPE_RAID: | |
1929 | /* Only present the Smartarray HBA as a RAID controller. | |
1930 | * If it's a RAID controller other than the HBA itself | |
1931 | * (an external RAID controller, MSA500 or similar), | |
1932 | * don't present it. | |
1933 | */ | |
1934 | if (!is_hba_lunid(lunaddrbytes)) | |
1935 | break; | |
1936 | ncurrent++; | |
1937 | break; | |
1938 | default: | |
1939 | break; | |
1940 | } | |
1941 | if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA) | |
1942 | break; | |
1943 | } | |
1944 | adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); | |
1945 | out: | |
1946 | kfree(tmpdevice); | |
1947 | for (i = 0; i < ndev_allocated; i++) | |
1948 | kfree(currentsd[i]); | |
1949 | kfree(currentsd); | |
1950 | kfree(inq_buff); | |
1951 | kfree(physdev_list); | |
1952 | kfree(logdev_list); | |
edd16368 SC |
1953 | } |
1954 | ||
1955 | /* hpsa_scatter_gather takes a struct scsi_cmnd (cmd), does the PCI | |
1956 | * DMA mapping, and fills in the scatter-gather entries of the | |
1957 | * hpsa command, cp. | |
1958 | */ | |
33a2ffce | 1959 | static int hpsa_scatter_gather(struct ctlr_info *h, |
edd16368 SC |
1960 | struct CommandList *cp, |
1961 | struct scsi_cmnd *cmd) | |
1962 | { | |
1963 | unsigned int len; | |
1964 | struct scatterlist *sg; | |
01a02ffc | 1965 | u64 addr64; |
33a2ffce SC |
1966 | int use_sg, i, sg_index, chained; |
1967 | struct SGDescriptor *curr_sg; | |
edd16368 | 1968 | |
33a2ffce | 1969 | BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); |
edd16368 SC |
1970 | |
1971 | use_sg = scsi_dma_map(cmd); | |
1972 | if (use_sg < 0) | |
1973 | return use_sg; | |
1974 | ||
1975 | if (!use_sg) | |
1976 | goto sglist_finished; | |
1977 | ||
33a2ffce SC |
1978 | curr_sg = cp->SG; |
1979 | chained = 0; | |
1980 | sg_index = 0; | |
edd16368 | 1981 | scsi_for_each_sg(cmd, sg, use_sg, i) { |
33a2ffce SC |
1982 | if (i == h->max_cmd_sg_entries - 1 && |
1983 | use_sg > h->max_cmd_sg_entries) { | |
1984 | chained = 1; | |
1985 | curr_sg = h->cmd_sg_list[cp->cmdindex]; | |
1986 | sg_index = 0; | |
1987 | } | |
01a02ffc | 1988 | addr64 = (u64) sg_dma_address(sg); |
edd16368 | 1989 | len = sg_dma_len(sg); |
33a2ffce SC |
1990 | curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); |
1991 | curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); | |
1992 | curr_sg->Len = len; | |
1993 | curr_sg->Ext = 0; /* we are not chaining */ | |
1994 | curr_sg++; | |
1995 | } | |
1996 | ||
1997 | if (use_sg + chained > h->maxSG) | |
1998 | h->maxSG = use_sg + chained; | |
1999 | ||
2000 | if (chained) { | |
2001 | cp->Header.SGList = h->max_cmd_sg_entries; | |
2002 | cp->Header.SGTotal = (u16) (use_sg + 1); | |
2003 | hpsa_map_sg_chain_block(h, cp); | |
2004 | return 0; | |
edd16368 SC |
2005 | } |
2006 | ||
2007 | sglist_finished: | |
2008 | ||
01a02ffc SC |
2009 | cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ |
2010 | cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */ | |
edd16368 SC |
2011 | return 0; |
2012 | } | |
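/*
 * Chaining sketch for hpsa_scatter_gather() above (illustrative
 * numbers): with h->max_cmd_sg_entries == 32 and use_sg == 40, the
 * loop places segments 0..30 in cp->SG[] within the command, switches
 * curr_sg to the chain block h->cmd_sg_list[cp->cmdindex] at i == 31,
 * and places the remaining 9 segments there. SGList is then 32,
 * SGTotal is use_sg + 1 == 41 to account for the chain descriptor,
 * and hpsa_map_sg_chain_block() (defined elsewhere in this file) is
 * assumed to wire the last in-command descriptor to the chain block.
 */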
2013 | ||
2014 | ||
f281233d | 2015 | static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, |
edd16368 SC |
2016 | void (*done)(struct scsi_cmnd *)) |
2017 | { | |
2018 | struct ctlr_info *h; | |
2019 | struct hpsa_scsi_dev_t *dev; | |
2020 | unsigned char scsi3addr[8]; | |
2021 | struct CommandList *c; | |
2022 | unsigned long flags; | |
2023 | ||
2024 | /* Get the ptr to our adapter structure out of cmd->host. */ | |
2025 | h = sdev_to_hba(cmd->device); | |
2026 | dev = cmd->device->hostdata; | |
2027 | if (!dev) { | |
2028 | cmd->result = DID_NO_CONNECT << 16; | |
2029 | done(cmd); | |
2030 | return 0; | |
2031 | } | |
2032 | memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); | |
2033 | ||
2034 | /* Need a lock as this is being allocated from the pool */ | |
2035 | spin_lock_irqsave(&h->lock, flags); | |
2036 | c = cmd_alloc(h); | |
2037 | spin_unlock_irqrestore(&h->lock, flags); | |
2038 | if (c == NULL) { /* trouble... */ | |
2039 | dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); | |
2040 | return SCSI_MLQUEUE_HOST_BUSY; | |
2041 | } | |
2042 | ||
2043 | /* Fill in the command list header */ | |
2044 | ||
2045 | cmd->scsi_done = done; /* save this for use by completion code */ | |
2046 | ||
2047 | /* save c in case we have to abort it */ | |
2048 | cmd->host_scribble = (unsigned char *) c; | |
2049 | ||
2050 | c->cmd_type = CMD_SCSI; | |
2051 | c->scsi_cmd = cmd; | |
2052 | c->Header.ReplyQueue = 0; /* unused in simple mode */ | |
2053 | memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); | |
303932fd DB |
2054 | c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT); |
2055 | c->Header.Tag.lower |= DIRECT_LOOKUP_BIT; | |
edd16368 SC |
2056 | |
2057 | /* Fill in the request block... */ | |
2058 | ||
2059 | c->Request.Timeout = 0; | |
2060 | memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); | |
2061 | BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); | |
2062 | c->Request.CDBLen = cmd->cmd_len; | |
2063 | memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); | |
2064 | c->Request.Type.Type = TYPE_CMD; | |
2065 | c->Request.Type.Attribute = ATTR_SIMPLE; | |
2066 | switch (cmd->sc_data_direction) { | |
2067 | case DMA_TO_DEVICE: | |
2068 | c->Request.Type.Direction = XFER_WRITE; | |
2069 | break; | |
2070 | case DMA_FROM_DEVICE: | |
2071 | c->Request.Type.Direction = XFER_READ; | |
2072 | break; | |
2073 | case DMA_NONE: | |
2074 | c->Request.Type.Direction = XFER_NONE; | |
2075 | break; | |
2076 | case DMA_BIDIRECTIONAL: | |
2077 | /* This can happen if a buggy application does a scsi passthru | |
2078 | * and sets both inlen and outlen to non-zero. ( see | |
2079 | * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) | |
2080 | */ | |
2081 | ||
2082 | c->Request.Type.Direction = XFER_RSVD; | |
2083 | /* This is technically wrong, and hpsa controllers should | |
2084 | * reject it with CMD_INVALID, which is the most correct | |
2085 | * response, but non-fibre backends appear to let it | |
2086 | * slide by, and give the same results as if this field | |
2087 | * were set correctly. Either way is acceptable for | |
2088 | * our purposes here. | |
2089 | */ | |
2090 | ||
2091 | break; | |
2092 | ||
2093 | default: | |
2094 | dev_err(&h->pdev->dev, "unknown data direction: %d\n", | |
2095 | cmd->sc_data_direction); | |
2096 | BUG(); | |
2097 | break; | |
2098 | } | |
2099 | ||
33a2ffce | 2100 | if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */ |
edd16368 SC |
2101 | cmd_free(h, c); |
2102 | return SCSI_MLQUEUE_HOST_BUSY; | |
2103 | } | |
2104 | enqueue_cmd_and_start_io(h, c); | |
2105 | /* the cmd'll come back via intr handler in complete_scsi_command() */ | |
2106 | return 0; | |
2107 | } | |
2108 | ||
f281233d JG |
2109 | static DEF_SCSI_QCMD(hpsa_scsi_queue_command) |
2110 | ||
a08a8471 SC |
2111 | static void hpsa_scan_start(struct Scsi_Host *sh) |
2112 | { | |
2113 | struct ctlr_info *h = shost_to_hba(sh); | |
2114 | unsigned long flags; | |
2115 | ||
2116 | /* wait until any scan already in progress is finished. */ | |
2117 | while (1) { | |
2118 | spin_lock_irqsave(&h->scan_lock, flags); | |
2119 | if (h->scan_finished) | |
2120 | break; | |
2121 | spin_unlock_irqrestore(&h->scan_lock, flags); | |
2122 | wait_event(h->scan_wait_queue, h->scan_finished); | |
2123 | /* Note: We don't need to worry about a race between this | |
2124 | * thread and driver unload because the midlayer will | |
2125 | * have incremented the reference count, so unload won't | |
2126 | * happen if we're in here. | |
2127 | */ | |
2128 | } | |
2129 | h->scan_finished = 0; /* mark scan as in progress */ | |
2130 | spin_unlock_irqrestore(&h->scan_lock, flags); | |
2131 | ||
2132 | hpsa_update_scsi_devices(h, h->scsi_host->host_no); | |
2133 | ||
2134 | spin_lock_irqsave(&h->scan_lock, flags); | |
2135 | h->scan_finished = 1; /* mark scan as finished. */ | |
2136 | wake_up_all(&h->scan_wait_queue); | |
2137 | spin_unlock_irqrestore(&h->scan_lock, flags); | |
2138 | } | |
2139 | ||
2140 | static int hpsa_scan_finished(struct Scsi_Host *sh, | |
2141 | unsigned long elapsed_time) | |
2142 | { | |
2143 | struct ctlr_info *h = shost_to_hba(sh); | |
2144 | unsigned long flags; | |
2145 | int finished; | |
2146 | ||
2147 | spin_lock_irqsave(&h->scan_lock, flags); | |
2148 | finished = h->scan_finished; | |
2149 | spin_unlock_irqrestore(&h->scan_lock, flags); | |
2150 | return finished; | |
2151 | } | |
2152 | ||
667e23d4 SC |
2153 | static int hpsa_change_queue_depth(struct scsi_device *sdev, |
2154 | int qdepth, int reason) | |
2155 | { | |
2156 | struct ctlr_info *h = sdev_to_hba(sdev); | |
2157 | ||
2158 | if (reason != SCSI_QDEPTH_DEFAULT) | |
2159 | return -ENOTSUPP; | |
2160 | ||
2161 | if (qdepth < 1) | |
2162 | qdepth = 1; | |
2163 | else | |
2164 | if (qdepth > h->nr_cmds) | |
2165 | qdepth = h->nr_cmds; | |
2166 | scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); | |
2167 | return sdev->queue_depth; | |
2168 | } | |
2169 | ||
edd16368 SC |
2170 | static void hpsa_unregister_scsi(struct ctlr_info *h) |
2171 | { | |
2172 | /* we are being forcibly unloaded, and may not refuse. */ | |
2173 | scsi_remove_host(h->scsi_host); | |
2174 | scsi_host_put(h->scsi_host); | |
2175 | h->scsi_host = NULL; | |
2176 | } | |
2177 | ||
2178 | static int hpsa_register_scsi(struct ctlr_info *h) | |
2179 | { | |
2180 | int rc; | |
2181 | ||
edd16368 SC |
2182 | rc = hpsa_scsi_detect(h); |
2183 | if (rc != 0) | |
2184 | dev_err(&h->pdev->dev, "hpsa_register_scsi: failed" | |
2185 | " hpsa_scsi_detect(), rc is %d\n", rc); | |
2186 | return rc; | |
2187 | } | |
2188 | ||
2189 | static int wait_for_device_to_become_ready(struct ctlr_info *h, | |
2190 | unsigned char lunaddr[]) | |
2191 | { | |
2192 | int rc = 0; | |
2193 | int count = 0; | |
2194 | int waittime = 1; /* seconds */ | |
2195 | struct CommandList *c; | |
2196 | ||
2197 | c = cmd_special_alloc(h); | |
2198 | if (!c) { | |
2199 | dev_warn(&h->pdev->dev, "out of memory in " | |
2200 | "wait_for_device_to_become_ready.\n"); | |
2201 | return IO_ERROR; | |
2202 | } | |
2203 | ||
2204 | /* Send test unit ready until device ready, or give up. */ | |
2205 | while (count < HPSA_TUR_RETRY_LIMIT) { | |
2206 | ||
2207 | /* Wait for a bit. do this first, because if we send | |
2208 | * the TUR right away, the reset will just abort it. | |
2209 | */ | |
2210 | msleep(1000 * waittime); | |
2211 | count++; | |
2212 | ||
2213 | /* Increase wait time with each try, up to a point. */ | |
2214 | if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) | |
2215 | waittime = waittime * 2; | |
2216 | ||
2217 | /* Send the Test Unit Ready */ | |
2218 | fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD); | |
2219 | hpsa_scsi_do_simple_cmd_core(h, c); | |
2220 | /* no unmap needed here because no data xfer. */ | |
2221 | ||
2222 | if (c->err_info->CommandStatus == CMD_SUCCESS) | |
2223 | break; | |
2224 | ||
2225 | if (c->err_info->CommandStatus == CMD_TARGET_STATUS && | |
2226 | c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && | |
2227 | (c->err_info->SenseInfo[2] == NO_SENSE || | |
2228 | c->err_info->SenseInfo[2] == UNIT_ATTENTION)) | |
2229 | break; | |
2230 | ||
2231 | dev_warn(&h->pdev->dev, "waiting %d secs " | |
2232 | "for device to become ready.\n", waittime); | |
2233 | rc = 1; /* device not ready. */ | |
2234 | } | |
2235 | ||
2236 | if (rc) | |
2237 | dev_warn(&h->pdev->dev, "giving up on device.\n"); | |
2238 | else | |
2239 | dev_warn(&h->pdev->dev, "device is ready.\n"); | |
2240 | ||
2241 | cmd_special_free(h, c); | |
2242 | return rc; | |
2243 | } | |
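/*
 * Backoff sketch for the retry loop above (illustrative; the actual
 * bounds come from HPSA_MAX_WAIT_INTERVAL_SECS and
 * HPSA_TUR_RETRY_LIMIT, defined elsewhere): the sleep before each
 * Test Unit Ready doubles per attempt, 1s, 2s, 4s, 8s, ..., until
 * waittime reaches the maximum interval, after which it stays there
 * until the retry limit gives up on the device.
 */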
2244 | ||
2245 | /* Need at least one of these error handlers to keep ../scsi/hosts.c from | |
2246 | * complaining. Doing a host- or bus-reset can't do anything good here. | |
2247 | */ | |
2248 | static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) | |
2249 | { | |
2250 | int rc; | |
2251 | struct ctlr_info *h; | |
2252 | struct hpsa_scsi_dev_t *dev; | |
2253 | ||
2254 | /* find the controller to which the command to be aborted was sent */ | |
2255 | h = sdev_to_hba(scsicmd->device); | |
2256 | if (h == NULL) /* paranoia */ | |
2257 | return FAILED; | |
edd16368 SC |
2258 | dev = scsicmd->device->hostdata; |
2259 | if (!dev) { | |
2260 | dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " | |
2261 | "device lookup failed.\n"); | |
2262 | return FAILED; | |
2263 | } | |
d416b0c7 SC |
2264 | dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", |
2265 | h->scsi_host->host_no, dev->bus, dev->target, dev->lun); | |
edd16368 SC |
2266 | /* send a reset to the SCSI LUN which the command was sent to */ |
2267 | rc = hpsa_send_reset(h, dev->scsi3addr); | |
2268 | if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) | |
2269 | return SUCCESS; | |
2270 | ||
2271 | dev_warn(&h->pdev->dev, "resetting device failed.\n"); | |
2272 | return FAILED; | |
2273 | } | |
2274 | ||
2275 | /* | |
2276 | * For operations that cannot sleep, a pool of command blocks is allocated | |
2277 | * at init and managed by cmd_alloc() and cmd_free() using a simple bitmap | |
2278 | * to track which ones are free or in use. The lock must be held when | |
2279 | * calling this; cmd_free() is the complement. | |
2280 | */ | |
2281 | static struct CommandList *cmd_alloc(struct ctlr_info *h) | |
2282 | { | |
2283 | struct CommandList *c; | |
2284 | int i; | |
2285 | union u64bit temp64; | |
2286 | dma_addr_t cmd_dma_handle, err_dma_handle; | |
2287 | ||
2288 | do { | |
2289 | i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); | |
2290 | if (i == h->nr_cmds) | |
2291 | return NULL; | |
2292 | } while (test_and_set_bit | |
2293 | (i & (BITS_PER_LONG - 1), | |
2294 | h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); | |
2295 | c = h->cmd_pool + i; | |
2296 | memset(c, 0, sizeof(*c)); | |
2297 | cmd_dma_handle = h->cmd_pool_dhandle | |
2298 | + i * sizeof(*c); | |
2299 | c->err_info = h->errinfo_pool + i; | |
2300 | memset(c->err_info, 0, sizeof(*c->err_info)); | |
2301 | err_dma_handle = h->errinfo_pool_dhandle | |
2302 | + i * sizeof(*c->err_info); | |
2303 | h->nr_allocs++; | |
2304 | ||
2305 | c->cmdindex = i; | |
2306 | ||
9e0fc764 | 2307 | INIT_LIST_HEAD(&c->list); |
01a02ffc SC |
2308 | c->busaddr = (u32) cmd_dma_handle; |
2309 | temp64.val = (u64) err_dma_handle; | |
edd16368 SC |
2310 | c->ErrDesc.Addr.lower = temp64.val32.lower; |
2311 | c->ErrDesc.Addr.upper = temp64.val32.upper; | |
2312 | c->ErrDesc.Len = sizeof(*c->err_info); | |
2313 | ||
2314 | c->h = h; | |
2315 | return c; | |
2316 | } | |
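/*
 * Worked example of the bitmap arithmetic in cmd_alloc()/cmd_free()
 * (illustrative, assuming a 64-bit kernel so BITS_PER_LONG == 64):
 * for slot i == 70, the code operates on bit 70 & 63 == 6 of the
 * unsigned long at h->cmd_pool_bits + 70 / 64, i.e. bit 6 of the
 * second word. cmd_free() clears that same bit to return the slot
 * to the pool.
 */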
2317 | ||
2318 | /* For operations where the allocation may sleep, this routine | |
2319 | * can be called. The lock need not be held to call | |
2320 | * cmd_special_alloc(); cmd_special_free() is the complement. | |
2321 | */ | |
2322 | static struct CommandList *cmd_special_alloc(struct ctlr_info *h) | |
2323 | { | |
2324 | struct CommandList *c; | |
2325 | union u64bit temp64; | |
2326 | dma_addr_t cmd_dma_handle, err_dma_handle; | |
2327 | ||
2328 | c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); | |
2329 | if (c == NULL) | |
2330 | return NULL; | |
2331 | memset(c, 0, sizeof(*c)); | |
2332 | ||
2333 | c->cmdindex = -1; | |
2334 | ||
2335 | c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info), | |
2336 | &err_dma_handle); | |
2337 | ||
2338 | if (c->err_info == NULL) { | |
2339 | pci_free_consistent(h->pdev, | |
2340 | sizeof(*c), c, cmd_dma_handle); | |
2341 | return NULL; | |
2342 | } | |
2343 | memset(c->err_info, 0, sizeof(*c->err_info)); | |
2344 | ||
9e0fc764 | 2345 | INIT_LIST_HEAD(&c->list); |
01a02ffc SC |
2346 | c->busaddr = (u32) cmd_dma_handle; |
2347 | temp64.val = (u64) err_dma_handle; | |
edd16368 SC |
2348 | c->ErrDesc.Addr.lower = temp64.val32.lower; |
2349 | c->ErrDesc.Addr.upper = temp64.val32.upper; | |
2350 | c->ErrDesc.Len = sizeof(*c->err_info); | |
2351 | ||
2352 | c->h = h; | |
2353 | return c; | |
2354 | } | |
2355 | ||
2356 | static void cmd_free(struct ctlr_info *h, struct CommandList *c) | |
2357 | { | |
2358 | int i; | |
2359 | ||
2360 | i = c - h->cmd_pool; | |
2361 | clear_bit(i & (BITS_PER_LONG - 1), | |
2362 | h->cmd_pool_bits + (i / BITS_PER_LONG)); | |
2363 | h->nr_frees++; | |
2364 | } | |
2365 | ||
2366 | static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) | |
2367 | { | |
2368 | union u64bit temp64; | |
2369 | ||
2370 | temp64.val32.lower = c->ErrDesc.Addr.lower; | |
2371 | temp64.val32.upper = c->ErrDesc.Addr.upper; | |
2372 | pci_free_consistent(h->pdev, sizeof(*c->err_info), | |
2373 | c->err_info, (dma_addr_t) temp64.val); | |
2374 | pci_free_consistent(h->pdev, sizeof(*c), | |
d896f3f3 | 2375 | c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK)); |
edd16368 SC |
2376 | } |
2377 | ||
2378 | #ifdef CONFIG_COMPAT | |
2379 | ||
edd16368 SC |
2380 | static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) |
2381 | { | |
2382 | IOCTL32_Command_struct __user *arg32 = | |
2383 | (IOCTL32_Command_struct __user *) arg; | |
2384 | IOCTL_Command_struct arg64; | |
2385 | IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); | |
2386 | int err; | |
2387 | u32 cp; | |
2388 | ||
938abd84 | 2389 | memset(&arg64, 0, sizeof(arg64)); |
edd16368 SC |
2390 | err = 0; |
2391 | err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, | |
2392 | sizeof(arg64.LUN_info)); | |
2393 | err |= copy_from_user(&arg64.Request, &arg32->Request, | |
2394 | sizeof(arg64.Request)); | |
2395 | err |= copy_from_user(&arg64.error_info, &arg32->error_info, | |
2396 | sizeof(arg64.error_info)); | |
2397 | err |= get_user(arg64.buf_size, &arg32->buf_size); | |
2398 | err |= get_user(cp, &arg32->buf); | |
2399 | arg64.buf = compat_ptr(cp); | |
2400 | err |= copy_to_user(p, &arg64, sizeof(arg64)); | |
2401 | ||
2402 | if (err) | |
2403 | return -EFAULT; | |
2404 | ||
e39eeaed | 2405 | err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p); |
edd16368 SC |
2406 | if (err) |
2407 | return err; | |
2408 | err |= copy_in_user(&arg32->error_info, &p->error_info, | |
2409 | sizeof(arg32->error_info)); | |
2410 | if (err) | |
2411 | return -EFAULT; | |
2412 | return err; | |
2413 | } | |
2414 | ||
2415 | static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, | |
2416 | int cmd, void *arg) | |
2417 | { | |
2418 | BIG_IOCTL32_Command_struct __user *arg32 = | |
2419 | (BIG_IOCTL32_Command_struct __user *) arg; | |
2420 | BIG_IOCTL_Command_struct arg64; | |
2421 | BIG_IOCTL_Command_struct __user *p = | |
2422 | compat_alloc_user_space(sizeof(arg64)); | |
2423 | int err; | |
2424 | u32 cp; | |
2425 | ||
938abd84 | 2426 | memset(&arg64, 0, sizeof(arg64)); |
edd16368 SC |
2427 | err = 0; |
2428 | err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, | |
2429 | sizeof(arg64.LUN_info)); | |
2430 | err |= copy_from_user(&arg64.Request, &arg32->Request, | |
2431 | sizeof(arg64.Request)); | |
2432 | err |= copy_from_user(&arg64.error_info, &arg32->error_info, | |
2433 | sizeof(arg64.error_info)); | |
2434 | err |= get_user(arg64.buf_size, &arg32->buf_size); | |
2435 | err |= get_user(arg64.malloc_size, &arg32->malloc_size); | |
2436 | err |= get_user(cp, &arg32->buf); | |
2437 | arg64.buf = compat_ptr(cp); | |
2438 | err |= copy_to_user(p, &arg64, sizeof(arg64)); | |
2439 | ||
2440 | if (err) | |
2441 | return -EFAULT; | |
2442 | ||
e39eeaed | 2443 | err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p); |
edd16368 SC |
2444 | if (err) |
2445 | return err; | |
2446 | err |= copy_in_user(&arg32->error_info, &p->error_info, | |
2447 | sizeof(arg32->error_info)); | |
2448 | if (err) | |
2449 | return -EFAULT; | |
2450 | return err; | |
2451 | } | |
71fe75a7 SC |
2452 | |
2453 | static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg) | |
2454 | { | |
2455 | switch (cmd) { | |
2456 | case CCISS_GETPCIINFO: | |
2457 | case CCISS_GETINTINFO: | |
2458 | case CCISS_SETINTINFO: | |
2459 | case CCISS_GETNODENAME: | |
2460 | case CCISS_SETNODENAME: | |
2461 | case CCISS_GETHEARTBEAT: | |
2462 | case CCISS_GETBUSTYPES: | |
2463 | case CCISS_GETFIRMVER: | |
2464 | case CCISS_GETDRIVVER: | |
2465 | case CCISS_REVALIDVOLS: | |
2466 | case CCISS_DEREGDISK: | |
2467 | case CCISS_REGNEWDISK: | |
2468 | case CCISS_REGNEWD: | |
2469 | case CCISS_RESCANDISK: | |
2470 | case CCISS_GETLUNINFO: | |
2471 | return hpsa_ioctl(dev, cmd, arg); | |
2472 | ||
2473 | case CCISS_PASSTHRU32: | |
2474 | return hpsa_ioctl32_passthru(dev, cmd, arg); | |
2475 | case CCISS_BIG_PASSTHRU32: | |
2476 | return hpsa_ioctl32_big_passthru(dev, cmd, arg); | |
2477 | ||
2478 | default: | |
2479 | return -ENOIOCTLCMD; | |
2480 | } | |
2481 | } | |
edd16368 SC |
2482 | #endif |
2483 | ||
2484 | static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) | |
2485 | { | |
2486 | struct hpsa_pci_info pciinfo; | |
2487 | ||
2488 | if (!argp) | |
2489 | return -EINVAL; | |
2490 | pciinfo.domain = pci_domain_nr(h->pdev->bus); | |
2491 | pciinfo.bus = h->pdev->bus->number; | |
2492 | pciinfo.dev_fn = h->pdev->devfn; | |
2493 | pciinfo.board_id = h->board_id; | |
2494 | if (copy_to_user(argp, &pciinfo, sizeof(pciinfo))) | |
2495 | return -EFAULT; | |
2496 | return 0; | |
2497 | } | |
2498 | ||
2499 | static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp) | |
2500 | { | |
2501 | DriverVer_type DriverVer; | |
2502 | unsigned char vmaj, vmin, vsubmin; | |
2503 | int rc; | |
2504 | ||
2505 | rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu", | |
2506 | &vmaj, &vmin, &vsubmin); | |
2507 | if (rc != 3) { | |
2508 | dev_info(&h->pdev->dev, "driver version string '%s' " | |
2509 | "unrecognized.", HPSA_DRIVER_VERSION); | |
2510 | vmaj = 0; | |
2511 | vmin = 0; | |
2512 | vsubmin = 0; | |
2513 | } | |
2514 | DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin; | |
2515 | if (!argp) | |
2516 | return -EINVAL; | |
2517 | if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type))) | |
2518 | return -EFAULT; | |
2519 | return 0; | |
2520 | } | |
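/*
 * Packing example for hpsa_getdrivver_ioctl() above (illustrative,
 * using a hypothetical version string "2.0.2"): sscanf() parses
 * vmaj == 2, vmin == 0, vsubmin == 2, and DriverVer becomes
 * (2 << 16) | (0 << 8) | 2 == 0x020002.
 */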
2521 | ||
2522 | static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |
2523 | { | |
2524 | IOCTL_Command_struct iocommand; | |
2525 | struct CommandList *c; | |
2526 | char *buff = NULL; | |
2527 | union u64bit temp64; | |
2528 | ||
2529 | if (!argp) | |
2530 | return -EINVAL; | |
2531 | if (!capable(CAP_SYS_RAWIO)) | |
2532 | return -EPERM; | |
2533 | if (copy_from_user(&iocommand, argp, sizeof(iocommand))) | |
2534 | return -EFAULT; | |
2535 | if ((iocommand.buf_size < 1) && | |
2536 | (iocommand.Request.Type.Direction != XFER_NONE)) { | |
2537 | return -EINVAL; | |
2538 | } | |
2539 | if (iocommand.buf_size > 0) { | |
2540 | buff = kmalloc(iocommand.buf_size, GFP_KERNEL); | |
2541 | if (buff == NULL) | |
2542 | return -ENOMEM; | |
b03a7771 SC |
2543 | if (iocommand.Request.Type.Direction == XFER_WRITE) { |
2544 | /* Copy the data into the buffer we created */ | |
2545 | if (copy_from_user(buff, iocommand.buf, | |
2546 | iocommand.buf_size)) { | |
2547 | kfree(buff); | |
2548 | return -EFAULT; | |
2549 | } | |
2550 | } else { | |
2551 | memset(buff, 0, iocommand.buf_size); | |
edd16368 | 2552 | } |
b03a7771 | 2553 | } |
edd16368 SC |
2554 | c = cmd_special_alloc(h); |
2555 | if (c == NULL) { | |
2556 | kfree(buff); | |
2557 | return -ENOMEM; | |
2558 | } | |
2559 | /* Fill in the command type */ | |
2560 | c->cmd_type = CMD_IOCTL_PEND; | |
2561 | /* Fill in Command Header */ | |
2562 | c->Header.ReplyQueue = 0; /* unused in simple mode */ | |
2563 | if (iocommand.buf_size > 0) { /* buffer to fill */ | |
2564 | c->Header.SGList = 1; | |
2565 | c->Header.SGTotal = 1; | |
2566 | } else { /* no buffers to fill */ | |
2567 | c->Header.SGList = 0; | |
2568 | c->Header.SGTotal = 0; | |
2569 | } | |
2570 | memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN)); | |
2571 | /* use the kernel address of the cmd block for the tag */ | |
2572 | c->Header.Tag.lower = c->busaddr; | |
2573 | ||
2574 | /* Fill in Request block */ | |
2575 | memcpy(&c->Request, &iocommand.Request, | |
2576 | sizeof(c->Request)); | |
2577 | ||
2578 | /* Fill in the scatter gather information */ | |
2579 | if (iocommand.buf_size > 0) { | |
2580 | temp64.val = pci_map_single(h->pdev, buff, | |
2581 | iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); | |
2582 | c->SG[0].Addr.lower = temp64.val32.lower; | |
2583 | c->SG[0].Addr.upper = temp64.val32.upper; | |
2584 | c->SG[0].Len = iocommand.buf_size; | |
2585 | c->SG[0].Ext = 0; /* we are not chaining*/ | |
2586 | } | |
2587 | hpsa_scsi_do_simple_cmd_core(h, c); | |
c2dd32e0 SC |
2588 | if (iocommand.buf_size > 0) |
2589 | hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL); | |
edd16368 SC |
2590 | check_ioctl_unit_attention(h, c); |
2591 | ||
2592 | /* Copy the error information out */ | |
2593 | memcpy(&iocommand.error_info, c->err_info, | |
2594 | sizeof(iocommand.error_info)); | |
2595 | if (copy_to_user(argp, &iocommand, sizeof(iocommand))) { | |
2596 | kfree(buff); | |
2597 | cmd_special_free(h, c); | |
2598 | return -EFAULT; | |
2599 | } | |
b03a7771 SC |
2600 | if (iocommand.Request.Type.Direction == XFER_READ && |
2601 | iocommand.buf_size > 0) { | |
edd16368 SC |
2602 | /* Copy the data out of the buffer we created */ |
2603 | if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { | |
2604 | kfree(buff); | |
2605 | cmd_special_free(h, c); | |
2606 | return -EFAULT; | |
2607 | } | |
2608 | } | |
2609 | kfree(buff); | |
2610 | cmd_special_free(h, c); | |
2611 | return 0; | |
2612 | } | |
2613 | ||
2614 | static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |
2615 | { | |
2616 | BIG_IOCTL_Command_struct *ioc; | |
2617 | struct CommandList *c; | |
2618 | unsigned char **buff = NULL; | |
2619 | int *buff_size = NULL; | |
2620 | union u64bit temp64; | |
2621 | BYTE sg_used = 0; | |
2622 | int status = 0; | |
2623 | int i; | |
01a02ffc SC |
2624 | u32 left; |
2625 | u32 sz; | |
edd16368 SC |
2626 | BYTE __user *data_ptr; |
2627 | ||
2628 | if (!argp) | |
2629 | return -EINVAL; | |
2630 | if (!capable(CAP_SYS_RAWIO)) | |
2631 | return -EPERM; | |
2632 | ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); | |
2634 | if (!ioc) { | |
2635 | status = -ENOMEM; | |
2636 | goto cleanup1; | |
2637 | } | |
2638 | if (copy_from_user(ioc, argp, sizeof(*ioc))) { | |
2639 | status = -EFAULT; | |
2640 | goto cleanup1; | |
2641 | } | |
2642 | if ((ioc->buf_size < 1) && | |
2643 | (ioc->Request.Type.Direction != XFER_NONE)) { | |
2644 | status = -EINVAL; | |
2645 | goto cleanup1; | |
2646 | } | |
2647 | /* Check kmalloc limits using all SGs */ | |
2648 | if (ioc->malloc_size > MAX_KMALLOC_SIZE) { | |
2649 | status = -EINVAL; | |
2650 | goto cleanup1; | |
2651 | } | |
2652 | if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) { | |
2653 | status = -EINVAL; | |
2654 | goto cleanup1; | |
2655 | } | |
2656 | buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL); | |
2657 | if (!buff) { | |
2658 | status = -ENOMEM; | |
2659 | goto cleanup1; | |
2660 | } | |
2661 | buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL); | |
2662 | if (!buff_size) { | |
2663 | status = -ENOMEM; | |
2664 | goto cleanup1; | |
2665 | } | |
2666 | left = ioc->buf_size; | |
2667 | data_ptr = ioc->buf; | |
2668 | while (left) { | |
2669 | sz = (left > ioc->malloc_size) ? ioc->malloc_size : left; | |
2670 | buff_size[sg_used] = sz; | |
2671 | buff[sg_used] = kmalloc(sz, GFP_KERNEL); | |
2672 | if (buff[sg_used] == NULL) { | |
2673 | status = -ENOMEM; | |
2674 | goto cleanup1; | |
2675 | } | |
2676 | if (ioc->Request.Type.Direction == XFER_WRITE) { | |
2677 | if (copy_from_user(buff[sg_used], data_ptr, sz)) { | |
2678 | status = -EFAULT; | |
2679 | goto cleanup1; | |
2680 | } | |
2681 | } else | |
2682 | memset(buff[sg_used], 0, sz); | |
2683 | left -= sz; | |
2684 | data_ptr += sz; | |
2685 | sg_used++; | |
2686 | } | |
2687 | c = cmd_special_alloc(h); | |
2688 | if (c == NULL) { | |
2689 | status = -ENOMEM; | |
2690 | goto cleanup1; | |
2691 | } | |
2692 | c->cmd_type = CMD_IOCTL_PEND; | |
2693 | c->Header.ReplyQueue = 0; | |
b03a7771 | 2694 | c->Header.SGList = c->Header.SGTotal = sg_used; |
edd16368 SC |
2695 | memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN)); |
2696 | c->Header.Tag.lower = c->busaddr; | |
2697 | memcpy(&c->Request, &ioc->Request, sizeof(c->Request)); | |
2698 | if (ioc->buf_size > 0) { | |
2699 | int i; | |
2700 | for (i = 0; i < sg_used; i++) { | |
2701 | temp64.val = pci_map_single(h->pdev, buff[i], | |
2702 | buff_size[i], PCI_DMA_BIDIRECTIONAL); | |
2703 | c->SG[i].Addr.lower = temp64.val32.lower; | |
2704 | c->SG[i].Addr.upper = temp64.val32.upper; | |
2705 | c->SG[i].Len = buff_size[i]; | |
2706 | /* we are not chaining */ | |
2707 | c->SG[i].Ext = 0; | |
2708 | } | |
2709 | } | |
2710 | hpsa_scsi_do_simple_cmd_core(h, c); | |
b03a7771 SC |
2711 | if (sg_used) |
2712 | hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL); | |
edd16368 SC |
2713 | check_ioctl_unit_attention(h, c); |
2714 | /* Copy the error information out */ | |
2715 | memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info)); | |
2716 | if (copy_to_user(argp, ioc, sizeof(*ioc))) { | |
2717 | cmd_special_free(h, c); | |
2718 | status = -EFAULT; | |
2719 | goto cleanup1; | |
2720 | } | |
b03a7771 | 2721 | if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) { |
edd16368 SC |
2722 | /* Copy the data out of the buffer we created */ |
2723 | BYTE __user *ptr = ioc->buf; | |
2724 | for (i = 0; i < sg_used; i++) { | |
2725 | if (copy_to_user(ptr, buff[i], buff_size[i])) { | |
2726 | cmd_special_free(h, c); | |
2727 | status = -EFAULT; | |
2728 | goto cleanup1; | |
2729 | } | |
2730 | ptr += buff_size[i]; | |
2731 | } | |
2732 | } | |
2733 | cmd_special_free(h, c); | |
2734 | status = 0; | |
2735 | cleanup1: | |
2736 | if (buff) { | |
2737 | for (i = 0; i < sg_used; i++) | |
2738 | kfree(buff[i]); | |
2739 | kfree(buff); | |
2740 | } | |
2741 | kfree(buff_size); | |
2742 | kfree(ioc); | |
2743 | return status; | |
2744 | } | |
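/*
 * Chunking example for hpsa_big_passthru_ioctl() above (illustrative
 * numbers): with ioc->buf_size == 10000 and ioc->malloc_size == 4096,
 * the user buffer is copied through three kernel buffers of 4096,
 * 4096 and 1808 bytes (sg_used == 3), each of which becomes one SG
 * descriptor on the command.
 */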
2745 | ||
2746 | static void check_ioctl_unit_attention(struct ctlr_info *h, | |
2747 | struct CommandList *c) | |
2748 | { | |
2749 | if (c->err_info->CommandStatus == CMD_TARGET_STATUS && | |
2750 | c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) | |
2751 | (void) check_for_unit_attention(h, c); | |
2752 | } | |
2753 | /* | |
2754 | * ioctl | |
2755 | */ | |
2756 | static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg) | |
2757 | { | |
2758 | struct ctlr_info *h; | |
2759 | void __user *argp = (void __user *)arg; | |
2760 | ||
2761 | h = sdev_to_hba(dev); | |
2762 | ||
2763 | switch (cmd) { | |
2764 | case CCISS_DEREGDISK: | |
2765 | case CCISS_REGNEWDISK: | |
2766 | case CCISS_REGNEWD: | |
a08a8471 | 2767 | hpsa_scan_start(h->scsi_host); |
edd16368 SC |
2768 | return 0; |
2769 | case CCISS_GETPCIINFO: | |
2770 | return hpsa_getpciinfo_ioctl(h, argp); | |
2771 | case CCISS_GETDRIVVER: | |
2772 | return hpsa_getdrivver_ioctl(h, argp); | |
2773 | case CCISS_PASSTHRU: | |
2774 | return hpsa_passthru_ioctl(h, argp); | |
2775 | case CCISS_BIG_PASSTHRU: | |
2776 | return hpsa_big_passthru_ioctl(h, argp); | |
2777 | default: | |
2778 | return -ENOTTY; | |
2779 | } | |
2780 | } | |
2781 | ||
64670ac8 SC |
2782 | static int __devinit hpsa_send_host_reset(struct ctlr_info *h, |
2783 | unsigned char *scsi3addr, u8 reset_type) | |
2784 | { | |
2785 | struct CommandList *c; | |
2786 | ||
2787 | c = cmd_alloc(h); | |
2788 | if (!c) | |
2789 | return -ENOMEM; | |
2790 | fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, | |
2791 | RAID_CTLR_LUNID, TYPE_MSG); | |
2792 | c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */ | |
2793 | c->waiting = NULL; | |
2794 | enqueue_cmd_and_start_io(h, c); | |
2795 | /* Don't wait for completion, the reset won't complete. Don't free | |
2796 | * the command either. This is the last command we will send before | |
2797 | * re-initializing everything, so it doesn't matter and won't leak. | |
2798 | */ | |
2799 | return 0; | |
2800 | } | |
2801 | ||
01a02ffc SC |
2802 | static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, |
2803 | void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, | |
edd16368 SC |
2804 | int cmd_type) |
2805 | { | |
2806 | int pci_dir = XFER_NONE; | |
2807 | ||
2808 | c->cmd_type = CMD_IOCTL_PEND; | |
2809 | c->Header.ReplyQueue = 0; | |
2810 | if (buff != NULL && size > 0) { | |
2811 | c->Header.SGList = 1; | |
2812 | c->Header.SGTotal = 1; | |
2813 | } else { | |
2814 | c->Header.SGList = 0; | |
2815 | c->Header.SGTotal = 0; | |
2816 | } | |
2817 | c->Header.Tag.lower = c->busaddr; | |
2818 | memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); | |
2819 | ||
2820 | c->Request.Type.Type = cmd_type; | |
2821 | if (cmd_type == TYPE_CMD) { | |
2822 | switch (cmd) { | |
2823 | case HPSA_INQUIRY: | |
2824 | /* are we trying to read a vital product page */ | |
2825 | if (page_code != 0) { | |
2826 | c->Request.CDB[1] = 0x01; | |
2827 | c->Request.CDB[2] = page_code; | |
2828 | } | |
2829 | c->Request.CDBLen = 6; | |
2830 | c->Request.Type.Attribute = ATTR_SIMPLE; | |
2831 | c->Request.Type.Direction = XFER_READ; | |
2832 | c->Request.Timeout = 0; | |
2833 | c->Request.CDB[0] = HPSA_INQUIRY; | |
2834 | c->Request.CDB[4] = size & 0xFF; | |
2835 | break; | |
2836 | case HPSA_REPORT_LOG: | |
2837 | case HPSA_REPORT_PHYS: | |
2838 | /* Talking to the controller, so it's a physical command: | |
2839 | mode = 00, target = 0. Nothing to write. | |
2840 | */ | |
2841 | c->Request.CDBLen = 12; | |
2842 | c->Request.Type.Attribute = ATTR_SIMPLE; | |
2843 | c->Request.Type.Direction = XFER_READ; | |
2844 | c->Request.Timeout = 0; | |
2845 | c->Request.CDB[0] = cmd; | |
2846 | c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ | |
2847 | c->Request.CDB[7] = (size >> 16) & 0xFF; | |
2848 | c->Request.CDB[8] = (size >> 8) & 0xFF; | |
2849 | c->Request.CDB[9] = size & 0xFF; | |
2850 | break; | |
edd16368 SC |
2851 | case HPSA_CACHE_FLUSH: |
2852 | c->Request.CDBLen = 12; | |
2853 | c->Request.Type.Attribute = ATTR_SIMPLE; | |
2854 | c->Request.Type.Direction = XFER_WRITE; | |
2855 | c->Request.Timeout = 0; | |
2856 | c->Request.CDB[0] = BMIC_WRITE; | |
2857 | c->Request.CDB[6] = BMIC_CACHE_FLUSH; | |
2858 | break; | |
2859 | case TEST_UNIT_READY: | |
2860 | c->Request.CDBLen = 6; | |
2861 | c->Request.Type.Attribute = ATTR_SIMPLE; | |
2862 | c->Request.Type.Direction = XFER_NONE; | |
2863 | c->Request.Timeout = 0; | |
2864 | break; | |
2865 | default: | |
2866 | dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd); | |
2867 | BUG(); | |
2868 | return; | |
2869 | } | |
2870 | } else if (cmd_type == TYPE_MSG) { | |
2871 | switch (cmd) { | |
2872 | ||
2873 | case HPSA_DEVICE_RESET_MSG: | |
2874 | c->Request.CDBLen = 16; | |
2875 | c->Request.Type.Type = TYPE_MSG; /* It is a MSG, not a CMD */ | |
2876 | c->Request.Type.Attribute = ATTR_SIMPLE; | |
2877 | c->Request.Type.Direction = XFER_NONE; | |
2878 | c->Request.Timeout = 0; /* Don't time out */ | |
64670ac8 SC |
2879 | memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); |
2880 | c->Request.CDB[0] = cmd; | |
edd16368 SC |
2881 | c->Request.CDB[1] = 0x03; /* Reset target above */ |
2882 | /* If bytes 4-7 are zero, it means reset the */ | |
2883 | /* LunID device */ | |
2884 | c->Request.CDB[4] = 0x00; | |
2885 | c->Request.CDB[5] = 0x00; | |
2886 | c->Request.CDB[6] = 0x00; | |
2887 | c->Request.CDB[7] = 0x00; | |
2888 | break; | |
2889 | ||
2890 | default: | |
2891 | dev_warn(&h->pdev->dev, "unknown message type %d\n", | |
2892 | cmd); | |
2893 | BUG(); | |
2894 | } | |
2895 | } else { | |
2896 | dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); | |
2897 | BUG(); | |
2898 | } | |
2899 | ||
2900 | switch (c->Request.Type.Direction) { | |
2901 | case XFER_READ: | |
2902 | pci_dir = PCI_DMA_FROMDEVICE; | |
2903 | break; | |
2904 | case XFER_WRITE: | |
2905 | pci_dir = PCI_DMA_TODEVICE; | |
2906 | break; | |
2907 | case XFER_NONE: | |
2908 | pci_dir = PCI_DMA_NONE; | |
2909 | break; | |
2910 | default: | |
2911 | pci_dir = PCI_DMA_BIDIRECTIONAL; | |
2912 | } | |
2913 | ||
2914 | hpsa_map_one(h->pdev, c, buff, size, pci_dir); | |
2915 | ||
2916 | return; | |
2917 | } | |
2918 | ||
2919 | /* | |
2920 | * Map (physical) PCI mem into (virtual) kernel space | |
2921 | */ | |
2922 | static void __iomem *remap_pci_mem(ulong base, ulong size) | |
2923 | { | |
2924 | ulong page_base = ((ulong) base) & PAGE_MASK; | |
2925 | ulong page_offs = ((ulong) base) - page_base; | |
2926 | void __iomem *page_remapped = ioremap(page_base, page_offs + size); | |
2927 | ||
2928 | return page_remapped ? (page_remapped + page_offs) : NULL; | |
2929 | } | |
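/*
 * Worked example for remap_pci_mem() (illustrative, assuming 4K pages
 * so PAGE_MASK == ~0xfffUL): for base == 0xfe001040 and size == 0x100,
 * page_base == 0xfe001000 and page_offs == 0x40, so ioremap() maps
 * 0x140 bytes from the page boundary and the caller gets back the
 * mapped address plus 0x40, pointing at the requested registers.
 */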
2930 | ||
2931 | /* Takes cmds off the submission queue and sends them to the hardware, | |
2932 | * then puts them on the queue of cmds waiting for completion. | |
2933 | */ | |
2934 | static void start_io(struct ctlr_info *h) | |
2935 | { | |
2936 | struct CommandList *c; | |
2937 | ||
9e0fc764 SC |
2938 | while (!list_empty(&h->reqQ)) { |
2939 | c = list_entry(h->reqQ.next, struct CommandList, list); | |
edd16368 SC |
2940 | /* can't do anything if fifo is full */ |
2941 | if ((h->access.fifo_full(h))) { | |
2942 | dev_warn(&h->pdev->dev, "fifo full\n"); | |
2943 | break; | |
2944 | } | |
2945 | ||
2946 | /* Get the first entry from the Request Q */ | |
2947 | removeQ(c); | |
2948 | h->Qdepth--; | |
2949 | ||
2950 | /* Tell the controller to execute the command */ | |
2951 | h->access.submit_command(h, c); | |
2952 | ||
2953 | /* Put job onto the queue of commands awaiting completion */ | |
2954 | addQ(&h->cmpQ, c); | |
2955 | } | |
2956 | } | |
2957 | ||
2958 | static inline unsigned long get_next_completion(struct ctlr_info *h) | |
2959 | { | |
2960 | return h->access.command_completed(h); | |
2961 | } | |
2962 | ||
900c5440 | 2963 | static inline bool interrupt_pending(struct ctlr_info *h) |
edd16368 SC |
2964 | { |
2965 | return h->access.intr_pending(h); | |
2966 | } | |
2967 | ||
2968 | static inline long interrupt_not_for_us(struct ctlr_info *h) | |
2969 | { | |
10f66018 SC |
2970 | return (h->access.intr_pending(h) == 0) || |
2971 | (h->interrupts_enabled == 0); | |
edd16368 SC |
2972 | } |
2973 | ||
01a02ffc SC |
2974 | static inline int bad_tag(struct ctlr_info *h, u32 tag_index, |
2975 | u32 raw_tag) | |
edd16368 SC |
2976 | { |
2977 | if (unlikely(tag_index >= h->nr_cmds)) { | |
2978 | dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); | |
2979 | return 1; | |
2980 | } | |
2981 | return 0; | |
2982 | } | |
2983 | ||
01a02ffc | 2984 | static inline void finish_cmd(struct CommandList *c, u32 raw_tag) |
edd16368 SC |
2985 | { |
2986 | removeQ(c); | |
2987 | if (likely(c->cmd_type == CMD_SCSI)) | |
1fb011fb | 2988 | complete_scsi_command(c); |
edd16368 SC |
2989 | else if (c->cmd_type == CMD_IOCTL_PEND) |
2990 | complete(c->waiting); | |
2991 | } | |
2992 | ||
a104c99f SC |
2993 | static inline u32 hpsa_tag_contains_index(u32 tag) |
2994 | { | |
a104c99f SC |
2995 | return tag & DIRECT_LOOKUP_BIT; |
2996 | } | |
2997 | ||
2998 | static inline u32 hpsa_tag_to_index(u32 tag) | |
2999 | { | |
a104c99f SC |
3000 | return tag >> DIRECT_LOOKUP_SHIFT; |
3001 | } | |
3002 | ||
a9a3a273 SC |
3003 | |
3004 | static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag) | |
a104c99f | 3005 | { |
a9a3a273 SC |
3006 | #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1) |
3007 | #define HPSA_SIMPLE_ERROR_BITS 0x03 | |
960a30e7 | 3008 | if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) |
a9a3a273 SC |
3009 | return tag & ~HPSA_SIMPLE_ERROR_BITS; |
3010 | return tag & ~HPSA_PERF_ERROR_BITS; | |
a104c99f SC |
3011 | } |
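/* Putting the three helpers above together (sketch only; the actual
 * constants live in hpsa_cmd.h): DIRECT_LOOKUP_BIT flags a tag that
 * carries the command's pool index above DIRECT_LOOKUP_SHIFT, and the
 * bits below the shift are reused by the hardware for error/status,
 * which is why they must be masked off before any comparison:
 *
 *	u32 raw_tag = get_next_completion(h);
 *	if (hpsa_tag_contains_index(raw_tag)) {
 *		// direct lookup: index straight into h->cmd_pool
 *		c = h->cmd_pool + hpsa_tag_to_index(raw_tag);
 *	} else {
 *		// otherwise match c->busaddr on the completion queue,
 *		// low error bits discarded (see process_nonindexed_cmd)
 *		tag = hpsa_tag_discard_error_bits(h, raw_tag);
 *	}
 */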
3012 | ||
303932fd DB |
3013 | /* process completion of an indexed ("direct lookup") command */ |
3014 | static inline u32 process_indexed_cmd(struct ctlr_info *h, | |
3015 | u32 raw_tag) | |
3016 | { | |
3017 | u32 tag_index; | |
3018 | struct CommandList *c; | |
3019 | ||
3020 | tag_index = hpsa_tag_to_index(raw_tag); | |
3021 | if (bad_tag(h, tag_index, raw_tag)) | |
3022 | return next_command(h); | |
3023 | c = h->cmd_pool + tag_index; | |
3024 | finish_cmd(c, raw_tag); | |
3025 | return next_command(h); | |
3026 | } | |
3027 | ||
3028 | /* process completion of a non-indexed command */ | |
3029 | static inline u32 process_nonindexed_cmd(struct ctlr_info *h, | |
3030 | u32 raw_tag) | |
3031 | { | |
3032 | u32 tag; | |
3033 | struct CommandList *c = NULL; | |
303932fd | 3034 | |
a9a3a273 | 3035 | tag = hpsa_tag_discard_error_bits(h, raw_tag); |
9e0fc764 | 3036 | list_for_each_entry(c, &h->cmpQ, list) { |
303932fd DB |
3037 | if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) { |
3038 | finish_cmd(c, raw_tag); | |
3039 | return next_command(h); | |
3040 | } | |
3041 | } | |
3042 | bad_tag(h, h->nr_cmds + 1, raw_tag); | |
3043 | return next_command(h); | |
3044 | } | |
3045 | ||
64670ac8 SC |
3046 | /* Some controllers, like p400, will give us one interrupt |
3047 | * after a soft reset, even if we turned interrupts off. | |
3048 | * Only need to check for this in the hpsa_xxx_discard_completions | |
3049 | * functions. | |
3050 | */ | |
3051 | static int ignore_bogus_interrupt(struct ctlr_info *h) | |
3052 | { | |
3053 | if (likely(!reset_devices)) | |
3054 | return 0; | |
3055 | ||
3056 | if (likely(h->interrupts_enabled)) | |
3057 | return 0; | |
3058 | ||
3059 | dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " | |
3060 | "(known firmware bug.) Ignoring.\n"); | |
3061 | ||
3062 | return 1; | |
3063 | } | |
3064 | ||
3065 | static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id) | |
3066 | { | |
3067 | struct ctlr_info *h = dev_id; | |
3068 | unsigned long flags; | |
3069 | u32 raw_tag; | |
3070 | ||
3071 | if (ignore_bogus_interrupt(h)) | |
3072 | return IRQ_NONE; | |
3073 | ||
3074 | if (interrupt_not_for_us(h)) | |
3075 | return IRQ_NONE; | |
3076 | spin_lock_irqsave(&h->lock, flags); | |
3077 | while (interrupt_pending(h)) { | |
3078 | raw_tag = get_next_completion(h); | |
3079 | while (raw_tag != FIFO_EMPTY) | |
3080 | raw_tag = next_command(h); | |
3081 | } | |
3082 | spin_unlock_irqrestore(&h->lock, flags); | |
3083 | return IRQ_HANDLED; | |
3084 | } | |
3085 | ||
3086 | static irqreturn_t hpsa_msix_discard_completions(int irq, void *dev_id) | |
3087 | { | |
3088 | struct ctlr_info *h = dev_id; | |
3089 | unsigned long flags; | |
3090 | u32 raw_tag; | |
3091 | ||
3092 | if (ignore_bogus_interrupt(h)) | |
3093 | return IRQ_NONE; | |
3094 | ||
3095 | spin_lock_irqsave(&h->lock, flags); | |
3096 | raw_tag = get_next_completion(h); | |
3097 | while (raw_tag != FIFO_EMPTY) | |
3098 | raw_tag = next_command(h); | |
3099 | spin_unlock_irqrestore(&h->lock, flags); | |
3100 | return IRQ_HANDLED; | |
3101 | } | |
3102 | ||
10f66018 | 3103 | static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id) |
edd16368 SC |
3104 | { |
3105 | struct ctlr_info *h = dev_id; | |
edd16368 | 3106 | unsigned long flags; |
303932fd | 3107 | u32 raw_tag; |
edd16368 SC |
3108 | |
3109 | if (interrupt_not_for_us(h)) | |
3110 | return IRQ_NONE; | |
10f66018 SC |
3111 | spin_lock_irqsave(&h->lock, flags); |
3112 | while (interrupt_pending(h)) { | |
3113 | raw_tag = get_next_completion(h); | |
3114 | while (raw_tag != FIFO_EMPTY) { | |
3115 | if (hpsa_tag_contains_index(raw_tag)) | |
3116 | raw_tag = process_indexed_cmd(h, raw_tag); | |
3117 | else | |
3118 | raw_tag = process_nonindexed_cmd(h, raw_tag); | |
3119 | } | |
3120 | } | |
3121 | spin_unlock_irqrestore(&h->lock, flags); | |
3122 | return IRQ_HANDLED; | |
3123 | } | |
3124 | ||
3125 | static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id) | |
3126 | { | |
3127 | struct ctlr_info *h = dev_id; | |
3128 | unsigned long flags; | |
3129 | u32 raw_tag; | |
3130 | ||
edd16368 | 3131 | spin_lock_irqsave(&h->lock, flags); |
303932fd DB |
3132 | raw_tag = get_next_completion(h); |
3133 | while (raw_tag != FIFO_EMPTY) { | |
3134 | if (hpsa_tag_contains_index(raw_tag)) | |
3135 | raw_tag = process_indexed_cmd(h, raw_tag); | |
3136 | else | |
3137 | raw_tag = process_nonindexed_cmd(h, raw_tag); | |
edd16368 SC |
3138 | } |
3139 | spin_unlock_irqrestore(&h->lock, flags); | |
3140 | return IRQ_HANDLED; | |
3141 | } | |
3142 | ||
a9a3a273 SC |
3143 | /* Send a message CDB to the firmware. Careful, this only works |
3144 | * in simple mode, not performant mode due to the tag lookup. | |
3145 | * We only ever use this immediately after a controller reset. | |
3146 | */ | |
edd16368 SC |
3147 | static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, |
3148 | unsigned char type) | |
3149 | { | |
3150 | struct Command { | |
3151 | struct CommandListHeader CommandHeader; | |
3152 | struct RequestBlock Request; | |
3153 | struct ErrDescriptor ErrorDescriptor; | |
3154 | }; | |
3155 | struct Command *cmd; | |
3156 | static const size_t cmd_sz = sizeof(*cmd) + | |
3157 | sizeof(cmd->ErrorDescriptor); | |
3158 | dma_addr_t paddr64; | |
3159 | uint32_t paddr32, tag; | |
3160 | void __iomem *vaddr; | |
3161 | int i, err; | |
3162 | ||
3163 | vaddr = pci_ioremap_bar(pdev, 0); | |
3164 | if (vaddr == NULL) | |
3165 | return -ENOMEM; | |
3166 | ||
3167 | /* The Inbound Post Queue only accepts 32-bit physical addresses for the | |
3168 | * CCISS commands, so they must be allocated from the lower 4GiB of | |
3169 | * memory. | |
3170 | */ | |
3171 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | |
3172 | if (err) { | |
3173 | iounmap(vaddr); | |
3174 | return -ENOMEM; | |
3175 | } | |
3176 | ||
3177 | cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); | |
3178 | if (cmd == NULL) { | |
3179 | iounmap(vaddr); | |
3180 | return -ENOMEM; | |
3181 | } | |
3182 | ||
3183 | /* This must fit, because of the 32-bit consistent DMA mask. Also, | |
3184 | * although there's no guarantee, we assume that the address is at | |
3185 | * least 4-byte aligned (most likely, it's page-aligned). | |
3186 | */ | |
3187 | paddr32 = paddr64; | |
3188 | ||
3189 | cmd->CommandHeader.ReplyQueue = 0; | |
3190 | cmd->CommandHeader.SGList = 0; | |
3191 | cmd->CommandHeader.SGTotal = 0; | |
3192 | cmd->CommandHeader.Tag.lower = paddr32; | |
3193 | cmd->CommandHeader.Tag.upper = 0; | |
3194 | memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); | |
3195 | ||
3196 | cmd->Request.CDBLen = 16; | |
3197 | cmd->Request.Type.Type = TYPE_MSG; | |
3198 | cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE; | |
3199 | cmd->Request.Type.Direction = XFER_NONE; | |
3200 | cmd->Request.Timeout = 0; /* Don't time out */ | |
3201 | cmd->Request.CDB[0] = opcode; | |
3202 | cmd->Request.CDB[1] = type; | |
3203 | memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */ | |
3204 | cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd); | |
3205 | cmd->ErrorDescriptor.Addr.upper = 0; | |
3206 | cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo); | |
3207 | ||
3208 | writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET); | |
3209 | ||
3210 | for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { | |
3211 | tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); | |
a9a3a273 | 3212 | if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32) |
edd16368 SC |
3213 | break; |
3214 | msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); | |
3215 | } | |
3216 | ||
3217 | iounmap(vaddr); | |
3218 | ||
3219 | /* we leak the DMA buffer here ... no choice since the controller could | |
3220 | * still complete the command. | |
3221 | */ | |
3222 | if (i == HPSA_MSG_SEND_RETRY_LIMIT) { | |
3223 | dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", | |
3224 | opcode, type); | |
3225 | return -ETIMEDOUT; | |
3226 | } | |
3227 | ||
3228 | pci_free_consistent(pdev, cmd_sz, cmd, paddr64); | |
3229 | ||
3230 | if (tag & HPSA_ERROR_BIT) { | |
3231 | dev_err(&pdev->dev, "controller message %02x:%02x failed\n", | |
3232 | opcode, type); | |
3233 | return -EIO; | |
3234 | } | |
3235 | ||
3236 | dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", | |
3237 | opcode, type); | |
3238 | return 0; | |
3239 | } | |
3240 | ||
edd16368 SC |
3241 | #define hpsa_noop(p) hpsa_message(p, 3, 0) |
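/* Usage sketch: hpsa_noop() sends a "no-op" message (opcode 3, type 0)
 * through hpsa_message(); hpsa_init_reset_devices() further down polls
 * with it, up to HPSA_POST_RESET_NOOP_RETRIES times, to detect when the
 * firmware is answering again after a reset:
 *
 *	if (hpsa_noop(pdev) == 0)
 *		break;	// controller is responding
 */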
3242 | ||
1df8552a | 3243 | static int hpsa_controller_hard_reset(struct pci_dev *pdev, |
cf0b08d0 | 3244 | void __iomem *vaddr, u32 use_doorbell) | |
1df8552a SC |
3245 | { |
3246 | u16 pmcsr; | |
3247 | int pos; | |
3248 | ||
3249 | if (use_doorbell) { | |
3250 | /* For everything after the P600, the PCI power state method | |
3251 | * of resetting the controller doesn't work, so we have this | |
3252 | * other way using the doorbell register. | |
3253 | */ | |
3254 | dev_info(&pdev->dev, "using doorbell to reset controller\n"); | |
cf0b08d0 | 3255 | writel(use_doorbell, vaddr + SA5_DOORBELL); |
1df8552a SC |
3256 | } else { /* Try to do it the PCI power state way */ |
3257 | ||
3258 | /* Quoting from the Open CISS Specification: "The Power | |
3259 | * Management Control/Status Register (CSR) controls the power | |
3260 | * state of the device. The normal operating state is D0, | |
3261 | * CSR=00h. The software off state is D3, CSR=03h. To reset | |
3262 | * the controller, place the interface device in D3 then to D0, | |
3263 | * this causes a secondary PCI reset which will reset the | |
3264 | * controller." */ | |
3265 | ||
3266 | pos = pci_find_capability(pdev, PCI_CAP_ID_PM); | |
3267 | if (pos == 0) { | |
3268 | dev_err(&pdev->dev, | |
3269 | "hpsa_reset_controller: " | |
3270 | "PCI PM not supported\n"); | |
3271 | return -ENODEV; | |
3272 | } | |
3273 | dev_info(&pdev->dev, "using PCI PM to reset controller\n"); | |
3274 | /* enter the D3hot power management state */ | |
3275 | pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); | |
3276 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; | |
3277 | pmcsr |= PCI_D3hot; | |
3278 | pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); | |
3279 | ||
3280 | msleep(500); | |
3281 | ||
3282 | /* enter the D0 power management state */ | |
3283 | pmcsr &= ~PCI_PM_CTRL_STATE_MASK; | |
3284 | pmcsr |= PCI_D0; | |
3285 | pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); | |
1df8552a SC |
3286 | } |
3287 | return 0; | |
3288 | } | |
3289 | ||
580ada3c SC |
3290 | static __devinit void init_driver_version(char *driver_version, int len) |
3291 | { | |
3292 | memset(driver_version, 0, len); | |
3293 | strncpy(driver_version, "hpsa " HPSA_DRIVER_VERSION, len - 1); | |
3294 | } | |
3295 | ||
3296 | static __devinit int write_driver_ver_to_cfgtable( | |
3297 | struct CfgTable __iomem *cfgtable) | |
3298 | { | |
3299 | char *driver_version; | |
3300 | int i, size = sizeof(cfgtable->driver_version); | |
3301 | ||
3302 | driver_version = kmalloc(size, GFP_KERNEL); | |
3303 | if (!driver_version) | |
3304 | return -ENOMEM; | |
3305 | ||
3306 | init_driver_version(driver_version, size); | |
3307 | for (i = 0; i < size; i++) | |
3308 | writeb(driver_version[i], &cfgtable->driver_version[i]); | |
3309 | kfree(driver_version); | |
3310 | return 0; | |
3311 | } | |
3312 | ||
3313 | static __devinit void read_driver_ver_from_cfgtable( | |
3314 | struct CfgTable __iomem *cfgtable, unsigned char *driver_ver) | |
3315 | { | |
3316 | int i; | |
3317 | ||
3318 | for (i = 0; i < sizeof(cfgtable->driver_version); i++) | |
3319 | driver_ver[i] = readb(&cfgtable->driver_version[i]); | |
3320 | } | |
3321 | ||
3322 | static __devinit int controller_reset_failed( | |
3323 | struct CfgTable __iomem *cfgtable) | |
3324 | { | |
3325 | ||
3326 | char *driver_ver, *old_driver_ver; | |
3327 | int rc, size = sizeof(cfgtable->driver_version); | |
3328 | ||
3329 | old_driver_ver = kmalloc(2 * size, GFP_KERNEL); | |
3330 | if (!old_driver_ver) | |
3331 | return -ENOMEM; | |
3332 | driver_ver = old_driver_ver + size; | |
3333 | ||
3334 | /* After a reset, the 32 bytes of "driver version" in the cfgtable | |
3335 | * should have been changed, otherwise we know the reset failed. | |
3336 | */ | |
3337 | init_driver_version(old_driver_ver, size); | |
3338 | read_driver_ver_from_cfgtable(cfgtable, driver_ver); | |
3339 | rc = !memcmp(driver_ver, old_driver_ver, size); | |
3340 | kfree(old_driver_ver); | |
3341 | return rc; | |
3342 | } | |
edd16368 | 3343 | /* This does a hard reset of the controller using PCI power management |
1df8552a | 3344 | * states or the doorbell register. | |
edd16368 | 3345 | */ |
1df8552a | 3346 | static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) |
edd16368 | 3347 | { |
1df8552a SC |
3348 | u64 cfg_offset; |
3349 | u32 cfg_base_addr; | |
3350 | u64 cfg_base_addr_index; | |
3351 | void __iomem *vaddr; | |
3352 | unsigned long paddr; | |
580ada3c | 3353 | u32 misc_fw_support; |
270d05de | 3354 | int rc; |
1df8552a | 3355 | struct CfgTable __iomem *cfgtable; |
cf0b08d0 | 3356 | u32 use_doorbell; |
18867659 | 3357 | u32 board_id; |
270d05de | 3358 | u16 command_register; |
edd16368 | 3359 | |
1df8552a SC |
3360 | /* For controllers as old as the P600, this is very nearly |
3361 | * the same thing as | |
edd16368 SC |
3362 | * |
3363 | * pci_save_state(pci_dev); | |
3364 | * pci_set_power_state(pci_dev, PCI_D3hot); | |
3365 | * pci_set_power_state(pci_dev, PCI_D0); | |
3366 | * pci_restore_state(pci_dev); | |
3367 | * | |
1df8552a SC |
3368 | * For controllers newer than the P600, the pci power state |
3369 | * method of resetting doesn't work so we have another way | |
3370 | * using the doorbell register. | |
edd16368 | 3371 | */ |
18867659 | 3372 | |
25c1e56a | 3373 | rc = hpsa_lookup_board_id(pdev, &board_id); |
46380786 | 3374 | if (rc < 0 || !ctlr_is_resettable(board_id)) { |
25c1e56a SC |
3375 | dev_warn(&pdev->dev, "Not resetting device.\n"); |
3376 | return -ENODEV; | |
3377 | } | |
46380786 SC |
3378 | |
3379 | /* if controller is soft- but not hard resettable... */ | |
3380 | if (!ctlr_is_hard_resettable(board_id)) | |
3381 | return -ENOTSUPP; /* try soft reset later. */ | |
18867659 | 3382 | |
270d05de SC |
3383 | /* Save the PCI command register */ |
3384 | pci_read_config_word(pdev, PCI_COMMAND, &command_register); | |
3385 | /* Turn the board off. This is so that later pci_restore_state() | |
3386 | * won't turn the board on before the rest of config space is ready. | |
3387 | */ | |
3388 | pci_disable_device(pdev); | |
3389 | pci_save_state(pdev); | |
edd16368 | 3390 | |
1df8552a SC |
3391 | /* find the first memory BAR, so we can find the cfg table */ |
3392 | rc = hpsa_pci_find_memory_BAR(pdev, &paddr); | |
3393 | if (rc) | |
3394 | return rc; | |
3395 | vaddr = remap_pci_mem(paddr, 0x250); | |
3396 | if (!vaddr) | |
3397 | return -ENOMEM; | |
edd16368 | 3398 | |
1df8552a SC |
3399 | /* find cfgtable in order to check if reset via doorbell is supported */ |
3400 | rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, | |
3401 | &cfg_base_addr_index, &cfg_offset); | |
3402 | if (rc) | |
3403 | goto unmap_vaddr; | |
3404 | cfgtable = remap_pci_mem(pci_resource_start(pdev, | |
3405 | cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); | |
3406 | if (!cfgtable) { | |
3407 | rc = -ENOMEM; | |
3408 | goto unmap_vaddr; | |
3409 | } | |
580ada3c SC |
3410 | rc = write_driver_ver_to_cfgtable(cfgtable); |
3411 | if (rc) | |
3412 | goto unmap_vaddr; | |
edd16368 | 3413 | |
cf0b08d0 SC |
3414 | /* If reset via doorbell register is supported, use that. |
3415 | * There are two such methods. Favor the newest method. | |
3416 | */ | |
1df8552a | 3417 | misc_fw_support = readl(&cfgtable->misc_fw_support); |
cf0b08d0 SC |
3418 | use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2; |
3419 | if (use_doorbell) { | |
3420 | use_doorbell = DOORBELL_CTLR_RESET2; | |
3421 | } else { | |
3422 | use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; | |
3423 | if (use_doorbell) { | |
3424 | dev_warn(&pdev->dev, "Controller claims that " | |
3425 | "'Bit 2 doorbell reset' is " | |
3426 | "supported, but not 'bit 5 doorbell reset'. " | |
3427 | "Firmware update is recommended.\n"); | |
64670ac8 | 3428 | rc = -ENOTSUPP; /* try soft reset */ |
cf0b08d0 SC |
3429 | goto unmap_cfgtable; |
3430 | } | |
3431 | } | |
edd16368 | 3432 | |
1df8552a SC |
3433 | rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell); |
3434 | if (rc) | |
3435 | goto unmap_cfgtable; | |
edd16368 | 3436 | |
270d05de SC |
3437 | pci_restore_state(pdev); |
3438 | rc = pci_enable_device(pdev); | |
3439 | if (rc) { | |
3440 | dev_warn(&pdev->dev, "failed to enable device.\n"); | |
3441 | goto unmap_cfgtable; | |
edd16368 | 3442 | } |
270d05de | 3443 | pci_write_config_word(pdev, PCI_COMMAND, command_register); | |
edd16368 | 3444 | |
1df8552a SC |
3445 | /* Some devices (notably the HP Smart Array 5i Controller) |
3446 | need a little pause here */ | |
3447 | msleep(HPSA_POST_RESET_PAUSE_MSECS); | |
3448 | ||
fe5389c8 | 3449 | /* Wait for board to become not ready, then ready. */ |
2b870cb3 | 3450 | dev_info(&pdev->dev, "Waiting for board to reset.\n"); |
fe5389c8 | 3451 | rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY); |
64670ac8 | 3452 | if (rc) { |
fe5389c8 | 3453 | dev_warn(&pdev->dev, |
64670ac8 SC |
3454 | "failed waiting for board to reset." |
3455 | " Will try soft reset.\n"); | |
3456 | rc = -ENOTSUPP; /* Not expected, but try soft reset later */ | |
3457 | goto unmap_cfgtable; | |
3458 | } | |
fe5389c8 SC |
3459 | rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY); |
3460 | if (rc) { | |
3461 | dev_warn(&pdev->dev, | |
64670ac8 SC |
3462 | "failed waiting for board to become ready " |
3463 | "after hard reset\n"); | |
fe5389c8 SC |
3464 | goto unmap_cfgtable; |
3465 | } | |
fe5389c8 | 3466 | |
580ada3c SC |
3467 | rc = controller_reset_failed(cfgtable); | |
3468 | if (rc < 0) | |
3469 | goto unmap_cfgtable; | |
3470 | if (rc) { | |
64670ac8 SC |
3471 | dev_warn(&pdev->dev, "Unable to successfully reset " |
3472 | "controller. Will try soft reset.\n"); | |
3473 | rc = -ENOTSUPP; | |
580ada3c | 3474 | } else { |
64670ac8 | 3475 | dev_info(&pdev->dev, "board ready after hard reset.\n"); |
1df8552a SC |
3476 | } |
3477 | ||
3478 | unmap_cfgtable: | |
3479 | iounmap(cfgtable); | |
3480 | ||
3481 | unmap_vaddr: | |
3482 | iounmap(vaddr); | |
3483 | return rc; | |
edd16368 SC |
3484 | } |
3485 | ||
3486 | /* | |
3487 | * We cannot read the structure directly; for portability we must use | |
3488 | * the io functions. | |
3489 | * This is for debug only. | |
3490 | */ | |
edd16368 SC |
3491 | static void print_cfg_table(struct device *dev, struct CfgTable *tb) |
3492 | { | |
58f8665c | 3493 | #ifdef HPSA_DEBUG |
edd16368 SC |
3494 | int i; |
3495 | char temp_name[17]; | |
3496 | ||
3497 | dev_info(dev, "Controller Configuration information\n"); | |
3498 | dev_info(dev, "------------------------------------\n"); | |
3499 | for (i = 0; i < 4; i++) | |
3500 | temp_name[i] = readb(&(tb->Signature[i])); | |
3501 | temp_name[4] = '\0'; | |
3502 | dev_info(dev, " Signature = %s\n", temp_name); | |
3503 | dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence))); | |
3504 | dev_info(dev, " Transport methods supported = 0x%x\n", | |
3505 | readl(&(tb->TransportSupport))); | |
3506 | dev_info(dev, " Transport methods active = 0x%x\n", | |
3507 | readl(&(tb->TransportActive))); | |
3508 | dev_info(dev, " Requested transport Method = 0x%x\n", | |
3509 | readl(&(tb->HostWrite.TransportRequest))); | |
3510 | dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n", | |
3511 | readl(&(tb->HostWrite.CoalIntDelay))); | |
3512 | dev_info(dev, " Coalesce Interrupt Count = 0x%x\n", | |
3513 | readl(&(tb->HostWrite.CoalIntCount))); | |
3514 | dev_info(dev, " Max outstanding commands = 0x%d\n", | |
3515 | readl(&(tb->CmdsOutMax))); | |
3516 | dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); | |
3517 | for (i = 0; i < 16; i++) | |
3518 | temp_name[i] = readb(&(tb->ServerName[i])); | |
3519 | temp_name[16] = '\0'; | |
3520 | dev_info(dev, " Server Name = %s\n", temp_name); | |
3521 | dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n", | |
3522 | readl(&(tb->HeartBeat))); | |
edd16368 | 3523 | #endif /* HPSA_DEBUG */ |
58f8665c | 3524 | } |
edd16368 SC |
3525 | |
3526 | static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) | |
3527 | { | |
3528 | int i, offset, mem_type, bar_type; | |
3529 | ||
3530 | if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */ | |
3531 | return 0; | |
3532 | offset = 0; | |
3533 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | |
3534 | bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE; | |
3535 | if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) | |
3536 | offset += 4; | |
3537 | else { | |
3538 | mem_type = pci_resource_flags(pdev, i) & | |
3539 | PCI_BASE_ADDRESS_MEM_TYPE_MASK; | |
3540 | switch (mem_type) { | |
3541 | case PCI_BASE_ADDRESS_MEM_TYPE_32: | |
3542 | case PCI_BASE_ADDRESS_MEM_TYPE_1M: | |
3543 | offset += 4; /* 32 bit */ | |
3544 | break; | |
3545 | case PCI_BASE_ADDRESS_MEM_TYPE_64: | |
3546 | offset += 8; | |
3547 | break; | |
3548 | default: /* reserved in PCI 2.2 */ | |
3549 | dev_warn(&pdev->dev, | |
3550 | "base address is invalid\n"); | |
3551 | return -1; | |
3552 | break; | |
3553 | } | |
3554 | } | |
3555 | if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) | |
3556 | return i + 1; | |
3557 | } | |
3558 | return -1; | |
3559 | } | |
3560 | ||
3561 | /* If MSI/MSI-X is supported by the kernel we will try to enable it on | |
3562 | * controllers that are capable. If not, we use IO-APIC mode. | |
3563 | */ | |
3564 | ||
6b3f4c52 | 3565 | static void __devinit hpsa_interrupt_mode(struct ctlr_info *h) |
edd16368 SC |
3566 | { |
3567 | #ifdef CONFIG_PCI_MSI | |
3568 | int err; | |
3569 | struct msix_entry hpsa_msix_entries[4] = { {0, 0}, {0, 1}, | |
3570 | {0, 2}, {0, 3} | |
3571 | }; | |
3572 | ||
3573 | /* Some boards advertise MSI but don't really support it */ | |
6b3f4c52 SC |
3574 | if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || |
3575 | (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) | |
edd16368 | 3576 | goto default_int_mode; |
55c06c71 SC |
3577 | if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { |
3578 | dev_info(&h->pdev->dev, "MSIX\n"); | |
3579 | err = pci_enable_msix(h->pdev, hpsa_msix_entries, 4); | |
edd16368 SC |
3580 | if (!err) { |
3581 | h->intr[0] = hpsa_msix_entries[0].vector; | |
3582 | h->intr[1] = hpsa_msix_entries[1].vector; | |
3583 | h->intr[2] = hpsa_msix_entries[2].vector; | |
3584 | h->intr[3] = hpsa_msix_entries[3].vector; | |
3585 | h->msix_vector = 1; | |
3586 | return; | |
3587 | } | |
3588 | if (err > 0) { | |
55c06c71 | 3589 | dev_warn(&h->pdev->dev, "only %d MSI-X vectors " |
edd16368 SC |
3590 | "available\n", err); |
3591 | goto default_int_mode; | |
3592 | } else { | |
55c06c71 | 3593 | dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", |
edd16368 SC |
3594 | err); |
3595 | goto default_int_mode; | |
3596 | } | |
3597 | } | |
55c06c71 SC |
3598 | if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { |
3599 | dev_info(&h->pdev->dev, "MSI\n"); | |
3600 | if (!pci_enable_msi(h->pdev)) | |
edd16368 SC |
3601 | h->msi_vector = 1; |
3602 | else | |
55c06c71 | 3603 | dev_warn(&h->pdev->dev, "MSI init failed\n"); |
edd16368 SC |
3604 | } |
3605 | default_int_mode: | |
3606 | #endif /* CONFIG_PCI_MSI */ | |
3607 | /* if we get here we're going to use the default interrupt mode */ | |
a9a3a273 | 3608 | h->intr[h->intr_mode] = h->pdev->irq; |
edd16368 SC |
3609 | } |
3610 | ||
e5c880d1 SC |
3611 | static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id) |
3612 | { | |
3613 | int i; | |
3614 | u32 subsystem_vendor_id, subsystem_device_id; | |
3615 | ||
3616 | subsystem_vendor_id = pdev->subsystem_vendor; | |
3617 | subsystem_device_id = pdev->subsystem_device; | |
3618 | *board_id = ((subsystem_device_id << 16) & 0xffff0000) | | |
3619 | subsystem_vendor_id; | |
3620 | ||
3621 | for (i = 0; i < ARRAY_SIZE(products); i++) | |
3622 | if (*board_id == products[i].board_id) | |
3623 | return i; | |
3624 | ||
6798cc0a SC |
3625 | if ((subsystem_vendor_id != PCI_VENDOR_ID_HP && |
3626 | subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) || | |
3627 | !hpsa_allow_any) { | |
e5c880d1 SC |
3628 | dev_warn(&pdev->dev, "unrecognized board ID: " |
3629 | "0x%08x, ignoring.\n", *board_id); | |
3630 | return -ENODEV; | |
3631 | } | |
3632 | return ARRAY_SIZE(products) - 1; /* generic unknown smart array */ | |
3633 | } | |
3634 | ||
85bdbabb SC |
3635 | static inline bool hpsa_board_disabled(struct pci_dev *pdev) |
3636 | { | |
3637 | u16 command; | |
3638 | ||
3639 | (void) pci_read_config_word(pdev, PCI_COMMAND, &command); | |
3640 | return ((command & PCI_COMMAND_MEMORY) == 0); | |
3641 | } | |
3642 | ||
12d2cd47 | 3643 | static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev, |
3a7774ce SC |
3644 | unsigned long *memory_bar) |
3645 | { | |
3646 | int i; | |
3647 | ||
3648 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) | |
12d2cd47 | 3649 | if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { |
3a7774ce | 3650 | /* addressing mode bits already removed */ |
12d2cd47 SC |
3651 | *memory_bar = pci_resource_start(pdev, i); |
3652 | dev_dbg(&pdev->dev, "memory BAR = %lx\n", | |
3a7774ce SC |
3653 | *memory_bar); |
3654 | return 0; | |
3655 | } | |
12d2cd47 | 3656 | dev_warn(&pdev->dev, "no memory BAR found\n"); |
3a7774ce SC |
3657 | return -ENODEV; |
3658 | } | |
3659 | ||
fe5389c8 SC |
3660 | static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev, |
3661 | void __iomem *vaddr, int wait_for_ready) | |
2c4c8c8b | 3662 | { |
fe5389c8 | 3663 | int i, iterations; |
2c4c8c8b | 3664 | u32 scratchpad; |
fe5389c8 SC |
3665 | if (wait_for_ready) |
3666 | iterations = HPSA_BOARD_READY_ITERATIONS; | |
3667 | else | |
3668 | iterations = HPSA_BOARD_NOT_READY_ITERATIONS; | |
2c4c8c8b | 3669 | |
fe5389c8 SC |
3670 | for (i = 0; i < iterations; i++) { |
3671 | scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); | |
3672 | if (wait_for_ready) { | |
3673 | if (scratchpad == HPSA_FIRMWARE_READY) | |
3674 | return 0; | |
3675 | } else { | |
3676 | if (scratchpad != HPSA_FIRMWARE_READY) | |
3677 | return 0; | |
3678 | } | |
2c4c8c8b SC |
3679 | msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS); |
3680 | } | |
fe5389c8 | 3681 | dev_warn(&pdev->dev, "board not ready, timed out.\n"); |
2c4c8c8b SC |
3682 | return -ENODEV; |
3683 | } | |
3684 | ||
a51fd47f SC |
3685 | static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev, |
3686 | void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index, | |
3687 | u64 *cfg_offset) | |
3688 | { | |
3689 | *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET); | |
3690 | *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET); | |
3691 | *cfg_base_addr &= (u32) 0x0000ffff; | |
3692 | *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr); | |
3693 | if (*cfg_base_addr_index == -1) { | |
3694 | dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n"); | |
3695 | return -ENODEV; | |
3696 | } | |
3697 | return 0; | |
3698 | } | |
3699 | ||
77c4495c | 3700 | static int __devinit hpsa_find_cfgtables(struct ctlr_info *h) |
edd16368 | 3701 | { |
01a02ffc SC |
3702 | u64 cfg_offset; |
3703 | u32 cfg_base_addr; | |
3704 | u64 cfg_base_addr_index; | |
303932fd | 3705 | u32 trans_offset; |
a51fd47f | 3706 | int rc; |
77c4495c | 3707 | |
a51fd47f SC |
3708 | rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, |
3709 | &cfg_base_addr_index, &cfg_offset); | |
3710 | if (rc) | |
3711 | return rc; | |
77c4495c | 3712 | h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, |
a51fd47f | 3713 | cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable)); |
77c4495c SC |
3714 | if (!h->cfgtable) |
3715 | return -ENOMEM; | |
580ada3c SC |
3716 | rc = write_driver_ver_to_cfgtable(h->cfgtable); |
3717 | if (rc) | |
3718 | return rc; | |
77c4495c | 3719 | /* Find performant mode table. */ |
a51fd47f | 3720 | trans_offset = readl(&h->cfgtable->TransMethodOffset); |
77c4495c SC |
3721 | h->transtable = remap_pci_mem(pci_resource_start(h->pdev, |
3722 | cfg_base_addr_index)+cfg_offset+trans_offset, | |
3723 | sizeof(*h->transtable)); | |
3724 | if (!h->transtable) | |
3725 | return -ENOMEM; | |
3726 | return 0; | |
3727 | } | |
3728 | ||
cba3d38b SC |
3729 | static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h) |
3730 | { | |
3731 | h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); | |
72ceeaec SC |
3732 | |
3733 | /* Limit commands in memory limited kdump scenario. */ | |
3734 | if (reset_devices && h->max_commands > 32) | |
3735 | h->max_commands = 32; | |
3736 | ||
cba3d38b SC |
3737 | if (h->max_commands < 16) { |
3738 | dev_warn(&h->pdev->dev, "Controller reports " | |
3739 | "max supported commands of %d, an obvious lie. " | |
3740 | "Using 16. Ensure that firmware is up to date.\n", | |
3741 | h->max_commands); | |
3742 | h->max_commands = 16; | |
3743 | } | |
3744 | } | |
3745 | ||
b93d7536 SC |
3746 | /* Interrogate the hardware for some limits: |
3747 | * max commands, max SG elements without chaining, and with chaining, | |
3748 | * SG chain block size, etc. | |
3749 | */ | |
3750 | static void __devinit hpsa_find_board_params(struct ctlr_info *h) | |
3751 | { | |
cba3d38b | 3752 | hpsa_get_max_perf_mode_cmds(h); |
b93d7536 SC |
3753 | h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */ |
3754 | h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); | |
3755 | /* | |
3756 | * Limit in-command s/g elements to 32 to save DMA-able memory. | |
3757 | * However, the spec says if the value is 0, use 31. | |
3758 | */ | |
3759 | h->max_cmd_sg_entries = 31; | |
3760 | if (h->maxsgentries > 512) { | |
3761 | h->max_cmd_sg_entries = 32; | |
3762 | h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1; | |
3763 | h->maxsgentries--; /* save one for chain pointer */ | |
3764 | } else { | |
3765 | h->maxsgentries = 31; /* default to traditional values */ | |
3766 | h->chainsize = 0; | |
3767 | } | |
3768 | } | |
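/* Worked example (illustrative firmware value): if the config table
 * reports MaxScatterGatherElements = 2048, the logic above yields
 * max_cmd_sg_entries = 32, chainsize = 2048 - 32 + 1 = 2017, and
 * maxsgentries = 2047 (one entry reserved for the chain pointer);
 * a controller reporting 512 or fewer instead falls back to
 * maxsgentries = 31 with no chaining.
 */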
3769 | ||
76c46e49 SC |
3770 | static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) |
3771 | { | |
3772 | if ((readb(&h->cfgtable->Signature[0]) != 'C') || | |
3773 | (readb(&h->cfgtable->Signature[1]) != 'I') || | |
3774 | (readb(&h->cfgtable->Signature[2]) != 'S') || | |
3775 | (readb(&h->cfgtable->Signature[3]) != 'S')) { | |
3776 | dev_warn(&h->pdev->dev, "not a valid CISS config table\n"); | |
3777 | return false; | |
3778 | } | |
3779 | return true; | |
3780 | } | |
3781 | ||
f7c39101 SC |
3782 | /* Need to enable prefetch in the SCSI core for 6400 in x86 */ |
3783 | static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h) | |
3784 | { | |
3785 | #ifdef CONFIG_X86 | |
3786 | u32 prefetch; | |
3787 | ||
3788 | prefetch = readl(&(h->cfgtable->SCSI_Prefetch)); | |
3789 | prefetch |= 0x100; | |
3790 | writel(prefetch, &(h->cfgtable->SCSI_Prefetch)); | |
3791 | #endif | |
3792 | } | |
3793 | ||
3d0eab67 SC |
3794 | /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result |
3795 | * in a prefetch beyond physical memory. | |
3796 | */ | |
3797 | static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h) | |
3798 | { | |
3799 | u32 dma_prefetch; | |
3800 | ||
3801 | if (h->board_id != 0x3225103C) | |
3802 | return; | |
3803 | dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); | |
3804 | dma_prefetch |= 0x8000; | |
3805 | writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); | |
3806 | } | |
3807 | ||
3f4336f3 | 3808 | static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h) |
eb6b2ae9 SC |
3809 | { |
3810 | int i; | |
6eaf46fd SC |
3811 | u32 doorbell_value; |
3812 | unsigned long flags; | |
eb6b2ae9 SC |
3813 | |
3814 | /* under certain very rare conditions, this can take awhile. | |
3815 | * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right | |
3816 | * as we enter this code.) | |
3817 | */ | |
3818 | for (i = 0; i < MAX_CONFIG_WAIT; i++) { | |
6eaf46fd SC |
3819 | spin_lock_irqsave(&h->lock, flags); |
3820 | doorbell_value = readl(h->vaddr + SA5_DOORBELL); | |
3821 | spin_unlock_irqrestore(&h->lock, flags); | |
382be668 | 3822 | if (!(doorbell_value & CFGTBL_ChangeReq)) |
eb6b2ae9 SC |
3823 | break; |
3824 | /* delay and try again */ | |
60d3f5b0 | 3825 | usleep_range(10000, 20000); |
eb6b2ae9 | 3826 | } |
3f4336f3 SC |
3827 | } |
3828 | ||
3829 | static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h) | |
3830 | { | |
3831 | u32 trans_support; | |
3832 | ||
3833 | trans_support = readl(&(h->cfgtable->TransportSupport)); | |
3834 | if (!(trans_support & SIMPLE_MODE)) | |
3835 | return -ENOTSUPP; | |
3836 | ||
3837 | h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); | |
3838 | /* Update the field, and then ring the doorbell */ | |
3839 | writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); | |
3840 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); | |
3841 | hpsa_wait_for_mode_change_ack(h); | |
eb6b2ae9 | 3842 | print_cfg_table(&h->pdev->dev, h->cfgtable); |
eb6b2ae9 SC |
3843 | if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) { |
3844 | dev_warn(&h->pdev->dev, | |
3845 | "unable to get board into simple mode\n"); | |
3846 | return -ENODEV; | |
3847 | } | |
960a30e7 | 3848 | h->transMethod = CFGTBL_Trans_Simple; |
eb6b2ae9 SC |
3849 | return 0; |
3850 | } | |
3851 | ||
77c4495c SC |
3852 | static int __devinit hpsa_pci_init(struct ctlr_info *h) |
3853 | { | |
eb6b2ae9 | 3854 | int prod_index, err; |
edd16368 | 3855 | |
e5c880d1 SC |
3856 | prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); |
3857 | if (prod_index < 0) | |
3858 | return -ENODEV; | |
3859 | h->product_name = products[prod_index].product_name; | |
3860 | h->access = *(products[prod_index].access); | |
edd16368 | 3861 | |
85bdbabb | 3862 | if (hpsa_board_disabled(h->pdev)) { |
55c06c71 | 3863 | dev_warn(&h->pdev->dev, "controller appears to be disabled\n"); |
edd16368 SC |
3864 | return -ENODEV; |
3865 | } | |
55c06c71 | 3866 | err = pci_enable_device(h->pdev); |
edd16368 | 3867 | if (err) { |
55c06c71 | 3868 | dev_warn(&h->pdev->dev, "unable to enable PCI device\n"); |
edd16368 SC |
3869 | return err; |
3870 | } | |
3871 | ||
55c06c71 | 3872 | err = pci_request_regions(h->pdev, "hpsa"); |
edd16368 | 3873 | if (err) { |
55c06c71 SC |
3874 | dev_err(&h->pdev->dev, |
3875 | "cannot obtain PCI resources, aborting\n"); | |
edd16368 SC |
3876 | return err; |
3877 | } | |
6b3f4c52 | 3878 | hpsa_interrupt_mode(h); |
12d2cd47 | 3879 | err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr); |
3a7774ce | 3880 | if (err) |
edd16368 | 3881 | goto err_out_free_res; |
edd16368 | 3882 | h->vaddr = remap_pci_mem(h->paddr, 0x250); |
204892e9 SC |
3883 | if (!h->vaddr) { |
3884 | err = -ENOMEM; | |
3885 | goto err_out_free_res; | |
3886 | } | |
fe5389c8 | 3887 | err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); |
2c4c8c8b | 3888 | if (err) |
edd16368 | 3889 | goto err_out_free_res; |
77c4495c SC |
3890 | err = hpsa_find_cfgtables(h); |
3891 | if (err) | |
edd16368 | 3892 | goto err_out_free_res; |
b93d7536 | 3893 | hpsa_find_board_params(h); |
edd16368 | 3894 | |
76c46e49 | 3895 | if (!hpsa_CISS_signature_present(h)) { |
edd16368 SC |
3896 | err = -ENODEV; |
3897 | goto err_out_free_res; | |
3898 | } | |
f7c39101 | 3899 | hpsa_enable_scsi_prefetch(h); |
3d0eab67 | 3900 | hpsa_p600_dma_prefetch_quirk(h); |
eb6b2ae9 SC |
3901 | err = hpsa_enter_simple_mode(h); |
3902 | if (err) | |
edd16368 | 3903 | goto err_out_free_res; |
edd16368 SC |
3904 | return 0; |
3905 | ||
3906 | err_out_free_res: | |
204892e9 SC |
3907 | if (h->transtable) |
3908 | iounmap(h->transtable); | |
3909 | if (h->cfgtable) | |
3910 | iounmap(h->cfgtable); | |
3911 | if (h->vaddr) | |
3912 | iounmap(h->vaddr); | |
edd16368 SC |
3913 | /* |
3914 | * Deliberately omit pci_disable_device(): it does something nasty to | |
3915 | * Smart Array controllers that pci_enable_device does not undo | |
3916 | */ | |
55c06c71 | 3917 | pci_release_regions(h->pdev); |
edd16368 SC |
3918 | return err; |
3919 | } | |
3920 | ||
339b2b14 SC |
3921 | static void __devinit hpsa_hba_inquiry(struct ctlr_info *h) |
3922 | { | |
3923 | int rc; | |
3924 | ||
3925 | #define HBA_INQUIRY_BYTE_COUNT 64 | |
3926 | h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL); | |
3927 | if (!h->hba_inquiry_data) | |
3928 | return; | |
3929 | rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0, | |
3930 | h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT); | |
3931 | if (rc != 0) { | |
3932 | kfree(h->hba_inquiry_data); | |
3933 | h->hba_inquiry_data = NULL; | |
3934 | } | |
3935 | } | |
3936 | ||
4c2a8c40 SC |
3937 | static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev) |
3938 | { | |
1df8552a | 3939 | int rc, i; |
4c2a8c40 SC |
3940 | |
3941 | if (!reset_devices) | |
3942 | return 0; | |
3943 | ||
1df8552a SC |
3944 | /* Reset the controller with a PCI power-cycle or via doorbell */ |
3945 | rc = hpsa_kdump_hard_reset_controller(pdev); | |
4c2a8c40 | 3946 | |
1df8552a SC |
3947 | /* -ENOTSUPP here means we cannot reset the controller |
3948 | * but it's already (and still) up and running in | |
18867659 SC |
3949 | * "performant mode". Or, it might be 640x, which can't reset |
3950 | * due to concerns about shared bbwc between 6402/6404 pair. | |
1df8552a SC |
3951 | */ |
3952 | if (rc == -ENOTSUPP) | |
64670ac8 | 3953 | return rc; /* just try to do the kdump anyhow. */ |
1df8552a SC |
3954 | if (rc) |
3955 | return -ENODEV; | |
4c2a8c40 SC |
3956 | |
3957 | /* Now try to get the controller to respond to a no-op */ | |
2b870cb3 | 3958 | dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n"); |
4c2a8c40 SC |
3959 | for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) { |
3960 | if (hpsa_noop(pdev) == 0) | |
3961 | break; | |
3962 | else | |
3963 | dev_warn(&pdev->dev, "no-op failed%s\n", | |
3964 | (i < 11 ? "; re-trying" : "")); | |
3965 | } | |
3966 | return 0; | |
3967 | } | |
3968 | ||
2e9d1b36 SC |
3969 | static __devinit int hpsa_allocate_cmd_pool(struct ctlr_info *h) |
3970 | { | |
3971 | h->cmd_pool_bits = kzalloc( | |
3972 | DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * | |
3973 | sizeof(unsigned long), GFP_KERNEL); | |
3974 | h->cmd_pool = pci_alloc_consistent(h->pdev, | |
3975 | h->nr_cmds * sizeof(*h->cmd_pool), | |
3976 | &(h->cmd_pool_dhandle)); | |
3977 | h->errinfo_pool = pci_alloc_consistent(h->pdev, | |
3978 | h->nr_cmds * sizeof(*h->errinfo_pool), | |
3979 | &(h->errinfo_pool_dhandle)); | |
3980 | if ((h->cmd_pool_bits == NULL) | |
3981 | || (h->cmd_pool == NULL) | |
3982 | || (h->errinfo_pool == NULL)) { | |
3983 | dev_err(&h->pdev->dev, "out of memory in %s", __func__); | |
3984 | return -ENOMEM; | |
3985 | } | |
3986 | return 0; | |
3987 | } | |
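/* Sizing sketch (illustrative values): with nr_cmds = 252 on a 64-bit
 * kernel, cmd_pool_bits is DIV_ROUND_UP(252, 64) * 8 = 32 bytes of
 * allocation bitmap, backed by 252 CommandList slots and 252 ErrorInfo
 * slots of DMA-able memory. The allocator (cmd_alloc(), earlier in this
 * file) presumably scans cmd_pool_bits for a clear bit and hands out
 * the matching cmd_pool slot; freeing a command clears the bit again.
 */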
3988 | ||
3989 | static void hpsa_free_cmd_pool(struct ctlr_info *h) | |
3990 | { | |
3991 | kfree(h->cmd_pool_bits); | |
3992 | if (h->cmd_pool) | |
3993 | pci_free_consistent(h->pdev, | |
3994 | h->nr_cmds * sizeof(struct CommandList), | |
3995 | h->cmd_pool, h->cmd_pool_dhandle); | |
3996 | if (h->errinfo_pool) | |
3997 | pci_free_consistent(h->pdev, | |
3998 | h->nr_cmds * sizeof(struct ErrorInfo), | |
3999 | h->errinfo_pool, | |
4000 | h->errinfo_pool_dhandle); | |
4001 | } | |
4002 | ||
0ae01a32 SC |
4003 | static int hpsa_request_irq(struct ctlr_info *h, |
4004 | irqreturn_t (*msixhandler)(int, void *), | |
4005 | irqreturn_t (*intxhandler)(int, void *)) | |
4006 | { | |
4007 | int rc; | |
4008 | ||
4009 | if (h->msix_vector || h->msi_vector) | |
4010 | rc = request_irq(h->intr[h->intr_mode], msixhandler, | |
4011 | IRQF_DISABLED, h->devname, h); | |
4012 | else | |
4013 | rc = request_irq(h->intr[h->intr_mode], intxhandler, | |
4014 | IRQF_DISABLED, h->devname, h); | |
4015 | if (rc) { | |
4016 | dev_err(&h->pdev->dev, "unable to get irq %d for %s\n", | |
4017 | h->intr[h->intr_mode], h->devname); | |
4018 | return -ENODEV; | |
4019 | } | |
4020 | return 0; | |
4021 | } | |
4022 | ||
64670ac8 SC |
4023 | static int __devinit hpsa_kdump_soft_reset(struct ctlr_info *h) |
4024 | { | |
4025 | if (hpsa_send_host_reset(h, RAID_CTLR_LUNID, | |
4026 | HPSA_RESET_TYPE_CONTROLLER)) { | |
4027 | dev_warn(&h->pdev->dev, "Resetting array controller failed.\n"); | |
4028 | return -EIO; | |
4029 | } | |
4030 | ||
4031 | dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); | |
4032 | if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) { | |
4033 | dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); | |
4034 | return -1; | |
4035 | } | |
4036 | ||
4037 | dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); | |
4038 | if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) { | |
4039 | dev_warn(&h->pdev->dev, "Board failed to become ready " | |
4040 | "after soft reset.\n"); | |
4041 | return -1; | |
4042 | } | |
4043 | ||
4044 | return 0; | |
4045 | } | |
4046 | ||
4047 | static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h) | |
4048 | { | |
4049 | free_irq(h->intr[h->intr_mode], h); | |
4050 | #ifdef CONFIG_PCI_MSI | |
4051 | if (h->msix_vector) | |
4052 | pci_disable_msix(h->pdev); | |
4053 | else if (h->msi_vector) | |
4054 | pci_disable_msi(h->pdev); | |
4055 | #endif /* CONFIG_PCI_MSI */ | |
4056 | hpsa_free_sg_chain_blocks(h); | |
4057 | hpsa_free_cmd_pool(h); | |
4058 | kfree(h->blockFetchTable); | |
4059 | pci_free_consistent(h->pdev, h->reply_pool_size, | |
4060 | h->reply_pool, h->reply_pool_dhandle); | |
4061 | if (h->vaddr) | |
4062 | iounmap(h->vaddr); | |
4063 | if (h->transtable) | |
4064 | iounmap(h->transtable); | |
4065 | if (h->cfgtable) | |
4066 | iounmap(h->cfgtable); | |
4067 | pci_release_regions(h->pdev); | |
4068 | kfree(h); | |
4069 | } | |
4070 | ||
edd16368 SC |
4071 | static int __devinit hpsa_init_one(struct pci_dev *pdev, |
4072 | const struct pci_device_id *ent) | |
4073 | { | |
4c2a8c40 | 4074 | int dac, rc; |
edd16368 | 4075 | struct ctlr_info *h; |
64670ac8 SC |
4076 | int try_soft_reset = 0; |
4077 | unsigned long flags; | |
edd16368 SC |
4078 | |
4079 | if (number_of_controllers == 0) | |
4080 | printk(KERN_INFO DRIVER_NAME "\n"); | |
edd16368 | 4081 | |
4c2a8c40 | 4082 | rc = hpsa_init_reset_devices(pdev); |
64670ac8 SC |
4083 | if (rc) { |
4084 | if (rc != -ENOTSUPP) | |
4085 | return rc; | |
4086 | /* If the reset fails in a particular way (it has no way to do | |
4087 | * a proper hard reset, so returns -ENOTSUPP) we can try to do | |
4088 | * a soft reset once we get the controller configured up to the | |
4089 | * point that it can accept a command. | |
4090 | */ | |
4091 | try_soft_reset = 1; | |
4092 | rc = 0; | |
4093 | } | |
4094 | ||
4095 | reinit_after_soft_reset: | |
edd16368 | 4096 | |
303932fd DB |
4097 | /* Command structures must be aligned on a 32-byte boundary because |
4098 | * the 5 lower bits of the address are used by the hardware and by | |
4099 | * the driver. See comments in hpsa.h for more info. | |
4100 | */ | |
4101 | #define COMMANDLIST_ALIGNMENT 32 | |
4102 | BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); | |
edd16368 SC |
4103 | h = kzalloc(sizeof(*h), GFP_KERNEL); |
4104 | if (!h) | |
ecd9aad4 | 4105 | return -ENOMEM; |
edd16368 | 4106 | |
55c06c71 | 4107 | h->pdev = pdev; |
edd16368 | 4108 | h->busy_initializing = 1; |
a9a3a273 | 4109 | h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; |
9e0fc764 SC |
4110 | INIT_LIST_HEAD(&h->cmpQ); |
4111 | INIT_LIST_HEAD(&h->reqQ); | |
6eaf46fd SC |
4112 | spin_lock_init(&h->lock); |
4113 | spin_lock_init(&h->scan_lock); | |
55c06c71 | 4114 | rc = hpsa_pci_init(h); |
ecd9aad4 | 4115 | if (rc != 0) |
edd16368 SC |
4116 | goto clean1; |
4117 | ||
4118 | sprintf(h->devname, "hpsa%d", number_of_controllers); | |
4119 | h->ctlr = number_of_controllers; | |
4120 | number_of_controllers++; | |
edd16368 SC |
4121 | |
4122 | /* configure PCI DMA stuff */ | |
ecd9aad4 SC |
4123 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); |
4124 | if (rc == 0) { | |
edd16368 | 4125 | dac = 1; |
ecd9aad4 SC |
4126 | } else { |
4127 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | |
4128 | if (rc == 0) { | |
4129 | dac = 0; | |
4130 | } else { | |
4131 | dev_err(&pdev->dev, "no suitable DMA available\n"); | |
4132 | goto clean1; | |
4133 | } | |
edd16368 SC |
4134 | } |
4135 | ||
4136 | /* make sure the board interrupts are off */ | |
4137 | h->access.set_intr_mask(h, HPSA_INTR_OFF); | |
10f66018 | 4138 | |
0ae01a32 | 4139 | if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx)) |
edd16368 | 4140 | goto clean2; |
303932fd DB |
4141 | dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", |
4142 | h->devname, pdev->device, | |
a9a3a273 | 4143 | h->intr[h->intr_mode], dac ? "" : " not"); |
2e9d1b36 | 4144 | if (hpsa_allocate_cmd_pool(h)) |
edd16368 | 4145 | goto clean4; |
33a2ffce SC |
4146 | if (hpsa_allocate_sg_chain_blocks(h)) |
4147 | goto clean4; | |
a08a8471 SC |
4148 | init_waitqueue_head(&h->scan_wait_queue); |
4149 | h->scan_finished = 1; /* no scan currently in progress */ | |
edd16368 SC |
4150 | |
4151 | pci_set_drvdata(pdev, h); | |
9a41338e SC |
4152 | h->ndevices = 0; |
4153 | h->scsi_host = NULL; | |
4154 | spin_lock_init(&h->devlock); | |
64670ac8 SC |
4155 | hpsa_put_ctlr_into_performant_mode(h); |
4156 | ||
4157 | /* At this point, the controller is ready to take commands. | |
4158 | * Now, if reset_devices and the hard reset didn't work, try | |
4159 | * the soft reset and see if that works. | |
4160 | */ | |
4161 | if (try_soft_reset) { | |
4162 | ||
4163 | /* This is kind of gross. We may or may not get a completion | |
4164 | * from the soft reset command, and if we do, then the value | |
4165 | * from the fifo may or may not be valid. So, we wait 10 secs | |
4166 | * after the reset, throwing away any completions we get during | |
4167 | * that time. Unregister the interrupt handler and register | |
4168 | * fake ones to scoop up any residual completions. | |
4169 | */ | |
4170 | spin_lock_irqsave(&h->lock, flags); | |
4171 | h->access.set_intr_mask(h, HPSA_INTR_OFF); | |
4172 | spin_unlock_irqrestore(&h->lock, flags); | |
4173 | free_irq(h->intr[h->intr_mode], h); | |
4174 | rc = hpsa_request_irq(h, hpsa_msix_discard_completions, | |
4175 | hpsa_intx_discard_completions); | |
4176 | if (rc) { | |
4177 | dev_warn(&h->pdev->dev, "Failed to request_irq after " | |
4178 | "soft reset.\n"); | |
4179 | goto clean4; | |
4180 | } | |
4181 | ||
4182 | rc = hpsa_kdump_soft_reset(h); | |
4183 | if (rc) | |
4184 | /* Neither hard nor soft reset worked, we're hosed. */ | |
4185 | goto clean4; | |
4186 | ||
4187 | dev_info(&h->pdev->dev, "Board READY.\n"); | |
4188 | dev_info(&h->pdev->dev, | |
4189 | "Waiting for stale completions to drain.\n"); | |
4190 | h->access.set_intr_mask(h, HPSA_INTR_ON); | |
4191 | msleep(10000); | |
4192 | h->access.set_intr_mask(h, HPSA_INTR_OFF); | |
4193 | ||
4194 | rc = controller_reset_failed(h->cfgtable); | |
4195 | if (rc) | |
4196 | dev_info(&h->pdev->dev, | |
4197 | "Soft reset appears to have failed.\n"); | |
4198 | ||
4199 | /* since the controller's reset, we have to go back and re-init | |
4200 | * everything. Easiest to just forget what we've done and do it | |
4201 | * all over again. | |
4202 | */ | |
4203 | hpsa_undo_allocations_after_kdump_soft_reset(h); | |
4204 | try_soft_reset = 0; | |
4205 | if (rc) | |
4206 | /* don't go to clean4, we already unallocated */ | |
4207 | return -ENODEV; | |
4208 | ||
4209 | goto reinit_after_soft_reset; | |
4210 | } | |
edd16368 SC |
4211 | |
4212 | /* Turn the interrupts on so we can service requests */ | |
4213 | h->access.set_intr_mask(h, HPSA_INTR_ON); | |
4214 | ||
339b2b14 | 4215 | hpsa_hba_inquiry(h); |
edd16368 SC |
4216 | hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ |
4217 | h->busy_initializing = 0; | |
4218 | return 1; | |
4219 | ||
4220 | clean4: | |
33a2ffce | 4221 | hpsa_free_sg_chain_blocks(h); |
2e9d1b36 | 4222 | hpsa_free_cmd_pool(h); |
a9a3a273 | 4223 | free_irq(h->intr[h->intr_mode], h); |
edd16368 SC |
4224 | clean2: |
4225 | clean1: | |
4226 | h->busy_initializing = 0; | |
4227 | kfree(h); | |
ecd9aad4 | 4228 | return rc; |
edd16368 SC |
4229 | } |
4230 | ||
4231 | static void hpsa_flush_cache(struct ctlr_info *h) | |
4232 | { | |
4233 | char *flush_buf; | |
4234 | struct CommandList *c; | |
4235 | ||
4236 | flush_buf = kzalloc(4, GFP_KERNEL); | |
4237 | if (!flush_buf) | |
4238 | return; | |
4239 | ||
4240 | c = cmd_special_alloc(h); | |
4241 | if (!c) { | |
4242 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); | |
4243 | goto out_of_memory; | |
4244 | } | |
4245 | fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0, | |
4246 | RAID_CTLR_LUNID, TYPE_CMD); | |
4247 | hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE); | |
4248 | if (c->err_info->CommandStatus != 0) | |
4249 | dev_warn(&h->pdev->dev, | |
4250 | "error flushing cache on controller\n"); | |
4251 | cmd_special_free(h, c); | |
4252 | out_of_memory: | |
4253 | kfree(flush_buf); | |
4254 | } | |
4255 | ||
4256 | static void hpsa_shutdown(struct pci_dev *pdev) | |
4257 | { | |
4258 | struct ctlr_info *h; | |
4259 | ||
4260 | h = pci_get_drvdata(pdev); | |
4261 | /* Send the flush cache command so that all data in the | |
4262 | * battery-backed cache is written out to disk, then turn | |
4263 | * the board interrupts off before shutdown. | |
4264 | */ | |
4265 | hpsa_flush_cache(h); | |
4266 | h->access.set_intr_mask(h, HPSA_INTR_OFF); | |
a9a3a273 | 4267 | free_irq(h->intr[h->intr_mode], h); |
edd16368 SC |
4268 | #ifdef CONFIG_PCI_MSI |
4269 | if (h->msix_vector) | |
4270 | pci_disable_msix(h->pdev); | |
4271 | else if (h->msi_vector) | |
4272 | pci_disable_msi(h->pdev); | |
4273 | #endif /* CONFIG_PCI_MSI */ | |
4274 | } | |
4275 | ||
4276 | static void __devexit hpsa_remove_one(struct pci_dev *pdev) | |
4277 | { | |
4278 | struct ctlr_info *h; | |
4279 | ||
4280 | if (pci_get_drvdata(pdev) == NULL) { | |
4281 | dev_err(&pdev->dev, "unable to remove device \n"); | |
4282 | return; | |
4283 | } | |
4284 | h = pci_get_drvdata(pdev); | |
edd16368 SC |
4285 | hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */ |
4286 | hpsa_shutdown(pdev); | |
4287 | iounmap(h->vaddr); | |
204892e9 SC |
4288 | iounmap(h->transtable); |
4289 | iounmap(h->cfgtable); | |
33a2ffce | 4290 | hpsa_free_sg_chain_blocks(h); |
edd16368 SC |
4291 | pci_free_consistent(h->pdev, |
4292 | h->nr_cmds * sizeof(struct CommandList), | |
4293 | h->cmd_pool, h->cmd_pool_dhandle); | |
4294 | pci_free_consistent(h->pdev, | |
4295 | h->nr_cmds * sizeof(struct ErrorInfo), | |
4296 | h->errinfo_pool, h->errinfo_pool_dhandle); | |
303932fd DB |
4297 | pci_free_consistent(h->pdev, h->reply_pool_size, |
4298 | h->reply_pool, h->reply_pool_dhandle); | |
edd16368 | 4299 | kfree(h->cmd_pool_bits); |
303932fd | 4300 | kfree(h->blockFetchTable); |
339b2b14 | 4301 | kfree(h->hba_inquiry_data); |
edd16368 SC |
4302 | /* |
4303 | * Deliberately omit pci_disable_device(): it does something nasty to | |
4304 | * Smart Array controllers that pci_enable_device does not undo | |
4305 | */ | |
4306 | pci_release_regions(pdev); | |
4307 | pci_set_drvdata(pdev, NULL); | |
edd16368 SC |
4308 | kfree(h); |
4309 | } | |
4310 | ||
4311 | static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev, | |
4312 | __attribute__((unused)) pm_message_t state) | |
4313 | { | |
4314 | return -ENOSYS; | |
4315 | } | |
4316 | ||
4317 | static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev) | |
4318 | { | |
4319 | return -ENOSYS; | |
4320 | } | |
4321 | ||
4322 | static struct pci_driver hpsa_pci_driver = { | |
4323 | .name = "hpsa", | |
4324 | .probe = hpsa_init_one, | |
4325 | .remove = __devexit_p(hpsa_remove_one), | |
4326 | .id_table = hpsa_pci_device_id, /* id_table */ | |
4327 | .shutdown = hpsa_shutdown, | |
4328 | .suspend = hpsa_suspend, | |
4329 | .resume = hpsa_resume, | |
4330 | }; | |
4331 | ||
303932fd DB |
4332 | /* Fill in bucket_map[], given nsgs (the max number of |
4333 | * scatter gather elements supported) and bucket[], | |
4334 | * which is an array of 8 integers. The bucket[] array | |
4335 | * contains 8 different DMA transfer sizes (in 16 | |
4336 | * byte increments) which the controller uses to fetch | |
4337 | * commands. This function fills in bucket_map[], which | |
4338 | * maps a given number of scatter gather elements to one of | |
4339 | * the 8 DMA transfer sizes. The point of it is to allow the | |
4340 | * controller to only do as much DMA as needed to fetch the | |
4341 | * command, with the DMA transfer size encoded in the lower | |
4342 | * bits of the command address. | |
4343 | */ | |
4344 | static void calc_bucket_map(int bucket[], int num_buckets, | |
4345 | int nsgs, int *bucket_map) | |
4346 | { | |
4347 | int i, j, b, size; | |
4348 | ||
4349 | /* even a command with 0 SGs requires 4 blocks */ | |
4350 | #define MINIMUM_TRANSFER_BLOCKS 4 | |
4351 | #define NUM_BUCKETS 8 | |
4352 | /* Note, bucket_map must have nsgs+1 entries. */ | |
4353 | for (i = 0; i <= nsgs; i++) { | |
4354 | /* Compute size of a command with i SG entries */ | |
4355 | size = i + MINIMUM_TRANSFER_BLOCKS; | |
4356 | b = num_buckets; /* Assume the biggest bucket */ | |
4357 | /* Find the bucket that is just big enough */ | |
4358 | for (j = 0; j < 8; j++) { | |
4359 | if (bucket[j] >= size) { | |
4360 | b = j; | |
4361 | break; | |
4362 | } | |
4363 | } | |
4364 | /* for a command with i SG entries, use bucket b. */ | |
4365 | bucket_map[i] = b; | |
4366 | } | |
4367 | } | |
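/* Worked example: with the bft[] table used below,
 * bucket[] = {5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4}, a command
 * with i = 3 SG entries needs 3 + MINIMUM_TRANSFER_BLOCKS = 7
 * sixteen-byte blocks; the first bucket >= 7 is bucket[2] = 8, so
 * bucket_map[3] = 2 and the controller fetches 8 * 16 = 128 bytes
 * for that command instead of the worst-case command size.
 */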
4368 | ||
960a30e7 SC |
4369 | static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h, |
4370 | u32 use_short_tags) | |
303932fd | 4371 | { |
6c311b57 SC |
4372 | int i; |
4373 | unsigned long register_value; | |
def342bd SC |
4374 | |
4375 | /* This is a bit complicated. There are 8 registers on | |
4376 | * the controller which we write to in order to tell it the 8 | |
4377 | * different sizes of commands there may be. It's a way of | |
4378 | * reducing the DMA done to fetch each command. Encoded into | |
4379 | * each command's tag are 3 bits which communicate to the controller | |
4380 | * which of the eight sizes that command fits within. The size of | |
4381 | * each command depends on how many scatter gather entries there are. | |
4382 | * Each SG entry requires 16 bytes. The eight registers are programmed | |
4383 | * with the number of 16-byte blocks a command of that size requires. | |
4384 | * The smallest command possible requires 5 such 16-byte blocks; | |
4385 | * the largest command possible requires MAXSGENTRIES + 4 16-byte | |
4386 | * blocks. Note, this only extends to the SG entries contained | |
4387 | * within the command block, and does not extend to chained blocks | |
4388 | * of SG elements. bft[] contains the eight values we write to | |
4389 | * the registers. They are not evenly distributed, but have more | |
4390 | * sizes for small commands, and fewer sizes for larger commands. | |
4391 | */ | |
4392 | int bft[8] = {5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4}; | |
4393 | BUILD_BUG_ON(28 > MAXSGENTRIES + 4); | |
303932fd DB |
4394 | /* 5 = 1 s/g entry or 4k |
4395 | * 6 = 2 s/g entry or 8k | |
4396 | * 8 = 4 s/g entry or 16k | |
4397 | * 10 = 6 s/g entry or 24k | |
4398 | */ | |
303932fd DB |
4399 | |
4400 | h->reply_pool_wraparound = 1; /* spec: init to 1 */ | |
4401 | ||
4402 | /* Controller spec: zero out this buffer. */ | |
4403 | memset(h->reply_pool, 0, h->reply_pool_size); | |
4404 | h->reply_pool_head = h->reply_pool; | |
4405 | ||
303932fd DB |
4406 | bft[7] = h->max_sg_entries + 4; |
4407 | calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable); | |
4408 | for (i = 0; i < 8; i++) | |
4409 | writel(bft[i], &h->transtable->BlockFetch[i]); | |
4410 | ||
4411 | /* size of controller ring buffer */ | |
4412 | writel(h->max_commands, &h->transtable->RepQSize); | |
4413 | writel(1, &h->transtable->RepQCount); | |
4414 | writel(0, &h->transtable->RepQCtrAddrLow32); | |
4415 | writel(0, &h->transtable->RepQCtrAddrHigh32); | |
4416 | writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32); | |
4417 | writel(0, &h->transtable->RepQAddr0High32); | |
960a30e7 | 4418 | writel(CFGTBL_Trans_Performant | use_short_tags, |
303932fd DB |
4419 | &(h->cfgtable->HostWrite.TransportRequest)); |
4420 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); | |
3f4336f3 | 4421 | hpsa_wait_for_mode_change_ack(h); |
303932fd DB |
4422 | register_value = readl(&(h->cfgtable->TransportActive)); |
4423 | if (!(register_value & CFGTBL_Trans_Performant)) { | |
4424 | dev_warn(&h->pdev->dev, "unable to get board into" | |
4425 | " performant mode\n"); | |
4426 | return; | |
4427 | } | |
960a30e7 SC |
4428 | /* Change the access methods to the performant access methods */ |
4429 | h->access = SA5_performant_access; | |
4430 | h->transMethod = CFGTBL_Trans_Performant; | |
6c311b57 SC |
4431 | } |
4432 | ||
4433 | static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) | |
4434 | { | |
4435 | u32 trans_support; | |
4436 | ||
02ec19c8 SC |
4437 | if (hpsa_simple_mode) |
4438 | return; | |
4439 | ||
6c311b57 SC |
4440 | trans_support = readl(&(h->cfgtable->TransportSupport)); |
4441 | if (!(trans_support & PERFORMANT_MODE)) | |
4442 | return; | |
4443 | ||
cba3d38b | 4444 | hpsa_get_max_perf_mode_cmds(h); |
6c311b57 SC |
4445 | h->max_sg_entries = 32; |
4446 | /* Performant mode ring buffer and supporting data structures */ | |
4447 | h->reply_pool_size = h->max_commands * sizeof(u64); | |
4448 | h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size, | |
4449 | &(h->reply_pool_dhandle)); | |
4450 | ||
4451 | /* Need a block fetch table for performant mode */ | |
4452 | h->blockFetchTable = kmalloc(((h->max_sg_entries+1) * | |
4453 | sizeof(u32)), GFP_KERNEL); | |
4454 | ||
4455 | if ((h->reply_pool == NULL) | |
4456 | || (h->blockFetchTable == NULL)) | |
4457 | goto clean_up; | |
4458 | ||
960a30e7 SC |
4459 | hpsa_enter_performant_mode(h, |
4460 | trans_support & CFGTBL_Trans_use_short_tags); | |
303932fd DB |
4461 | |
4462 | return; | |
4463 | ||
4464 | clean_up: | |
4465 | if (h->reply_pool) | |
4466 | pci_free_consistent(h->pdev, h->reply_pool_size, | |
4467 | h->reply_pool, h->reply_pool_dhandle); | |
4468 | kfree(h->blockFetchTable); | |
4469 | } | |
4470 | ||
edd16368 SC |
4471 | /* |
4472 | * This is it. Register the PCI driver information for the cards we control; | |
4473 | * the OS will call our registered routines when it finds one of our cards. | |
4474 | */ | |
4475 | static int __init hpsa_init(void) | |
4476 | { | |
31468401 | 4477 | return pci_register_driver(&hpsa_pci_driver); |
edd16368 SC |
4478 | } |
4479 | ||
4480 | static void __exit hpsa_cleanup(void) | |
4481 | { | |
4482 | pci_unregister_driver(&hpsa_pci_driver); | |
edd16368 SC |
4483 | } |
4484 | ||
4485 | module_init(hpsa_init); | |
4486 | module_exit(hpsa_cleanup); |