/*
 * Driver for HP iLO/iLO2 management processor.
 *
 * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
 *	David Altobelli <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include "hpilo.h"

static struct class *ilo_class;
static unsigned int ilo_major;
static char ilo_hwdev[MAX_ILO_DEV];

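/*
 * A fifo entry packs a descriptor index and the packet length in
 * quadwords (hence the <<3 / >>3 conversions to and from bytes),
 * plus occupied/consumed state bits that ENTRY_MASK_NOSTATE strips off.
 */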
static inline int get_entry_id(int entry)
{
        return (entry & ENTRY_MASK_DESCRIPTOR) >> ENTRY_BITPOS_DESCRIPTOR;
}

static inline int get_entry_len(int entry)
{
        return ((entry & ENTRY_MASK_QWORDS) >> ENTRY_BITPOS_QWORDS) << 3;
}

static inline int mk_entry(int id, int len)
{
        int qlen = len & 7 ? (len >> 3) + 1 : len >> 3;
        return id << ENTRY_BITPOS_DESCRIPTOR | qlen << ENTRY_BITPOS_QWORDS;
}

static inline int desc_mem_sz(int nr_entry)
{
        return nr_entry << L2_QENTRY_SZ;
}

/*
 * FIFO queues, shared with hardware.
 *
 * If a queue has empty slots, an entry is added to the queue tail,
 * and that entry is marked as occupied.
 * Entries can be dequeued from the head of the list, when the device
 * has marked the entry as consumed.
 *
 * Returns true on successful queue/dequeue, false on failure.
 */
static int fifo_enqueue(struct ilo_hwinfo *hw, char *fifobar, int entry)
{
        struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
        int ret = 0;

        spin_lock(&hw->fifo_lock);
        if (!(fifo_q->fifobar[(fifo_q->tail + 1) & fifo_q->imask]
              & ENTRY_MASK_O)) {
                fifo_q->fifobar[fifo_q->tail & fifo_q->imask] |=
                                (entry & ENTRY_MASK_NOSTATE) | fifo_q->merge;
                fifo_q->tail += 1;
                ret = 1;
        }
        spin_unlock(&hw->fifo_lock);

        return ret;
}

static int fifo_dequeue(struct ilo_hwinfo *hw, char *fifobar, int *entry)
{
        struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
        int ret = 0;
        u64 c;

        spin_lock(&hw->fifo_lock);
        c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
        if (c & ENTRY_MASK_C) {
                if (entry)
                        *entry = c & ENTRY_MASK_NOSTATE;

                fifo_q->fifobar[fifo_q->head & fifo_q->imask] =
                                                        (c | ENTRY_MASK) + 1;
                fifo_q->head += 1;
                ret = 1;
        }
        spin_unlock(&hw->fifo_lock);

        return ret;
}

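/*
 * Packet helpers: select the send or receive fifo of a channel and
 * translate between (id, len) pairs and raw fifo entries.  On dequeue,
 * the packet id also locates the packet's slot in descriptor memory.
 */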
static int ilo_pkt_enqueue(struct ilo_hwinfo *hw, struct ccb *ccb,
                           int dir, int id, int len)
{
        char *fifobar;
        int entry;

        if (dir == SENDQ)
                fifobar = ccb->ccb_u1.send_fifobar;
        else
                fifobar = ccb->ccb_u3.recv_fifobar;

        entry = mk_entry(id, len);
        return fifo_enqueue(hw, fifobar, entry);
}

static int ilo_pkt_dequeue(struct ilo_hwinfo *hw, struct ccb *ccb,
                           int dir, int *id, int *len, void **pkt)
{
        char *fifobar, *desc;
        int entry = 0, pkt_id = 0;
        int ret;

        if (dir == SENDQ) {
                fifobar = ccb->ccb_u1.send_fifobar;
                desc = ccb->ccb_u2.send_desc;
        } else {
                fifobar = ccb->ccb_u3.recv_fifobar;
                desc = ccb->ccb_u4.recv_desc;
        }

        ret = fifo_dequeue(hw, fifobar, &entry);
        if (ret) {
                pkt_id = get_entry_id(entry);
                if (id)
                        *id = pkt_id;
                if (len)
                        *len = get_entry_len(entry);
                if (pkt)
                        *pkt = (void *)(desc + desc_mem_sz(pkt_id));
        }

        return ret;
}

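/*
 * Channel doorbell: writing 1 rings the doorbell to tell iLO that work
 * is pending on this channel; writing 2 clears it.
 */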
static inline void doorbell_set(struct ccb *ccb)
{
        iowrite8(1, ccb->ccb_u5.db_base);
}

static inline void doorbell_clr(struct ccb *ccb)
{
        iowrite8(2, ccb->ccb_u5.db_base);
}
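
/*
 * Pack the queue geometry (entry size, fifo index mask, descriptor
 * limit) together with the A(ctive) and G(o) flags into one control word.
 */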
static inline int ctrl_set(int l2sz, int idxmask, int desclim)
{
        int active = 0, go = 1;
        return l2sz << CTRL_BITPOS_L2SZ |
               idxmask << CTRL_BITPOS_FIFOINDEXMASK |
               desclim << CTRL_BITPOS_DESCLIMIT |
               active << CTRL_BITPOS_A |
               go << CTRL_BITPOS_G;
}

static void ctrl_setup(struct ccb *ccb, int nr_desc, int l2desc_sz)
{
        /* for simplicity, use the same parameters for send and recv ctrls */
        ccb->send_ctrl = ctrl_set(l2desc_sz, nr_desc - 1, nr_desc - 1);
        ccb->recv_ctrl = ctrl_set(l2desc_sz, nr_desc - 1, nr_desc - 1);
}

static inline int fifo_sz(int nr_entry)
{
        /* size of a fifo is determined by the number of entries it contains */
        return (nr_entry * sizeof(u64)) + FIFOHANDLESIZE;
}

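/*
 * A fifo region begins with the driver's bookkeeping handle (head and
 * tail indices, reset flag, masks) followed by the entry array shared
 * with the device; FIFOHANDLESIZE accounts for the handle portion.
 */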
static void fifo_setup(void *base_addr, int nr_entry)
{
        struct fifo *fifo_q = base_addr;
        int i;

        /* set up an empty fifo */
        fifo_q->head = 0;
        fifo_q->tail = 0;
        fifo_q->reset = 0;
        fifo_q->nrents = nr_entry;
        fifo_q->imask = nr_entry - 1;
        fifo_q->merge = ENTRY_MASK_O;

        for (i = 0; i < nr_entry; i++)
                fifo_q->fifobar[i] = 0;
}

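/*
 * Tear down one channel: tell the hardware to stop, wait for the send
 * and receive controls to go inactive, clear the mapped ccb, and free
 * the DMA memory backing the queues.
 */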
static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data)
{
        struct ccb *driver_ccb;
        struct ccb __iomem *device_ccb;
        int retries;

        driver_ccb = &data->driver_ccb;
        device_ccb = data->mapped_ccb;

        /* complicated dance to tell the hw we are stopping */
        doorbell_clr(driver_ccb);
        iowrite32(ioread32(&device_ccb->send_ctrl) & ~(1 << CTRL_BITPOS_G),
                  &device_ccb->send_ctrl);
        iowrite32(ioread32(&device_ccb->recv_ctrl) & ~(1 << CTRL_BITPOS_G),
                  &device_ccb->recv_ctrl);

        /* give iLO some time to process stop request */
        for (retries = 1000; retries > 0; retries--) {
                doorbell_set(driver_ccb);
                udelay(1);
                if (!(ioread32(&device_ccb->send_ctrl) & (1 << CTRL_BITPOS_A))
                    &&
                    !(ioread32(&device_ccb->recv_ctrl) & (1 << CTRL_BITPOS_A)))
                        break;
        }
        if (retries == 0)
                dev_err(&pdev->dev, "Closing, but controller still active\n");

        /* clear the hw ccb */
        memset_io(device_ccb, 0, sizeof(struct ccb));

        /* free resources used to back send/recv queues */
        pci_free_consistent(pdev, data->dma_size, data->dma_va, data->dma_pa);
}

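/*
 * Bring up one channel: allocate a single DMA region holding the send
 * and receive fifos plus descriptor memory, build a driver ccb (virtual
 * addresses) and a device ccb (bus addresses), copy the device ccb into
 * iLO shared memory, and prime both queues with empty packets.
 */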
static int ilo_ccb_open(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
{
        char *dma_va, *dma_pa;
        int pkt_id, pkt_sz, i, error;
        struct ccb *driver_ccb, *ilo_ccb;
        struct pci_dev *pdev;

        driver_ccb = &data->driver_ccb;
        ilo_ccb = &data->ilo_ccb;
        pdev = hw->ilo_dev;

        data->dma_size = 2 * fifo_sz(NR_QENTRY) +
                         2 * desc_mem_sz(NR_QENTRY) +
                         ILO_START_ALIGN + ILO_CACHE_SZ;

        error = -ENOMEM;
        data->dma_va = pci_alloc_consistent(pdev, data->dma_size,
                                            &data->dma_pa);
        if (!data->dma_va)
                goto out;

        dma_va = (char *)data->dma_va;
        dma_pa = (char *)data->dma_pa;

        memset(dma_va, 0, data->dma_size);

        dma_va = (char *)roundup((unsigned long)dma_va, ILO_START_ALIGN);
        dma_pa = (char *)roundup((unsigned long)dma_pa, ILO_START_ALIGN);

        /*
         * Create two ccb's, one with virt addrs, one with phys addrs.
         * Copy the phys addr ccb to device shared mem.
         */
        ctrl_setup(driver_ccb, NR_QENTRY, L2_QENTRY_SZ);
        ctrl_setup(ilo_ccb, NR_QENTRY, L2_QENTRY_SZ);

        fifo_setup(dma_va, NR_QENTRY);
        driver_ccb->ccb_u1.send_fifobar = dma_va + FIFOHANDLESIZE;
        ilo_ccb->ccb_u1.send_fifobar = dma_pa + FIFOHANDLESIZE;
        dma_va += fifo_sz(NR_QENTRY);
        dma_pa += fifo_sz(NR_QENTRY);

        dma_va = (char *)roundup((unsigned long)dma_va, ILO_CACHE_SZ);
        dma_pa = (char *)roundup((unsigned long)dma_pa, ILO_CACHE_SZ);

        fifo_setup(dma_va, NR_QENTRY);
        driver_ccb->ccb_u3.recv_fifobar = dma_va + FIFOHANDLESIZE;
        ilo_ccb->ccb_u3.recv_fifobar = dma_pa + FIFOHANDLESIZE;
        dma_va += fifo_sz(NR_QENTRY);
        dma_pa += fifo_sz(NR_QENTRY);

        driver_ccb->ccb_u2.send_desc = dma_va;
        ilo_ccb->ccb_u2.send_desc = dma_pa;
        dma_pa += desc_mem_sz(NR_QENTRY);
        dma_va += desc_mem_sz(NR_QENTRY);

        driver_ccb->ccb_u4.recv_desc = dma_va;
        ilo_ccb->ccb_u4.recv_desc = dma_pa;

        driver_ccb->channel = slot;
        ilo_ccb->channel = slot;

        driver_ccb->ccb_u5.db_base = hw->db_vaddr + (slot << L2_DB_SIZE);
        ilo_ccb->ccb_u5.db_base = NULL; /* hw ccb's doorbell is not used */

        /* copy the ccb with physical addrs to device memory */
        data->mapped_ccb = (struct ccb __iomem *)
                                (hw->ram_vaddr + (slot * ILOHW_CCB_SZ));
        memcpy_toio(data->mapped_ccb, ilo_ccb, sizeof(struct ccb));

        /* put packets on the send and receive queues */
        pkt_sz = 0;
        for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++) {
                ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, pkt_sz);
                doorbell_set(driver_ccb);
        }

        pkt_sz = desc_mem_sz(1);
        for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++)
                ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, pkt_sz);

        doorbell_clr(driver_ccb);

        /* make sure iLO is really handling requests */
        for (i = 1000; i > 0; i--) {
                if (ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, NULL, NULL))
                        break;
                udelay(1);
        }

        if (i) {
                ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, 0);
                doorbell_set(driver_ccb);
        } else {
                dev_err(&pdev->dev, "Open could not dequeue a packet\n");
                error = -EBUSY;
                goto free;
        }

        return 0;
free:
        pci_free_consistent(pdev, data->dma_size, data->dma_va, data->dma_pa);
out:
        return error;
}

static inline int is_channel_reset(struct ccb *ccb)
{
        /* check for this particular channel needing a reset */
        return FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset;
}

static inline void set_channel_reset(struct ccb *ccb)
{
        /* set a flag indicating this channel needs a reset */
        FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset = 1;
}

static inline int is_device_reset(struct ilo_hwinfo *hw)
{
        /* check for global reset condition */
        return ioread32(&hw->mmio_vaddr[DB_OUT]) & (1 << DB_RESET);
}

static inline void clear_device(struct ilo_hwinfo *hw)
{
        /* clear the device (reset bits, pending channel entries) */
        iowrite32(-1, &hw->mmio_vaddr[DB_OUT]);
}

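/* caller must hold hw->alloc_lock */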
static void ilo_locked_reset(struct ilo_hwinfo *hw)
{
        int slot;

        /*
         * Mapped memory is zeroed on ilo reset, so set a per ccb flag
         * to indicate that this ccb needs to be closed and reopened.
         */
        for (slot = 0; slot < MAX_CCB; slot++) {
                if (!hw->ccb_alloc[slot])
                        continue;
                set_channel_reset(&hw->ccb_alloc[slot]->driver_ccb);
        }

        clear_device(hw);
}

static void ilo_reset(struct ilo_hwinfo *hw)
{
        spin_lock(&hw->alloc_lock);

        /* reset might have been handled after lock was taken */
        if (is_device_reset(hw))
                ilo_locked_reset(hw);

        spin_unlock(&hw->alloc_lock);
}

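/*
 * Read path: retry briefly for a completed packet on the receive queue,
 * copy at most len bytes of it to userspace, then return the descriptor
 * to the receive queue for reuse.
 */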
static ssize_t ilo_read(struct file *fp, char __user *buf,
                        size_t len, loff_t *off)
{
        int err, found, cnt, pkt_id, pkt_len;
        struct ccb_data *data;
        struct ccb *driver_ccb;
        struct ilo_hwinfo *hw;
        void *pkt;

        data = fp->private_data;
        driver_ccb = &data->driver_ccb;
        hw = data->ilo_hw;

        if (is_device_reset(hw) || is_channel_reset(driver_ccb)) {
                /*
                 * If the device has been reset, applications
                 * need to close and reopen all ccbs.
                 */
                ilo_reset(hw);
                return -ENODEV;
        }

        /*
         * This function is to be called when data is expected
         * in the channel, and will return an error if no packet is found
         * during the loop below.  The sleep/retry logic is to allow
         * applications to call read() immediately post write(),
         * and give iLO some time to process the sent packet.
         */
        cnt = 20;
        do {
                /* look for a received packet */
                found = ilo_pkt_dequeue(hw, driver_ccb, RECVQ, &pkt_id,
                                        &pkt_len, &pkt);
                if (found)
                        break;
                cnt--;
                msleep(100);
        } while (!found && cnt);

        if (!found)
                return -EAGAIN;

        /* only copy the length of the received packet */
        if (pkt_len < len)
                len = pkt_len;

        err = copy_to_user(buf, pkt, len);

        /* return the received packet to the queue */
        ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, desc_mem_sz(1));

        return err ? -EFAULT : len;
}

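/*
 * Write path: claim a free send descriptor, copy the user's command
 * into it, then queue it on the send fifo and ring the doorbell.
 */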
static ssize_t ilo_write(struct file *fp, const char __user *buf,
                         size_t len, loff_t *off)
{
        int err, pkt_id, pkt_len;
        struct ccb_data *data;
        struct ccb *driver_ccb;
        struct ilo_hwinfo *hw;
        void *pkt;

        data = fp->private_data;
        driver_ccb = &data->driver_ccb;
        hw = data->ilo_hw;

        if (is_device_reset(hw) || is_channel_reset(driver_ccb)) {
                /*
                 * If the device has been reset, applications
                 * need to close and reopen all ccbs.
                 */
                ilo_reset(hw);
                return -ENODEV;
        }

        /* get a packet to send the user command */
        if (!ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, &pkt_len, &pkt))
                return -EBUSY;

        /* limit the length to the length of the packet */
        if (pkt_len < len)
                len = pkt_len;

        /* on failure, set the len to 0 to return empty packet to the device */
        err = copy_from_user(pkt, buf, len);
        if (err)
                len = 0;

        /* send the packet */
        ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, len);
        doorbell_set(driver_ccb);

        return err ? -EFAULT : len;
}

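/*
 * Opens of a channel are reference counted; the last close tears the
 * channel down and frees its shared ccb data.
 */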
static int ilo_close(struct inode *ip, struct file *fp)
{
        int slot;
        struct ccb_data *data;
        struct ilo_hwinfo *hw;

        slot = iminor(ip) % MAX_CCB;
        hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);

        spin_lock(&hw->alloc_lock);

        if (is_device_reset(hw))
                ilo_locked_reset(hw);

        if (hw->ccb_alloc[slot]->ccb_cnt == 1) {

                data = fp->private_data;

                ilo_ccb_close(hw->ilo_dev, data);

                kfree(data);
                hw->ccb_alloc[slot] = NULL;
        } else
                hw->ccb_alloc[slot]->ccb_cnt--;

        spin_unlock(&hw->alloc_lock);

        return 0;
}

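/*
 * The minor number selects the channel (slot).  The first open of a
 * slot creates its ccb; later opens share it, unless either side asked
 * for exclusive (O_EXCL) access.
 */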
static int ilo_open(struct inode *ip, struct file *fp)
{
        int slot, error;
        struct ccb_data *data;
        struct ilo_hwinfo *hw;

        slot = iminor(ip) % MAX_CCB;
        hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);

        /* new ccb allocation */
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        spin_lock(&hw->alloc_lock);

        if (is_device_reset(hw))
                ilo_locked_reset(hw);

        /* each fd private_data holds sw/hw view of ccb */
        if (hw->ccb_alloc[slot] == NULL) {
                /* create a channel control block for this minor */
                error = ilo_ccb_open(hw, data, slot);
                if (!error) {
                        hw->ccb_alloc[slot] = data;
                        hw->ccb_alloc[slot]->ccb_cnt = 1;
                        hw->ccb_alloc[slot]->ccb_excl = fp->f_flags & O_EXCL;
                        hw->ccb_alloc[slot]->ilo_hw = hw;
                } else
                        kfree(data);
        } else {
                kfree(data);
                if (fp->f_flags & O_EXCL || hw->ccb_alloc[slot]->ccb_excl) {
                        /*
                         * The channel exists, and either this open
                         * or a previous open of this channel wants
                         * exclusive access.
                         */
                        error = -EBUSY;
                } else {
                        hw->ccb_alloc[slot]->ccb_cnt++;
                        error = 0;
                }
        }
        spin_unlock(&hw->alloc_lock);

        if (!error)
                fp->private_data = hw->ccb_alloc[slot];

        return error;
}

static const struct file_operations ilo_fops = {
        .owner          = THIS_MODULE,
        .read           = ilo_read,
        .write          = ilo_write,
        .open           = ilo_open,
        .release        = ilo_close,
};

static void ilo_unmap_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
        pci_iounmap(pdev, hw->db_vaddr);
        pci_iounmap(pdev, hw->ram_vaddr);
        pci_iounmap(pdev, hw->mmio_vaddr);
}

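/*
 * The device exposes three BARs: mmio registers, shared memory for the
 * per-channel ccbs, and the doorbell aperture.
 */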
static int __devinit ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
        int error = -ENOMEM;

        /* map the memory mapped i/o registers */
        hw->mmio_vaddr = pci_iomap(pdev, 1, 0);
        if (hw->mmio_vaddr == NULL) {
                dev_err(&pdev->dev, "Error mapping mmio\n");
                goto out;
        }

        /* map the adapter shared memory region */
        hw->ram_vaddr = pci_iomap(pdev, 2, MAX_CCB * ILOHW_CCB_SZ);
        if (hw->ram_vaddr == NULL) {
                dev_err(&pdev->dev, "Error mapping shared mem\n");
                goto mmio_free;
        }

        /* map the doorbell aperture */
        hw->db_vaddr = pci_iomap(pdev, 3, MAX_CCB * ONE_DB_SIZE);
        if (hw->db_vaddr == NULL) {
                dev_err(&pdev->dev, "Error mapping doorbell\n");
                goto ram_free;
        }

        return 0;
ram_free:
        pci_iounmap(pdev, hw->ram_vaddr);
mmio_free:
        pci_iounmap(pdev, hw->mmio_vaddr);
out:
        return error;
}

static void ilo_remove(struct pci_dev *pdev)
{
        int i, minor;
        struct ilo_hwinfo *ilo_hw = pci_get_drvdata(pdev);

        clear_device(ilo_hw);

        minor = MINOR(ilo_hw->cdev.dev);
        for (i = minor; i < minor + MAX_CCB; i++)
                device_destroy(ilo_class, MKDEV(ilo_major, i));

        cdev_del(&ilo_hw->cdev);
        ilo_unmap_device(pdev, ilo_hw);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        kfree(ilo_hw);
        ilo_hwdev[(minor / MAX_CCB)] = 0;
}

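/*
 * Each iLO device provides MAX_CCB channels; every channel gets its own
 * character device minor (hpilo!d<devnum>ccb<slot>).
 */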
static int __devinit ilo_probe(struct pci_dev *pdev,
                               const struct pci_device_id *ent)
{
        int devnum, minor, start, error;
        struct ilo_hwinfo *ilo_hw;

        /* find a free range for device files */
        for (devnum = 0; devnum < MAX_ILO_DEV; devnum++) {
                if (ilo_hwdev[devnum] == 0) {
                        ilo_hwdev[devnum] = 1;
                        break;
                }
        }

        if (devnum == MAX_ILO_DEV) {
                dev_err(&pdev->dev, "Error finding free device\n");
                return -ENODEV;
        }

        /* track global allocations for this device */
        error = -ENOMEM;
        ilo_hw = kzalloc(sizeof(*ilo_hw), GFP_KERNEL);
        if (!ilo_hw)
                goto out;

        ilo_hw->ilo_dev = pdev;
        spin_lock_init(&ilo_hw->alloc_lock);
        spin_lock_init(&ilo_hw->fifo_lock);

        error = pci_enable_device(pdev);
        if (error)
                goto free;

        pci_set_master(pdev);

        error = pci_request_regions(pdev, ILO_NAME);
        if (error)
                goto disable;

        error = ilo_map_device(pdev, ilo_hw);
        if (error)
                goto free_regions;

        pci_set_drvdata(pdev, ilo_hw);
        clear_device(ilo_hw);

        cdev_init(&ilo_hw->cdev, &ilo_fops);
        ilo_hw->cdev.owner = THIS_MODULE;
        start = devnum * MAX_CCB;
        error = cdev_add(&ilo_hw->cdev, MKDEV(ilo_major, start), MAX_CCB);
        if (error) {
                dev_err(&pdev->dev, "Could not add cdev\n");
                goto unmap;
        }

        for (minor = 0 ; minor < MAX_CCB; minor++) {
                struct device *dev;
                dev = device_create(ilo_class, &pdev->dev,
                                    MKDEV(ilo_major, minor), NULL,
                                    "hpilo!d%dccb%d", devnum, minor);
                if (IS_ERR(dev))
                        dev_err(&pdev->dev, "Could not create files\n");
        }

        return 0;
unmap:
        ilo_unmap_device(pdev, ilo_hw);
free_regions:
        pci_release_regions(pdev);
disable:
        pci_disable_device(pdev);
free:
        kfree(ilo_hw);
out:
        ilo_hwdev[devnum] = 0;
        return error;
}

static struct pci_device_id ilo_devices[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB204) },
        { }
};
MODULE_DEVICE_TABLE(pci, ilo_devices);

static struct pci_driver ilo_driver = {
        .name     = ILO_NAME,
        .id_table = ilo_devices,
        .probe    = ilo_probe,
        .remove   = __devexit_p(ilo_remove),
};

static int __init ilo_init(void)
{
        int error;
        dev_t dev;

        ilo_class = class_create(THIS_MODULE, "iLO");
        if (IS_ERR(ilo_class)) {
                error = PTR_ERR(ilo_class);
                goto out;
        }

        error = alloc_chrdev_region(&dev, 0, MAX_OPEN, ILO_NAME);
        if (error)
                goto class_destroy;

        ilo_major = MAJOR(dev);

        error = pci_register_driver(&ilo_driver);
        if (error)
                goto chr_remove;

        return 0;
chr_remove:
        unregister_chrdev_region(dev, MAX_OPEN);
class_destroy:
        class_destroy(ilo_class);
out:
        return error;
}

static void __exit ilo_exit(void)
{
        pci_unregister_driver(&ilo_driver);
        unregister_chrdev_region(MKDEV(ilo_major, 0), MAX_OPEN);
        class_destroy(ilo_class);
}

MODULE_VERSION("0.05");
MODULE_ALIAS(ILO_NAME);
MODULE_DESCRIPTION(ILO_NAME);
MODULE_AUTHOR("David Altobelli <[email protected]>");
MODULE_LICENSE("GPL v2");

module_init(ilo_init);
module_exit(ilo_exit);