// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the HP iLO management processor.
 *
 * Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
 *	David Altobelli <[email protected]>
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include "hpilo.h"

static struct class *ilo_class;
static unsigned int ilo_major;
static unsigned int max_ccb = 16;
static char ilo_hwdev[MAX_ILO_DEV];
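
/*
 * FIFO entries are packed integers: the descriptor id lives in the
 * ENTRY_MASK_DESCRIPTOR field and the packet length, rounded up to
 * quadwords, lives in the ENTRY_MASK_QWORDS field.  The helpers below
 * encode and decode that format; get_entry_len() converts the quadword
 * count back to bytes.
 */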
static inline int get_entry_id(int entry)
{
	return (entry & ENTRY_MASK_DESCRIPTOR) >> ENTRY_BITPOS_DESCRIPTOR;
}

static inline int get_entry_len(int entry)
{
	return ((entry & ENTRY_MASK_QWORDS) >> ENTRY_BITPOS_QWORDS) << 3;
}

static inline int mk_entry(int id, int len)
{
	int qlen = len & 7 ? (len >> 3) + 1 : len >> 3;
	return id << ENTRY_BITPOS_DESCRIPTOR | qlen << ENTRY_BITPOS_QWORDS;
}

static inline int desc_mem_sz(int nr_entry)
{
	return nr_entry << L2_QENTRY_SZ;
}

/*
 * FIFO queues, shared with hardware.
 *
 * If a queue has empty slots, an entry is added to the queue tail,
 * and that entry is marked as occupied.
 * Entries can be dequeued from the head of the list, when the device
 * has marked the entry as consumed.
 *
 * Returns true on successful queue/dequeue, false on failure.
 */
static int fifo_enqueue(struct ilo_hwinfo *hw, char *fifobar, int entry)
{
	struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&hw->fifo_lock, flags);
	if (!(fifo_q->fifobar[(fifo_q->tail + 1) & fifo_q->imask]
	      & ENTRY_MASK_O)) {
		fifo_q->fifobar[fifo_q->tail & fifo_q->imask] |=
				(entry & ENTRY_MASK_NOSTATE) | fifo_q->merge;
		fifo_q->tail += 1;
		ret = 1;
	}
	spin_unlock_irqrestore(&hw->fifo_lock, flags);

	return ret;
}

static int fifo_dequeue(struct ilo_hwinfo *hw, char *fifobar, int *entry)
{
	struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
	unsigned long flags;
	int ret = 0;
	u64 c;

	spin_lock_irqsave(&hw->fifo_lock, flags);
	c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
	if (c & ENTRY_MASK_C) {
		if (entry)
			*entry = c & ENTRY_MASK_NOSTATE;

		fifo_q->fifobar[fifo_q->head & fifo_q->imask] =
							(c | ENTRY_MASK) + 1;
		fifo_q->head += 1;
		ret = 1;
	}
	spin_unlock_irqrestore(&hw->fifo_lock, flags);

	return ret;
}

static int fifo_check_recv(struct ilo_hwinfo *hw, char *fifobar)
{
	struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
	unsigned long flags;
	int ret = 0;
	u64 c;

	spin_lock_irqsave(&hw->fifo_lock, flags);
	c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
	if (c & ENTRY_MASK_C)
		ret = 1;
	spin_unlock_irqrestore(&hw->fifo_lock, flags);

	return ret;
}

static int ilo_pkt_enqueue(struct ilo_hwinfo *hw, struct ccb *ccb,
			   int dir, int id, int len)
{
	char *fifobar;
	int entry;

	if (dir == SENDQ)
		fifobar = ccb->ccb_u1.send_fifobar;
	else
		fifobar = ccb->ccb_u3.recv_fifobar;

	entry = mk_entry(id, len);
	return fifo_enqueue(hw, fifobar, entry);
}

static int ilo_pkt_dequeue(struct ilo_hwinfo *hw, struct ccb *ccb,
			   int dir, int *id, int *len, void **pkt)
{
	char *fifobar, *desc;
	int entry = 0, pkt_id = 0;
	int ret;

	if (dir == SENDQ) {
		fifobar = ccb->ccb_u1.send_fifobar;
		desc = ccb->ccb_u2.send_desc;
	} else {
		fifobar = ccb->ccb_u3.recv_fifobar;
		desc = ccb->ccb_u4.recv_desc;
	}

	ret = fifo_dequeue(hw, fifobar, &entry);
	if (ret) {
		pkt_id = get_entry_id(entry);
		if (id)
			*id = pkt_id;
		if (len)
			*len = get_entry_len(entry);
		if (pkt)
			*pkt = (void *)(desc + desc_mem_sz(pkt_id));
	}

	return ret;
}

static int ilo_pkt_recv(struct ilo_hwinfo *hw, struct ccb *ccb)
{
	char *fifobar = ccb->ccb_u3.recv_fifobar;

	return fifo_check_recv(hw, fifobar);
}
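
/*
 * Each channel has a byte-wide doorbell register in the doorbell
 * aperture (ccb_u5.db_base).  The values written (1 and 2) are defined
 * by the iLO hardware interface; doorbell_set() rings the doorbell to
 * get iLO's attention, doorbell_clr() clears it.
 */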
static inline void doorbell_set(struct ccb *ccb)
{
	iowrite8(1, ccb->ccb_u5.db_base);
}

static inline void doorbell_clr(struct ccb *ccb)
{
	iowrite8(2, ccb->ccb_u5.db_base);
}

static inline int ctrl_set(int l2sz, int idxmask, int desclim)
{
	int active = 0, go = 1;
	return l2sz << CTRL_BITPOS_L2SZ |
	       idxmask << CTRL_BITPOS_FIFOINDEXMASK |
	       desclim << CTRL_BITPOS_DESCLIMIT |
	       active << CTRL_BITPOS_A |
	       go << CTRL_BITPOS_G;
}

static void ctrl_setup(struct ccb *ccb, int nr_desc, int l2desc_sz)
{
	/* for simplicity, use the same parameters for send and recv ctrls */
	ccb->send_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1);
	ccb->recv_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1);
}

static inline int fifo_sz(int nr_entry)
{
	/* size of a fifo is determined by the number of entries it contains */
	return (nr_entry * sizeof(u64)) + FIFOHANDLESIZE;
}

static void fifo_setup(void *base_addr, int nr_entry)
{
	struct fifo *fifo_q = base_addr;
	int i;

	/* set up an empty fifo */
	fifo_q->head = 0;
	fifo_q->tail = 0;
	fifo_q->reset = 0;
	fifo_q->nrents = nr_entry;
	fifo_q->imask = nr_entry - 1;
	fifo_q->merge = ENTRY_MASK_O;

	for (i = 0; i < nr_entry; i++)
		fifo_q->fifobar[i] = 0;
}

static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data)
{
	struct ccb *driver_ccb = &data->driver_ccb;
	struct ccb __iomem *device_ccb = data->mapped_ccb;
	int retries;

	/* complicated dance to tell the hw we are stopping */
	doorbell_clr(driver_ccb);
	iowrite32(ioread32(&device_ccb->send_ctrl) & ~(1 << CTRL_BITPOS_G),
		  &device_ccb->send_ctrl);
	iowrite32(ioread32(&device_ccb->recv_ctrl) & ~(1 << CTRL_BITPOS_G),
		  &device_ccb->recv_ctrl);

	/* give iLO some time to process stop request */
	for (retries = MAX_WAIT; retries > 0; retries--) {
		doorbell_set(driver_ccb);
		udelay(WAIT_TIME);
		if (!(ioread32(&device_ccb->send_ctrl) & (1 << CTRL_BITPOS_A))
		    &&
		    !(ioread32(&device_ccb->recv_ctrl) & (1 << CTRL_BITPOS_A)))
			break;
	}
	if (retries == 0)
		dev_err(&pdev->dev, "Closing, but controller still active\n");

	/* clear the hw ccb */
	memset_io(device_ccb, 0, sizeof(struct ccb));

	/* free resources used to back send/recv queues */
	pci_free_consistent(pdev, data->dma_size, data->dma_va, data->dma_pa);
}
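
/*
 * ilo_ccb_setup() carves one coherent DMA allocation into the pieces a
 * channel shares with iLO: a send fifo, a cache-aligned receive fifo,
 * and the send/receive descriptor areas.  Each piece is recorded twice,
 * by virtual address in driver_ccb and by bus address in ilo_ccb (the
 * copy later written to device memory).
 */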
static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
{
	char *dma_va;
	dma_addr_t dma_pa;
	struct ccb *driver_ccb, *ilo_ccb;

	driver_ccb = &data->driver_ccb;
	ilo_ccb = &data->ilo_ccb;

	data->dma_size = 2 * fifo_sz(NR_QENTRY) +
			 2 * desc_mem_sz(NR_QENTRY) +
			 ILO_START_ALIGN + ILO_CACHE_SZ;

	data->dma_va = pci_alloc_consistent(hw->ilo_dev, data->dma_size,
					    &data->dma_pa);
	if (!data->dma_va)
		return -ENOMEM;

	dma_va = (char *)data->dma_va;
	dma_pa = data->dma_pa;

	memset(dma_va, 0, data->dma_size);

	dma_va = (char *)roundup((unsigned long)dma_va, ILO_START_ALIGN);
	dma_pa = roundup(dma_pa, ILO_START_ALIGN);

	/*
	 * Create two ccb's, one with virt addrs, one with phys addrs.
	 * Copy the phys addr ccb to device shared mem.
	 */
	ctrl_setup(driver_ccb, NR_QENTRY, L2_QENTRY_SZ);
	ctrl_setup(ilo_ccb, NR_QENTRY, L2_QENTRY_SZ);

	fifo_setup(dma_va, NR_QENTRY);
	driver_ccb->ccb_u1.send_fifobar = dma_va + FIFOHANDLESIZE;
	ilo_ccb->ccb_u1.send_fifobar_pa = dma_pa + FIFOHANDLESIZE;
	dma_va += fifo_sz(NR_QENTRY);
	dma_pa += fifo_sz(NR_QENTRY);

	dma_va = (char *)roundup((unsigned long)dma_va, ILO_CACHE_SZ);
	dma_pa = roundup(dma_pa, ILO_CACHE_SZ);

	fifo_setup(dma_va, NR_QENTRY);
	driver_ccb->ccb_u3.recv_fifobar = dma_va + FIFOHANDLESIZE;
	ilo_ccb->ccb_u3.recv_fifobar_pa = dma_pa + FIFOHANDLESIZE;
	dma_va += fifo_sz(NR_QENTRY);
	dma_pa += fifo_sz(NR_QENTRY);

	driver_ccb->ccb_u2.send_desc = dma_va;
	ilo_ccb->ccb_u2.send_desc_pa = dma_pa;
	dma_pa += desc_mem_sz(NR_QENTRY);
	dma_va += desc_mem_sz(NR_QENTRY);

	driver_ccb->ccb_u4.recv_desc = dma_va;
	ilo_ccb->ccb_u4.recv_desc_pa = dma_pa;

	driver_ccb->channel = slot;
	ilo_ccb->channel = slot;

	driver_ccb->ccb_u5.db_base = hw->db_vaddr + (slot << L2_DB_SIZE);
	ilo_ccb->ccb_u5.db_base = NULL; /* hw ccb's doorbell is not used */

	return 0;
}

static void ilo_ccb_open(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
{
	int pkt_id, pkt_sz;
	struct ccb *driver_ccb = &data->driver_ccb;

	/* copy the ccb with physical addrs to device memory */
	data->mapped_ccb = (struct ccb __iomem *)
				(hw->ram_vaddr + (slot * ILOHW_CCB_SZ));
	memcpy_toio(data->mapped_ccb, &data->ilo_ccb, sizeof(struct ccb));

	/* put packets on the send and receive queues */
	pkt_sz = 0;
	for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++) {
		ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, pkt_sz);
		doorbell_set(driver_ccb);
	}

	pkt_sz = desc_mem_sz(1);
	for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++)
		ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, pkt_sz);

	/* the ccb is ready to use */
	doorbell_clr(driver_ccb);
}

static int ilo_ccb_verify(struct ilo_hwinfo *hw, struct ccb_data *data)
{
	int pkt_id, i;
	struct ccb *driver_ccb = &data->driver_ccb;

	/* make sure iLO is really handling requests */
	for (i = MAX_WAIT; i > 0; i--) {
		if (ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, NULL, NULL))
			break;
		udelay(WAIT_TIME);
	}

	if (i == 0) {
		dev_err(&hw->ilo_dev->dev, "Open could not dequeue a packet\n");
		return -EBUSY;
	}

	ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, 0);
	doorbell_set(driver_ccb);
	return 0;
}

static inline int is_channel_reset(struct ccb *ccb)
{
	/* check for this particular channel needing a reset */
	return FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset;
}

static inline void set_channel_reset(struct ccb *ccb)
{
	/* set a flag indicating this channel needs a reset */
	FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset = 1;
}

static inline int get_device_outbound(struct ilo_hwinfo *hw)
{
	return ioread32(&hw->mmio_vaddr[DB_OUT]);
}

static inline int is_db_reset(int db_out)
{
	return db_out & (1 << DB_RESET);
}

static inline int is_device_reset(struct ilo_hwinfo *hw)
{
	/* check for global reset condition */
	return is_db_reset(get_device_outbound(hw));
}

static inline void clear_pending_db(struct ilo_hwinfo *hw, int clr)
{
	iowrite32(clr, &hw->mmio_vaddr[DB_OUT]);
}

static inline void clear_device(struct ilo_hwinfo *hw)
{
	/* clear the device (reset bits, pending channel entries) */
	clear_pending_db(hw, -1);
}

static inline void ilo_enable_interrupts(struct ilo_hwinfo *hw)
{
	iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) | 1, &hw->mmio_vaddr[DB_IRQ]);
}

static inline void ilo_disable_interrupts(struct ilo_hwinfo *hw)
{
	iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) & ~1,
		 &hw->mmio_vaddr[DB_IRQ]);
}

static void ilo_set_reset(struct ilo_hwinfo *hw)
{
	int slot;

	/*
	 * Mapped memory is zeroed on ilo reset, so set a per ccb flag
	 * to indicate that this ccb needs to be closed and reopened.
	 */
	for (slot = 0; slot < max_ccb; slot++) {
		if (!hw->ccb_alloc[slot])
			continue;
		set_channel_reset(&hw->ccb_alloc[slot]->driver_ccb);
	}
}

static ssize_t ilo_read(struct file *fp, char __user *buf,
			size_t len, loff_t *off)
{
	int err, found, cnt, pkt_id, pkt_len;
	struct ccb_data *data = fp->private_data;
	struct ccb *driver_ccb = &data->driver_ccb;
	struct ilo_hwinfo *hw = data->ilo_hw;
	void *pkt;

	if (is_channel_reset(driver_ccb)) {
		/*
		 * If the device has been reset, applications
		 * need to close and reopen all ccbs.
		 */
		return -ENODEV;
	}

	/*
	 * This function is to be called when data is expected
	 * in the channel, and will return an error if no packet is found
	 * during the loop below.  The sleep/retry logic is to allow
	 * applications to call read() immediately post write(),
	 * and give iLO some time to process the sent packet.
	 */
	cnt = 20;
	do {
		/* look for a received packet */
		found = ilo_pkt_dequeue(hw, driver_ccb, RECVQ, &pkt_id,
					&pkt_len, &pkt);
		if (found)
			break;
		cnt--;
		msleep(100);
	} while (!found && cnt);

	if (!found)
		return -EAGAIN;

	/* only copy the length of the received packet */
	if (pkt_len < len)
		len = pkt_len;

	err = copy_to_user(buf, pkt, len);

	/* return the received packet to the queue */
	ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, desc_mem_sz(1));

	return err ? -EFAULT : len;
}

static ssize_t ilo_write(struct file *fp, const char __user *buf,
			 size_t len, loff_t *off)
{
	int err, pkt_id, pkt_len;
	struct ccb_data *data = fp->private_data;
	struct ccb *driver_ccb = &data->driver_ccb;
	struct ilo_hwinfo *hw = data->ilo_hw;
	void *pkt;

	if (is_channel_reset(driver_ccb))
		return -ENODEV;

	/* get a packet to send the user command */
	if (!ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, &pkt_len, &pkt))
		return -EBUSY;

	/* limit the length to the length of the packet */
	if (pkt_len < len)
		len = pkt_len;

	/* on failure, set the len to 0 to return empty packet to the device */
	err = copy_from_user(pkt, buf, len);
	if (err)
		len = 0;

	/* send the packet */
	ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, len);
	doorbell_set(driver_ccb);

	return err ? -EFAULT : len;
}

static __poll_t ilo_poll(struct file *fp, poll_table *wait)
{
	struct ccb_data *data = fp->private_data;
	struct ccb *driver_ccb = &data->driver_ccb;

	poll_wait(fp, &data->ccb_waitq, wait);

	if (is_channel_reset(driver_ccb))
		return EPOLLERR;
	else if (ilo_pkt_recv(data->ilo_hw, driver_ccb))
		return EPOLLIN | EPOLLRDNORM;

	return 0;
}

static int ilo_close(struct inode *ip, struct file *fp)
{
	int slot;
	struct ccb_data *data;
	struct ilo_hwinfo *hw;
	unsigned long flags;

	slot = iminor(ip) % max_ccb;
	hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);

	spin_lock(&hw->open_lock);

	if (hw->ccb_alloc[slot]->ccb_cnt == 1) {

		data = fp->private_data;

		spin_lock_irqsave(&hw->alloc_lock, flags);
		hw->ccb_alloc[slot] = NULL;
		spin_unlock_irqrestore(&hw->alloc_lock, flags);

		ilo_ccb_close(hw->ilo_dev, data);

		kfree(data);
	} else
		hw->ccb_alloc[slot]->ccb_cnt--;

	spin_unlock(&hw->open_lock);

	return 0;
}
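
/*
 * Each iLO device owns max_ccb consecutive minors, so the channel slot
 * is iminor(ip) % max_ccb.  The first open of a slot allocates and
 * programs its ccb; later opens share it by bumping ccb_cnt, unless
 * either side asked for O_EXCL.
 */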
static int ilo_open(struct inode *ip, struct file *fp)
{
	int slot, error;
	struct ccb_data *data;
	struct ilo_hwinfo *hw;
	unsigned long flags;

	slot = iminor(ip) % max_ccb;
	hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);

	/* new ccb allocation */
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	spin_lock(&hw->open_lock);

	/* each fd private_data holds sw/hw view of ccb */
	if (hw->ccb_alloc[slot] == NULL) {
		/* create a channel control block for this minor */
		error = ilo_ccb_setup(hw, data, slot);
		if (error) {
			kfree(data);
			goto out;
		}

		data->ccb_cnt = 1;
		data->ccb_excl = fp->f_flags & O_EXCL;
		data->ilo_hw = hw;
		init_waitqueue_head(&data->ccb_waitq);

		/* write the ccb to hw */
		spin_lock_irqsave(&hw->alloc_lock, flags);
		ilo_ccb_open(hw, data, slot);
		hw->ccb_alloc[slot] = data;
		spin_unlock_irqrestore(&hw->alloc_lock, flags);

		/* make sure the channel is functional */
		error = ilo_ccb_verify(hw, data);
		if (error) {

			spin_lock_irqsave(&hw->alloc_lock, flags);
			hw->ccb_alloc[slot] = NULL;
			spin_unlock_irqrestore(&hw->alloc_lock, flags);

			ilo_ccb_close(hw->ilo_dev, data);

			kfree(data);
			goto out;
		}

	} else {
		kfree(data);
		if (fp->f_flags & O_EXCL || hw->ccb_alloc[slot]->ccb_excl) {
			/*
			 * The channel exists, and either this open
			 * or a previous open of this channel wants
			 * exclusive access.
			 */
			error = -EBUSY;
		} else {
			hw->ccb_alloc[slot]->ccb_cnt++;
			error = 0;
		}
	}
out:
	spin_unlock(&hw->open_lock);

	if (!error)
		fp->private_data = hw->ccb_alloc[slot];

	return error;
}

static const struct file_operations ilo_fops = {
	.owner		= THIS_MODULE,
	.read		= ilo_read,
	.write		= ilo_write,
	.poll		= ilo_poll,
	.open		= ilo_open,
	.release	= ilo_close,
	.llseek		= noop_llseek,
};
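
/*
 * The shared interrupt handler treats DB_OUT as a bitmask of channels
 * with pending work (bit DB_RESET flags a global iLO reset), wakes any
 * waiters on those channels, and writes the handled bits back to
 * acknowledge them.
 */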
static irqreturn_t ilo_isr(int irq, void *data)
{
	struct ilo_hwinfo *hw = data;
	int pending, i;

	spin_lock(&hw->alloc_lock);

	/* check for ccbs which have data */
	pending = get_device_outbound(hw);
	if (!pending) {
		spin_unlock(&hw->alloc_lock);
		return IRQ_NONE;
	}

	if (is_db_reset(pending)) {
		/* wake up all ccbs if the device was reset */
		pending = -1;
		ilo_set_reset(hw);
	}

	for (i = 0; i < max_ccb; i++) {
		if (!hw->ccb_alloc[i])
			continue;
		if (pending & (1 << i))
			wake_up_interruptible(&hw->ccb_alloc[i]->ccb_waitq);
	}

	/* clear the device of the channels that have been handled */
	clear_pending_db(hw, pending);

	spin_unlock(&hw->alloc_lock);

	return IRQ_HANDLED;
}

static void ilo_unmap_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
	pci_iounmap(pdev, hw->db_vaddr);
	pci_iounmap(pdev, hw->ram_vaddr);
	pci_iounmap(pdev, hw->mmio_vaddr);
}
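
/*
 * BAR usage in ilo_map_device(): BAR 1 holds the memory mapped i/o
 * registers, BAR 2 (or the tail of BAR 5 on subsystem 0x00E4 devices)
 * holds the shared memory backing the CCBs, and BAR 3 holds the
 * per-channel doorbell aperture.
 */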
static int ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
	int bar;
	unsigned long off;

	/* map the memory mapped i/o registers */
	hw->mmio_vaddr = pci_iomap(pdev, 1, 0);
	if (hw->mmio_vaddr == NULL) {
		dev_err(&pdev->dev, "Error mapping mmio\n");
		goto out;
	}

	/* map the adapter shared memory region */
	if (pdev->subsystem_device == 0x00E4) {
		bar = 5;
		/* Last 8k is reserved for CCBs */
		off = pci_resource_len(pdev, bar) - 0x2000;
	} else {
		bar = 2;
		off = 0;
	}
	hw->ram_vaddr = pci_iomap_range(pdev, bar, off, max_ccb * ILOHW_CCB_SZ);
	if (hw->ram_vaddr == NULL) {
		dev_err(&pdev->dev, "Error mapping shared mem\n");
		goto mmio_free;
	}

	/* map the doorbell aperture */
	hw->db_vaddr = pci_iomap(pdev, 3, max_ccb * ONE_DB_SIZE);
	if (hw->db_vaddr == NULL) {
		dev_err(&pdev->dev, "Error mapping doorbell\n");
		goto ram_free;
	}

	return 0;
ram_free:
	pci_iounmap(pdev, hw->ram_vaddr);
mmio_free:
	pci_iounmap(pdev, hw->mmio_vaddr);
out:
	return -ENOMEM;
}

static void ilo_remove(struct pci_dev *pdev)
{
	int i, minor;
	struct ilo_hwinfo *ilo_hw = pci_get_drvdata(pdev);

	if (!ilo_hw)
		return;

	clear_device(ilo_hw);

	minor = MINOR(ilo_hw->cdev.dev);
	for (i = minor; i < minor + max_ccb; i++)
		device_destroy(ilo_class, MKDEV(ilo_major, i));

	cdev_del(&ilo_hw->cdev);
	ilo_disable_interrupts(ilo_hw);
	free_irq(pdev->irq, ilo_hw);
	ilo_unmap_device(pdev, ilo_hw);
	pci_release_regions(pdev);
	/*
	 * pci_disable_device(pdev) used to be here. But this PCI device has
	 * two functions with interrupt lines connected to a single pin. The
	 * other one is a USB host controller. So when we disable the PIN here
	 * e.g. by rmmod hpilo, the controller stops working. It is because
	 * the interrupt link is disabled in ACPI since it is not refcounted
	 * yet. See acpi_pci_link_free_irq called from acpi_pci_irq_disable.
	 */
	kfree(ilo_hw);
	ilo_hwdev[(minor / max_ccb)] = 0;
}

static int ilo_probe(struct pci_dev *pdev,
		     const struct pci_device_id *ent)
{
	int devnum, minor, start, error = 0;
	struct ilo_hwinfo *ilo_hw;

	/* Ignore subsystem_device = 0x1979 (set by BIOS) */
	if (pdev->subsystem_device == 0x1979)
		return 0;

	if (max_ccb > MAX_CCB)
		max_ccb = MAX_CCB;
	else if (max_ccb < MIN_CCB)
		max_ccb = MIN_CCB;

	/* find a free range for device files */
	for (devnum = 0; devnum < MAX_ILO_DEV; devnum++) {
		if (ilo_hwdev[devnum] == 0) {
			ilo_hwdev[devnum] = 1;
			break;
		}
	}

	if (devnum == MAX_ILO_DEV) {
		dev_err(&pdev->dev, "Error finding free device\n");
		return -ENODEV;
	}

	/* track global allocations for this device */
	error = -ENOMEM;
	ilo_hw = kzalloc(sizeof(*ilo_hw), GFP_KERNEL);
	if (!ilo_hw)
		goto out;

	ilo_hw->ilo_dev = pdev;
	spin_lock_init(&ilo_hw->alloc_lock);
	spin_lock_init(&ilo_hw->fifo_lock);
	spin_lock_init(&ilo_hw->open_lock);

	error = pci_enable_device(pdev);
	if (error)
		goto free;

	pci_set_master(pdev);

	error = pci_request_regions(pdev, ILO_NAME);
	if (error)
		goto disable;

	error = ilo_map_device(pdev, ilo_hw);
	if (error)
		goto free_regions;

	pci_set_drvdata(pdev, ilo_hw);
	clear_device(ilo_hw);

	error = request_irq(pdev->irq, ilo_isr, IRQF_SHARED, "hpilo", ilo_hw);
	if (error)
		goto unmap;

	ilo_enable_interrupts(ilo_hw);

	cdev_init(&ilo_hw->cdev, &ilo_fops);
	ilo_hw->cdev.owner = THIS_MODULE;
	start = devnum * max_ccb;
	error = cdev_add(&ilo_hw->cdev, MKDEV(ilo_major, start), max_ccb);
	if (error) {
		dev_err(&pdev->dev, "Could not add cdev\n");
		goto remove_isr;
	}

	for (minor = 0 ; minor < max_ccb; minor++) {
		struct device *dev;
		dev = device_create(ilo_class, &pdev->dev,
				    MKDEV(ilo_major, minor), NULL,
				    "hpilo!d%dccb%d", devnum, minor);
		if (IS_ERR(dev))
			dev_err(&pdev->dev, "Could not create files\n");
	}

	return 0;
remove_isr:
	ilo_disable_interrupts(ilo_hw);
	free_irq(pdev->irq, ilo_hw);
unmap:
	ilo_unmap_device(pdev, ilo_hw);
free_regions:
	pci_release_regions(pdev);
disable:
	/* pci_disable_device(pdev);  see comment in ilo_remove */
free:
	kfree(ilo_hw);
out:
	ilo_hwdev[devnum] = 0;
	return error;
}

static const struct pci_device_id ilo_devices[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB204) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3307) },
	{ }
};
MODULE_DEVICE_TABLE(pci, ilo_devices);

static struct pci_driver ilo_driver = {
	.name	  = ILO_NAME,
	.id_table = ilo_devices,
	.probe	  = ilo_probe,
	.remove   = ilo_remove,
};

static int __init ilo_init(void)
{
	int error;
	dev_t dev;

	ilo_class = class_create(THIS_MODULE, "iLO");
	if (IS_ERR(ilo_class)) {
		error = PTR_ERR(ilo_class);
		goto out;
	}

	error = alloc_chrdev_region(&dev, 0, MAX_OPEN, ILO_NAME);
	if (error)
		goto class_destroy;

	ilo_major = MAJOR(dev);

	error = pci_register_driver(&ilo_driver);
	if (error)
		goto chr_remove;

	return 0;
chr_remove:
	unregister_chrdev_region(dev, MAX_OPEN);
class_destroy:
	class_destroy(ilo_class);
out:
	return error;
}

static void __exit ilo_exit(void)
{
	pci_unregister_driver(&ilo_driver);
	unregister_chrdev_region(MKDEV(ilo_major, 0), MAX_OPEN);
	class_destroy(ilo_class);
}

MODULE_VERSION("1.5.0");
MODULE_ALIAS(ILO_NAME);
MODULE_DESCRIPTION(ILO_NAME);
MODULE_AUTHOR("David Altobelli <[email protected]>");
MODULE_LICENSE("GPL v2");

module_param(max_ccb, uint, 0444);
MODULE_PARM_DESC(max_ccb, "Maximum number of HP iLO channels to attach (8-24)(default=16)");
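
/*
 * Example (hypothetical invocation): "modprobe hpilo max_ccb=24" attaches
 * 24 channels per iLO device; out-of-range values are clamped to
 * MIN_CCB..MAX_CCB in ilo_probe().
 */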

module_init(ilo_init);
module_exit(ilo_exit);