/* sunvdc.c: Sun LDOM Virtual Disk Client.
 *
 * Copyright (C) 2007, 2008 David S. Miller <[email protected]>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#define DRV_MODULE_NAME		"sunvdc"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.0"
#define DRV_MODULE_RELDATE	"June 25, 2007"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller ([email protected])");
MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define VDC_TX_RING_SIZE	256

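/* Flags for vio_completion.waiting_for; vdc_finish() wakes the waiter
 * only when the completed event matches, or when WAITING_FOR_ANY is
 * passed.
 */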
#define WAITING_FOR_LINK_UP	0x01
#define WAITING_FOR_TX_SPACE	0x02
#define WAITING_FOR_GEN_CMD	0x04
#define WAITING_FOR_ANY		-1

struct vdc_req_entry {
	struct request		*req;
};

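/* Per-port (per virtual disk) state: embeds the generic VIO driver
 * state plus a shadow array that maps TX ring slots back to the block
 * requests they carry.
 */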
struct vdc_port {
	struct vio_driver_state	vio;

	struct gendisk		*disk;

	struct vdc_completion	*cmp;

	u64			req_id;
	u64			seq;
	struct vdc_req_entry	rq_arr[VDC_TX_RING_SIZE];

	unsigned long		ring_cookies;

	u64			max_xfer_size;
	u32			vdisk_block_size;

	/* The server fills these in for us in the disk attribute
	 * ACK packet.
	 */
	u64			operations;
	u32			vdisk_size;
	u8			vdisk_type;

	char			disk_name[32];

	struct vio_disk_geom	geom;
	struct vio_disk_vtoc	label;
};

static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
	return container_of(vio, struct vdc_port, vio);
}

/* Ordered from largest major to lowest */
static struct vio_version vdc_versions[] = {
	{ .major = 1, .minor = 0 },
};

#define VDCBLK_NAME	"vdisk"
static int vdc_major;
#define PARTITION_SHIFT	3

static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VDC_TX_RING_SIZE);
}

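/* Report the geometry obtained from the server during probe; this
 * backs the HDIO_GETGEO ioctl via block_device_operations.
 */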
static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct gendisk *disk = bdev->bd_disk;
	struct vdc_port *port = disk->private_data;

	geo->heads = (u8) port->geom.num_hd;
	geo->sectors = (u8) port->geom.num_sec;
	geo->cylinders = port->geom.num_cyl;

	return 0;
}

static const struct block_device_operations vdc_fops = {
	.owner		= THIS_MODULE,
	.getgeo		= vdc_getgeo,
};

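/* Complete the pending waiter, if any, whose waiting_for flag matches
 * (or unconditionally for WAITING_FOR_ANY), handing err back through
 * the completion.
 */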
static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
{
	if (vio->cmp &&
	    (waiting_for == -1 ||
	     vio->cmp->waiting_for == waiting_for)) {
		vio->cmp->err = err;
		complete(&vio->cmp->com);
		vio->cmp = NULL;
	}
}

static void vdc_handshake_complete(struct vio_driver_state *vio)
{
	vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
}

static int vdc_handle_unknown(struct vdc_port *port, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	printk(KERN_ERR PFX "Resetting connection.\n");

	ldc_disconnect(port->vio.lp);

	return -ECONNRESET;
}

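/* Send our disk attribute INFO packet (block size and maximum transfer
 * size) to the server as part of the VIO handshake.
 */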
static int vdc_send_attr(struct vio_driver_state *vio)
{
	struct vdc_port *port = to_vdc_port(vio);
	struct vio_disk_attr_info pkt;

	memset(&pkt, 0, sizeof(pkt));

	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);

	pkt.xfer_mode = VIO_DRING_MODE;
	pkt.vdisk_block_size = port->vdisk_block_size;
	pkt.max_xfer_size = port->max_xfer_size;

	viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
	       pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size);

	return vio_ldc_send(&port->vio, &pkt, sizeof(pkt));
}

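/* Process the server's attribute reply: on ACK, validate the disk type
 * and block size and record the negotiated parameters; on NACK, reset
 * the connection.
 */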
static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
{
	struct vdc_port *port = to_vdc_port(vio);
	struct vio_disk_attr_info *pkt = arg;

	viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] "
	       "xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
	       pkt->tag.stype, pkt->operations,
	       pkt->vdisk_size, pkt->vdisk_type,
	       pkt->xfer_mode, pkt->vdisk_block_size,
	       pkt->max_xfer_size);

	if (pkt->tag.stype == VIO_SUBTYPE_ACK) {
		switch (pkt->vdisk_type) {
		case VD_DISK_TYPE_DISK:
		case VD_DISK_TYPE_SLICE:
			break;

		default:
			printk(KERN_ERR PFX "%s: Bogus vdisk_type 0x%x\n",
			       vio->name, pkt->vdisk_type);
			return -ECONNRESET;
		}

		if (pkt->vdisk_block_size > port->vdisk_block_size) {
			printk(KERN_ERR PFX "%s: BLOCK size increased "
			       "%u --> %u\n",
			       vio->name,
			       port->vdisk_block_size, pkt->vdisk_block_size);
			return -ECONNRESET;
		}

		port->operations = pkt->operations;
		port->vdisk_size = pkt->vdisk_size;
		port->vdisk_type = pkt->vdisk_type;
		if (pkt->max_xfer_size < port->max_xfer_size)
			port->max_xfer_size = pkt->max_xfer_size;
		port->vdisk_block_size = pkt->vdisk_block_size;
		return 0;
	} else {
		printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name);

		return -ECONNRESET;
	}
}

static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
{
	int err = desc->status;

	vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
}

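/* Retire one completed descriptor: unmap its LDC cookies, advance the
 * consumer index, and either end the block request or wake the waiter
 * for a special (non-block) command.  Restart the queue if it had been
 * stopped for lack of ring space.
 */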
static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
			unsigned int index)
{
	struct vio_disk_desc *desc = vio_dring_entry(dr, index);
	struct vdc_req_entry *rqe = &port->rq_arr[index];
	struct request *req;

	if (unlikely(desc->hdr.state != VIO_DESC_DONE))
		return;

	ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
	desc->hdr.state = VIO_DESC_FREE;
	dr->cons = (index + 1) & (VDC_TX_RING_SIZE - 1);

	req = rqe->req;
	if (req == NULL) {
		vdc_end_special(port, desc);
		return;
	}

	rqe->req = NULL;

	__blk_end_request(req, (desc->status ? -EIO : 0), desc->size);

	if (blk_queue_stopped(port->disk->queue))
		blk_start_queue(port->disk->queue);
}

static int vdc_ack(struct vdc_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;

	if (unlikely(pkt->dring_ident != dr->ident ||
		     pkt->start_idx != pkt->end_idx ||
		     pkt->start_idx >= VDC_TX_RING_SIZE))
		return 0;

	vdc_end_one(port, dr, pkt->start_idx);

	return 0;
}

static int vdc_nack(struct vdc_port *port, void *msgbuf)
{
	/* XXX Implement me XXX */
	return 0;
}

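/* LDC interrupt callback.  Link state changes are handed to the generic
 * VIO layer; on DATA_READY we drain the channel, dispatching data ACKs
 * and NACKs to the handlers above and control packets to the VIO
 * handshake engine.  Any error wakes whoever is waiting on this port.
 */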
static void vdc_event(void *arg, int event)
{
	struct vdc_port *port = arg;
	struct vio_driver_state *vio = &port->vio;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&vio->lock, flags);

	if (unlikely(event == LDC_EVENT_RESET ||
		     event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);
		spin_unlock_irqrestore(&vio->lock, flags);
		return;
	}

	if (unlikely(event != LDC_EVENT_DATA_READY)) {
		printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
		spin_unlock_irqrestore(&vio->lock, flags);
		return;
	}

	err = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;

		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_ACK)
				err = vdc_ack(port, &msgbuf);
			else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK)
				err = vdc_nack(port, &msgbuf);
			else
				err = vdc_handle_unknown(port, &msgbuf);
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			err = vio_control_pkt_engine(vio, &msgbuf);
		} else {
			err = vdc_handle_unknown(port, &msgbuf);
		}
		if (err < 0)
			break;
	}
	if (err < 0)
		vdc_finish(&port->vio, err, WAITING_FOR_ANY);
	spin_unlock_irqrestore(&vio->lock, flags);
}

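/* Kick the server: send a DRING_DATA INFO message for the descriptor
 * at dr->prod, retrying with exponential backoff (capped at 128us)
 * while the LDC channel returns -EAGAIN.
 */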
static int __vdc_tx_trigger(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type		= VIO_TYPE_DATA,
			.stype		= VIO_SUBTYPE_INFO,
			.stype_env	= VIO_DRING_DATA,
			.sid		= vio_send_sid(&port->vio),
		},
		.dring_ident		= dr->ident,
		.start_idx		= dr->prod,
		.end_idx		= dr->prod,
	};
	int err, delay;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
	} while (err == -EAGAIN);

	return err;
}

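/* Map one block request's scatterlist into the next TX ring descriptor
 * and kick the server.  Runs under port->vio.lock (the queue lock); if
 * no ring slot is free the queue is stopped and -ENOMEM is returned.
 */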
static int __send_request(struct request *req)
{
	struct vdc_port *port = req->rq_disk->private_data;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct scatterlist sg[port->ring_cookies];
	struct vdc_req_entry *rqe;
	struct vio_disk_desc *desc;
	unsigned int map_perm;
	int nsg, err, i;
	u64 len;
	u8 op;

	map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	if (rq_data_dir(req) == READ) {
		map_perm |= LDC_MAP_W;
		op = VD_OP_BREAD;
	} else {
		map_perm |= LDC_MAP_R;
		op = VD_OP_BWRITE;
	}

	sg_init_table(sg, port->ring_cookies);
	nsg = blk_rq_map_sg(req->q, req, sg);

	len = 0;
	for (i = 0; i < nsg; i++)
		len += sg[i].length;

	if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
		blk_stop_queue(port->disk->queue);
		err = -ENOMEM;
		goto out;
	}

	desc = vio_dring_cur(dr);

	err = ldc_map_sg(port->vio.lp, sg, nsg,
			 desc->cookies, port->ring_cookies,
			 map_perm);
	if (err < 0) {
		printk(KERN_ERR PFX "ldc_map_sg() failure, err=%d.\n", err);
		return err;
	}

	rqe = &port->rq_arr[dr->prod];
	rqe->req = req;

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;
	if (port->vdisk_type == VD_DISK_TYPE_DISK) {
		desc->slice = 0xff;
	} else {
		desc->slice = 0;
	}
	desc->status = ~0;
	desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
	desc->size = len;
	desc->ncookies = err;

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();
	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);
	if (err < 0) {
		printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
	} else {
		port->req_id++;
		dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
	}
out:

	return err;
}

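/* Request function for the block layer: pull requests off the queue
 * and hand them to __send_request(), failing any that cannot be sent
 * with -EIO.
 */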
static void do_vdc_request(struct request_queue *q)
{
	while (1) {
		struct request *req = blk_fetch_request(q);

		if (!req)
			break;

		if (__send_request(req) < 0)
			__blk_end_request_all(req, -EIO);
	}
}

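/* Issue a synchronous non-block ("special") command such as GET_VTOC
 * or GET_DISKGEOM through the same TX ring: map a bounce buffer, post
 * one descriptor, and sleep on a completion until vdc_end_special()
 * fires.
 */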
static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
{
	struct vio_dring_state *dr;
	struct vio_completion comp;
	struct vio_disk_desc *desc;
	unsigned int map_perm;
	unsigned long flags;
	int op_len, err;
	void *req_buf;

	if (!(((u64)1 << ((u64)op - 1)) & port->operations))
		return -EOPNOTSUPP;

	switch (op) {
	case VD_OP_BREAD:
	case VD_OP_BWRITE:
	default:
		return -EINVAL;

	case VD_OP_FLUSH:
		op_len = 0;
		map_perm = 0;
		break;

	case VD_OP_GET_WCE:
		op_len = sizeof(u32);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_WCE:
		op_len = sizeof(u32);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_GET_VTOC:
		op_len = sizeof(struct vio_disk_vtoc);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_VTOC:
		op_len = sizeof(struct vio_disk_vtoc);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_GET_DISKGEOM:
		op_len = sizeof(struct vio_disk_geom);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_SET_DISKGEOM:
		op_len = sizeof(struct vio_disk_geom);
		map_perm = LDC_MAP_R;
		break;

	case VD_OP_SCSICMD:
		op_len = 16;
		map_perm = LDC_MAP_RW;
		break;

	case VD_OP_GET_DEVID:
		op_len = sizeof(struct vio_disk_devid);
		map_perm = LDC_MAP_W;
		break;

	case VD_OP_GET_EFI:
	case VD_OP_SET_EFI:
		return -EOPNOTSUPP;
	}

	map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	op_len = (op_len + 7) & ~7;
	req_buf = kzalloc(op_len, GFP_KERNEL);
	if (!req_buf)
		return -ENOMEM;

	if (len > op_len)
		len = op_len;

	if (map_perm & LDC_MAP_R)
		memcpy(req_buf, buf, len);

	spin_lock_irqsave(&port->vio.lock, flags);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	/* XXX If we want to use this code generically we have to
	 * XXX handle TX ring exhaustion etc.
	 */
	desc = vio_dring_cur(dr);

	err = ldc_map_single(port->vio.lp, req_buf, op_len,
			     desc->cookies, port->ring_cookies,
			     map_perm);
	if (err < 0) {
		spin_unlock_irqrestore(&port->vio.lock, flags);
		kfree(req_buf);
		return err;
	}

	init_completion(&comp.com);
	comp.waiting_for = WAITING_FOR_GEN_CMD;
	port->vio.cmp = &comp;

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;
	desc->slice = 0;
	desc->status = ~0;
	desc->offset = 0;
	desc->size = op_len;
	desc->ncookies = err;

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();
	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);
	if (err >= 0) {
		port->req_id++;
		dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
		spin_unlock_irqrestore(&port->vio.lock, flags);

		wait_for_completion(&comp.com);
		err = comp.err;
	} else {
		port->vio.cmp = NULL;
		spin_unlock_irqrestore(&port->vio.lock, flags);
	}

	if (map_perm & LDC_MAP_W)
		memcpy(buf, req_buf, len);

	kfree(req_buf);

	return err;
}

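/* Allocate and export the TX descriptor ring over the LDC channel.
 * Each ring entry carries enough ldc_trans_cookie slots for the
 * largest transfer this port will map (port->ring_cookies).
 */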
static int __devinit vdc_alloc_tx_ring(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	unsigned long len, entry_size;
	int ncookies;
	void *dring;

	entry_size = sizeof(struct vio_disk_desc) +
		(sizeof(struct ldc_trans_cookie) * port->ring_cookies);
	len = (VDC_TX_RING_SIZE * entry_size);

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring))
		return PTR_ERR(dring);

	dr->base = dring;
	dr->entry_size = entry_size;
	dr->num_entries = VDC_TX_RING_SIZE;
	dr->prod = dr->cons = 0;
	dr->pending = VDC_TX_RING_SIZE;
	dr->ncookies = ncookies;

	return 0;
}

static void vdc_free_tx_ring(struct vdc_port *port)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	if (dr->base) {
		ldc_free_exp_dring(port->vio.lp, dr->base,
				   (dr->entry_size * dr->num_entries),
				   dr->cookies, dr->ncookies);
		dr->base = NULL;
		dr->entry_size = 0;
		dr->num_entries = 0;
		dr->pending = 0;
		dr->ncookies = 0;
	}
}

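/* Bring the VIO link up and wait for the handshake, fetch the label
 * and geometry with generic_request(), then allocate the request queue
 * and gendisk and register the disk with the block layer.
 */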
static int probe_disk(struct vdc_port *port)
{
	struct vio_completion comp;
	struct request_queue *q;
	struct gendisk *g;
	int err;

	init_completion(&comp.com);
	comp.err = 0;
	comp.waiting_for = WAITING_FOR_LINK_UP;
	port->vio.cmp = &comp;

	vio_port_up(&port->vio);

	wait_for_completion(&comp.com);
	if (comp.err)
		return comp.err;

	err = generic_request(port, VD_OP_GET_VTOC,
			      &port->label, sizeof(port->label));
	if (err < 0) {
		printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err);
		return err;
	}

	err = generic_request(port, VD_OP_GET_DISKGEOM,
			      &port->geom, sizeof(port->geom));
	if (err < 0) {
		printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
		       "error %d\n", err);
		return err;
	}

	port->vdisk_size = ((u64)port->geom.num_cyl *
			    (u64)port->geom.num_hd *
			    (u64)port->geom.num_sec);

	q = blk_init_queue(do_vdc_request, &port->vio.lock);
	if (!q) {
		printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
		       port->vio.name);
		return -ENOMEM;
	}
	g = alloc_disk(1 << PARTITION_SHIFT);
	if (!g) {
		printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
		       port->vio.name);
		blk_cleanup_queue(q);
		return -ENOMEM;
	}

	port->disk = g;

	blk_queue_max_segments(q, port->ring_cookies);
	blk_queue_max_hw_sectors(q, port->max_xfer_size);
	g->major = vdc_major;
	g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
	strcpy(g->disk_name, port->disk_name);

	g->fops = &vdc_fops;
	g->queue = q;
	g->private_data = port;
	g->driverfs_dev = &port->vio.vdev->dev;

	set_capacity(g, port->vdisk_size);

	printk(KERN_INFO PFX "%s: %u sectors (%u MB)\n",
	       g->disk_name,
	       port->vdisk_size, (port->vdisk_size >> (20 - 9)));

	add_disk(g);

	return 0;
}

static struct ldc_channel_config vdc_ldc_cfg = {
	.event		= vdc_event,
	.mtu		= 64,
	.mode		= LDC_MODE_UNRELIABLE,
};

static struct vio_driver_ops vdc_vio_ops = {
	.send_attr		= vdc_send_attr,
	.handle_attr		= vdc_handle_attr,
	.handshake_complete	= vdc_handshake_complete,
};

static void __devinit print_version(void)
{
	static int version_printed;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);
}

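/* Probe one "vdc-port" machine-description node: name the disk, set up
 * the VIO driver state, LDC channel and TX ring, then probe the disk
 * itself.  The 128KB max_xfer_size chosen here is only a starting
 * point; the attribute handshake may reduce it.
 */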
static int __devinit vdc_port_probe(struct vio_dev *vdev,
				    const struct vio_device_id *id)
{
	struct mdesc_handle *hp;
	struct vdc_port *port;
	int err;

	print_version();

	hp = mdesc_grab();

	err = -ENODEV;
	if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
		printk(KERN_ERR PFX "Port id [%llu] too large.\n",
		       vdev->dev_no);
		goto err_out_release_mdesc;
	}

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	err = -ENOMEM;
	if (!port) {
		printk(KERN_ERR PFX "Cannot allocate vdc_port.\n");
		goto err_out_release_mdesc;
	}

	if (vdev->dev_no >= 26)
		snprintf(port->disk_name, sizeof(port->disk_name),
			 VDCBLK_NAME "%c%c",
			 'a' + ((int)vdev->dev_no / 26) - 1,
			 'a' + ((int)vdev->dev_no % 26));
	else
		snprintf(port->disk_name, sizeof(port->disk_name),
			 VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));

	err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
			      vdc_versions, ARRAY_SIZE(vdc_versions),
			      &vdc_vio_ops, port->disk_name);
	if (err)
		goto err_out_free_port;

	port->vdisk_block_size = 512;
	port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size);
	port->ring_cookies = ((port->max_xfer_size *
			       port->vdisk_block_size) / PAGE_SIZE) + 2;

	err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
	if (err)
		goto err_out_free_port;

	err = vdc_alloc_tx_ring(port);
	if (err)
		goto err_out_free_ldc;

	err = probe_disk(port);
	if (err)
		goto err_out_free_tx_ring;

	dev_set_drvdata(&vdev->dev, port);

	mdesc_release(hp);

	return 0;

err_out_free_tx_ring:
	vdc_free_tx_ring(port);

err_out_free_ldc:
	vio_ldc_free(&port->vio);

err_out_free_port:
	kfree(port);

err_out_release_mdesc:
	mdesc_release(hp);
	return err;
}

static int vdc_port_remove(struct vio_dev *vdev)
{
	struct vdc_port *port = dev_get_drvdata(&vdev->dev);

	if (port) {
		del_timer_sync(&port->vio.timer);

		vdc_free_tx_ring(port);
		vio_ldc_free(&port->vio);

		dev_set_drvdata(&vdev->dev, NULL);

		kfree(port);
	}
	return 0;
}

static const struct vio_device_id vdc_port_match[] = {
	{
		.type = "vdc-port",
	},
	{},
};
MODULE_DEVICE_TABLE(vio, vdc_port_match);

static struct vio_driver vdc_port_driver = {
	.id_table	= vdc_port_match,
	.probe		= vdc_port_probe,
	.remove		= vdc_port_remove,
	.name		= "vdc_port",
};

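/* Register a dynamically allocated block major for "vdisk" devices,
 * then register the VIO driver so vdc_port_probe() runs for each
 * vdc-port node.
 */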
static int __init vdc_init(void)
{
	int err;

	err = register_blkdev(0, VDCBLK_NAME);
	if (err < 0)
		goto out_err;

	vdc_major = err;

	err = vio_register_driver(&vdc_port_driver);
	if (err)
		goto out_unregister_blkdev;

	return 0;

out_unregister_blkdev:
	unregister_blkdev(vdc_major, VDCBLK_NAME);
	vdc_major = 0;

out_err:
	return err;
}

static void __exit vdc_exit(void)
{
	vio_unregister_driver(&vdc_port_driver);
	unregister_blkdev(vdc_major, VDCBLK_NAME);
}

module_init(vdc_init);
module_exit(vdc_exit);