]> Git Repo - qemu.git/blame - hw/scsi/vmw_pvscsi.c
vmw_pvscsi: coding: Introduce PVSCSIClass
[qemu.git] / hw / scsi / vmw_pvscsi.c
CommitLineData
881d588a
DF
1/*
2 * QEMU VMWARE PVSCSI paravirtual SCSI bus
3 *
4 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
5 *
6 * Developed by Daynix Computing LTD (http://www.daynix.com)
7 *
8 * Based on implementation by Paolo Bonzini
9 * http://lists.gnu.org/archive/html/qemu-devel/2011-08/msg00729.html
10 *
11 * Authors:
12 * Paolo Bonzini <[email protected]>
13 * Dmitry Fleytman <[email protected]>
14 * Yan Vugenfirer <[email protected]>
15 *
16 * This work is licensed under the terms of the GNU GPL, version 2.
17 * See the COPYING file in the top-level directory.
18 *
19 * NOTE about MSI-X:
20 * MSI-X support has been removed for the moment because it leads Windows OS
21 * to crash on startup. The crash happens because Windows driver requires
22 * MSI-X shared memory to be part of the same BAR used for rings state
23 * registers, etc. This is not supported by QEMU infrastructure so separate
24 * BAR created from MSI-X purposes. Windows driver fails to deal with 2 BARs.
25 *
26 */
27
28#include "hw/scsi/scsi.h"
29#include <block/scsi.h>
30#include "hw/pci/msi.h"
31#include "vmw_pvscsi.h"
32#include "trace.h"
33
34
881d588a
DF
/* MSI capability configuration: 64-bit addresses, no per-vector masking */
#define PVSCSI_USE_64BIT       (true)
#define PVSCSI_PER_VECTOR_MASK (false)

/* Maximum SCSI target number addressable on the bus */
#define PVSCSI_MAX_DEVS        (64)
/* Single MSI vector shared for completions and messages */
#define PVSCSI_MSIX_NUM_VECTORS (1)

/*
 * Size (in 32-bit words) of the largest command payload;
 * PVSCSI_CMD_SETUP_RINGS carries the biggest descriptor.
 */
#define PVSCSI_MAX_CMD_DATA_WORDS \
    (sizeof(PVSCSICmdDescSetupRings)/sizeof(uint32_t))
43
0dc40f28
PB
/*
 * Accessors for fields of the guest-resident PVSCSIRingsState page.
 * 'm' is the PVSCSIRingInfo embedded in PVSCSIState; container_of()
 * recovers the owning device so the access goes through PCI DMA.
 * Loads/stores are little-endian 32-bit, matching the device ABI.
 */
#define RS_GET_FIELD(m, field) \
    (ldl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \
                    (m)->rs_pa + offsetof(struct PVSCSIRingsState, field)))
#define RS_SET_FIELD(m, field, val) \
    (stl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \
                    (m)->rs_pa + offsetof(struct PVSCSIRingsState, field), val))
881d588a 50
e2d4f3f7
SL
/* Class struct: pvscsi adds no class-level state beyond PCIDeviceClass. */
typedef struct PVSCSIClass {
    PCIDeviceClass parent_class;
} PVSCSIClass;

#define TYPE_PVSCSI "pvscsi"
/* QOM instance cast */
#define PVSCSI(obj) OBJECT_CHECK(PVSCSIState, (obj), TYPE_PVSCSI)

/* QOM class casts */
#define PVSCSI_DEVICE_CLASS(klass) \
    OBJECT_CLASS_CHECK(PVSCSIClass, (klass), TYPE_PVSCSI)
#define PVSCSI_DEVICE_GET_CLASS(obj) \
    OBJECT_GET_CLASS(PVSCSIClass, (obj), TYPE_PVSCSI)
62
d29d4ff8
SL
/* Compatibility flags for migration */
#define PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT 0
#define PVSCSI_COMPAT_OLD_PCI_CONFIGURATION \
    (1 << PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT)

/* True when the device must present the legacy PCI config layout */
#define PVSCSI_USE_OLD_PCI_CONFIGURATION(s) \
    ((s)->compat_flags & PVSCSI_COMPAT_OLD_PCI_CONFIGURATION)
/* MSI capability offset moved between the old and new config layouts */
#define PVSCSI_MSI_OFFSET(s) \
    (PVSCSI_USE_OLD_PCI_CONFIGURATION(s) ? 0x50 : 0x7c)
d29d4ff8 72
881d588a
DF
/*
 * Host-side bookkeeping for the three guest rings (request, completion,
 * message).  Masks are (ring_size - 1); ring sizes are powers of two.
 */
typedef struct PVSCSIRingInfo {
    uint64_t rs_pa;                 /* Guest PA of the shared rings-state page */
    uint32_t txr_len_mask;          /* Request ring index mask */
    uint32_t rxr_len_mask;          /* Completion ring index mask */
    uint32_t msg_len_mask;          /* Message ring index mask */
    uint64_t req_ring_pages_pa[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
    uint64_t cmp_ring_pages_pa[PVSCSI_SETUP_RINGS_MAX_NUM_PAGES];
    uint64_t msg_ring_pages_pa[PVSCSI_SETUP_MSG_RING_MAX_NUM_PAGES];
    uint64_t consumed_ptr;          /* Next request slot to consume */
    uint64_t filled_cmp_ptr;        /* Next completion slot to fill */
    uint64_t filled_msg_ptr;        /* Next message slot to fill */
} PVSCSIRingInfo;

/* Cursor over a guest scatter-gather element chain */
typedef struct PVSCSISGState {
    hwaddr elemAddr;                /* Guest PA of the next SG element */
    hwaddr dataAddr;                /* Data address of the current element */
    uint32_t resid;                 /* Bytes remaining in the current element */
} PVSCSISGState;

typedef QTAILQ_HEAD(, PVSCSIRequest) PVSCSIRequestList;
93
/* Per-device instance state */
typedef struct {
    PCIDevice parent_obj;
    MemoryRegion io_space;          /* BAR 0: register space */
    SCSIBus bus;
    QEMUBH *completion_worker;      /* Bottom half draining completion_queue */
    PVSCSIRequestList pending_queue;
    PVSCSIRequestList completion_queue;

    uint64_t reg_interrupt_status;        /* Interrupt status register value */
    uint64_t reg_interrupt_enabled;       /* Interrupt mask register value */
    uint64_t reg_command_status;          /* Command status register value */

    /* Command data adoption mechanism */
    uint64_t curr_cmd;                   /* Last command arrived */
    uint32_t curr_cmd_data_cntr;         /* Amount of data for last command */

    /* Collector for current command data */
    uint32_t curr_cmd_data[PVSCSI_MAX_CMD_DATA_WORDS];

    uint8_t rings_info_valid;            /* Whether data rings initialized */
    uint8_t msg_ring_info_valid;         /* Whether message ring initialized */
    uint8_t use_msg;                     /* Whether to use message ring */

    uint8_t msi_used;    /* Whether MSI support was installed successfully */

    PVSCSIRingInfo rings;                /* Data transfer rings manager */
    uint32_t resetting;                  /* Reset in progress */

    uint32_t compat_flags;               /* PVSCSI_COMPAT_* migration flags */
} PVSCSIState;
124
/* One in-flight SCSI request; lives on pending_queue then completion_queue */
typedef struct PVSCSIRequest {
    SCSIRequest *sreq;              /* Backing SCSI-layer request (ref held) */
    PVSCSIState *dev;
    uint8_t sense_key;
    uint8_t completed;              /* Set once moved to completion_queue */
    int lun;
    QEMUSGList sgl;                 /* DMA scatter-gather list built for I/O */
    PVSCSISGState sg;
    struct PVSCSIRingReqDesc req;   /* Request descriptor copied from guest */
    struct PVSCSIRingCmpDesc cmp;   /* Completion descriptor written back */
    QTAILQ_ENTRY(PVSCSIRequest) next;
} PVSCSIRequest;
137
/*
 * Integer binary logarithm: returns the smallest k >= 1 such that
 * (input >> k) == 0, i.e. the bit width of 'input'.
 */
static int
pvscsi_log2(uint32_t input)
{
    int width = 1;

    assert(input > 0);
    while ((input >> width) != 0) {
        width++;
    }
    return width;
}
148
/*
 * Initialise request/completion ring bookkeeping from a SETUP_RINGS
 * command descriptor and reset the guest-visible ring-state indices.
 * NOTE(review): page counts are taken from the guest unvalidated here;
 * the caller is expected to have range-checked them.
 */
static void
pvscsi_ring_init_data(PVSCSIRingInfo *m, PVSCSICmdDescSetupRings *ri)
{
    int i;
    uint32_t txr_len_log2, rxr_len_log2;
    uint32_t req_ring_size, cmp_ring_size;
    m->rs_pa = ri->ringsStatePPN << VMW_PAGE_SHIFT;

    /* Ring sizes are powers of two; masks are size-1 (see pvscsi_log2) */
    req_ring_size = ri->reqRingNumPages * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
    cmp_ring_size = ri->cmpRingNumPages * PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
    txr_len_log2 = pvscsi_log2(req_ring_size - 1);
    rxr_len_log2 = pvscsi_log2(cmp_ring_size - 1);

    m->txr_len_mask = MASK(txr_len_log2);
    m->rxr_len_mask = MASK(rxr_len_log2);

    m->consumed_ptr = 0;
    m->filled_cmp_ptr = 0;

    for (i = 0; i < ri->reqRingNumPages; i++) {
        m->req_ring_pages_pa[i] = ri->reqRingPPNs[i] << VMW_PAGE_SHIFT;
    }

    for (i = 0; i < ri->cmpRingNumPages; i++) {
        m->cmp_ring_pages_pa[i] = ri->cmpRingPPNs[i] << VMW_PAGE_SHIFT;
    }

    /* Publish zeroed indices and ring sizes to the guest rings-state page */
    RS_SET_FIELD(m, reqProdIdx, 0);
    RS_SET_FIELD(m, reqConsIdx, 0);
    RS_SET_FIELD(m, reqNumEntriesLog2, txr_len_log2);

    RS_SET_FIELD(m, cmpProdIdx, 0);
    RS_SET_FIELD(m, cmpConsIdx, 0);
    RS_SET_FIELD(m, cmpNumEntriesLog2, rxr_len_log2);

    trace_pvscsi_ring_init_data(txr_len_log2, rxr_len_log2);

    /* Flush ring state page changes */
    smp_wmb();
}
189
/*
 * Initialise message ring bookkeeping from a SETUP_MSG_RING command
 * descriptor and reset the guest-visible message indices.
 * NOTE(review): ri->numPages comes from the guest; caller must validate.
 */
static void
pvscsi_ring_init_msg(PVSCSIRingInfo *m, PVSCSICmdDescSetupMsgRing *ri)
{
    int i;
    uint32_t len_log2;
    uint32_t ring_size;

    ring_size = ri->numPages * PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE;
    len_log2 = pvscsi_log2(ring_size - 1);

    m->msg_len_mask = MASK(len_log2);

    m->filled_msg_ptr = 0;

    for (i = 0; i < ri->numPages; i++) {
        m->msg_ring_pages_pa[i] = ri->ringPPNs[i] << VMW_PAGE_SHIFT;
    }

    /* Publish zeroed indices and the ring size to the guest */
    RS_SET_FIELD(m, msgProdIdx, 0);
    RS_SET_FIELD(m, msgConsIdx, 0);
    RS_SET_FIELD(m, msgNumEntriesLog2, len_log2);

    trace_pvscsi_ring_init_msg(len_log2);

    /* Flush ring state page changes */
    smp_wmb();
}
217
218static void
219pvscsi_ring_cleanup(PVSCSIRingInfo *mgr)
220{
221 mgr->rs_pa = 0;
222 mgr->txr_len_mask = 0;
223 mgr->rxr_len_mask = 0;
224 mgr->msg_len_mask = 0;
225 mgr->consumed_ptr = 0;
226 mgr->filled_cmp_ptr = 0;
227 mgr->filled_msg_ptr = 0;
228 memset(mgr->req_ring_pages_pa, 0, sizeof(mgr->req_ring_pages_pa));
229 memset(mgr->cmp_ring_pages_pa, 0, sizeof(mgr->cmp_ring_pages_pa));
230 memset(mgr->msg_ring_pages_pa, 0, sizeof(mgr->msg_ring_pages_pa));
231}
232
233static hwaddr
234pvscsi_ring_pop_req_descr(PVSCSIRingInfo *mgr)
235{
0dc40f28 236 uint32_t ready_ptr = RS_GET_FIELD(mgr, reqProdIdx);
881d588a
DF
237
238 if (ready_ptr != mgr->consumed_ptr) {
239 uint32_t next_ready_ptr =
240 mgr->consumed_ptr++ & mgr->txr_len_mask;
241 uint32_t next_ready_page =
242 next_ready_ptr / PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
243 uint32_t inpage_idx =
244 next_ready_ptr % PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
245
246 return mgr->req_ring_pages_pa[next_ready_page] +
247 inpage_idx * sizeof(PVSCSIRingReqDesc);
248 } else {
249 return 0;
250 }
251}
252
/* Publish our consumer index so the guest can reuse consumed slots */
static void
pvscsi_ring_flush_req(PVSCSIRingInfo *mgr)
{
    RS_SET_FIELD(mgr, reqConsIdx, mgr->consumed_ptr);
}
258
259static hwaddr
260pvscsi_ring_pop_cmp_descr(PVSCSIRingInfo *mgr)
261{
262 /*
263 * According to Linux driver code it explicitly verifies that number
264 * of requests being processed by device is less then the size of
265 * completion queue, so device may omit completion queue overflow
266 * conditions check. We assume that this is true for other (Windows)
267 * drivers as well.
268 */
269
270 uint32_t free_cmp_ptr =
271 mgr->filled_cmp_ptr++ & mgr->rxr_len_mask;
272 uint32_t free_cmp_page =
273 free_cmp_ptr / PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
274 uint32_t inpage_idx =
275 free_cmp_ptr % PVSCSI_MAX_NUM_CMP_ENTRIES_PER_PAGE;
276 return mgr->cmp_ring_pages_pa[free_cmp_page] +
277 inpage_idx * sizeof(PVSCSIRingCmpDesc);
278}
279
280static hwaddr
281pvscsi_ring_pop_msg_descr(PVSCSIRingInfo *mgr)
282{
283 uint32_t free_msg_ptr =
284 mgr->filled_msg_ptr++ & mgr->msg_len_mask;
285 uint32_t free_msg_page =
286 free_msg_ptr / PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE;
287 uint32_t inpage_idx =
288 free_msg_ptr % PVSCSI_MAX_NUM_MSG_ENTRIES_PER_PAGE;
289 return mgr->msg_ring_pages_pa[free_msg_page] +
290 inpage_idx * sizeof(PVSCSIRingMsgDesc);
291}
292
/* Publish filled completion slots to the guest (descriptors first) */
static void
pvscsi_ring_flush_cmp(PVSCSIRingInfo *mgr)
{
    /* Flush descriptor changes */
    smp_wmb();

    trace_pvscsi_ring_flush_cmp(mgr->filled_cmp_ptr);

    RS_SET_FIELD(mgr, cmpProdIdx, mgr->filled_cmp_ptr);
}
303
/* True when the message ring has at least one free slot */
static bool
pvscsi_ring_msg_has_room(PVSCSIRingInfo *mgr)
{
    uint32_t prodIdx = RS_GET_FIELD(mgr, msgProdIdx);
    uint32_t consIdx = RS_GET_FIELD(mgr, msgConsIdx);

    /* Unsigned wrap-around arithmetic; mask+1 is the ring capacity */
    return (prodIdx - consIdx) < (mgr->msg_len_mask + 1);
}
312
/* Publish filled message slots to the guest (descriptors first) */
static void
pvscsi_ring_flush_msg(PVSCSIRingInfo *mgr)
{
    /* Flush descriptor changes */
    smp_wmb();

    trace_pvscsi_ring_flush_msg(mgr->filled_msg_ptr);

    RS_SET_FIELD(mgr, msgProdIdx, mgr->filled_msg_ptr);
}
323
/* Return all software state to power-on defaults (rings invalidated) */
static void
pvscsi_reset_state(PVSCSIState *s)
{
    s->curr_cmd = PVSCSI_CMD_FIRST;
    s->curr_cmd_data_cntr = 0;
    s->reg_command_status = PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
    s->reg_interrupt_status = 0;
    pvscsi_ring_cleanup(&s->rings);
    s->rings_info_valid = FALSE;
    s->msg_ring_info_valid = FALSE;
    QTAILQ_INIT(&s->pending_queue);
    QTAILQ_INIT(&s->completion_queue);
}
337
/*
 * Recompute interrupt delivery from status & mask.  With MSI enabled,
 * fire an edge-triggered message only on assertion; otherwise drive
 * the level-triggered INTx pin to match the masked status.
 */
static void
pvscsi_update_irq_status(PVSCSIState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    bool should_raise = s->reg_interrupt_enabled & s->reg_interrupt_status;

    trace_pvscsi_update_irq_level(should_raise, s->reg_interrupt_enabled,
                                  s->reg_interrupt_status);

    if (s->msi_used && msi_enabled(d)) {
        if (should_raise) {
            trace_pvscsi_update_irq_msi();
            msi_notify(d, PVSCSI_VECTOR_COMPLETION);
        }
        return;
    }

    pci_set_irq(d, !!should_raise);
}
357
/* Latch the completion interrupt cause and re-evaluate IRQ delivery */
static void
pvscsi_raise_completion_interrupt(PVSCSIState *s)
{
    s->reg_interrupt_status |= PVSCSI_INTR_CMPL_0;

    /* Memory barrier to flush interrupt status register changes*/
    smp_wmb();

    pvscsi_update_irq_status(s);
}
368
/* Latch the message-ring interrupt cause and re-evaluate IRQ delivery */
static void
pvscsi_raise_message_interrupt(PVSCSIState *s)
{
    s->reg_interrupt_status |= PVSCSI_INTR_MSG_0;

    /* Memory barrier to flush interrupt status register changes*/
    smp_wmb();

    pvscsi_update_irq_status(s);
}
379
/* Copy a completion descriptor into the next free completion-ring slot */
static void
pvscsi_cmp_ring_put(PVSCSIState *s, struct PVSCSIRingCmpDesc *cmp_desc)
{
    hwaddr cmp_descr_pa;

    cmp_descr_pa = pvscsi_ring_pop_cmp_descr(&s->rings);
    trace_pvscsi_cmp_ring_put(cmp_descr_pa);
    cpu_physical_memory_write(cmp_descr_pa, (void *)cmp_desc,
                              sizeof(*cmp_desc));
}
390
/* Copy a message descriptor into the next free message-ring slot */
static void
pvscsi_msg_ring_put(PVSCSIState *s, struct PVSCSIRingMsgDesc *msg_desc)
{
    hwaddr msg_descr_pa;

    msg_descr_pa = pvscsi_ring_pop_msg_descr(&s->rings);
    trace_pvscsi_msg_ring_put(msg_descr_pa);
    cpu_physical_memory_write(msg_descr_pa, (void *)msg_desc,
                              sizeof(*msg_desc));
}
401
/*
 * Bottom-half handler: drain completion_queue into the completion ring,
 * then publish the new producer index and raise the interrupt once.
 */
static void
pvscsi_process_completion_queue(void *opaque)
{
    PVSCSIState *s = opaque;
    PVSCSIRequest *pvscsi_req;
    bool has_completed = false;

    while (!QTAILQ_EMPTY(&s->completion_queue)) {
        pvscsi_req = QTAILQ_FIRST(&s->completion_queue);
        QTAILQ_REMOVE(&s->completion_queue, pvscsi_req, next);
        pvscsi_cmp_ring_put(s, &pvscsi_req->cmp);
        g_free(pvscsi_req);
        has_completed = true;
    }

    if (has_completed) {
        pvscsi_ring_flush_cmp(&s->rings);
        pvscsi_raise_completion_interrupt(s);
    }
}
422
/*
 * Full adapter reset: cancel everything on the bus (requests complete
 * with BTSTAT_BUSRESET while 'resetting' is set), flush completions,
 * then reinitialise software state.
 */
static void
pvscsi_reset_adapter(PVSCSIState *s)
{
    s->resetting++;
    qbus_reset_all_fn(&s->bus);
    s->resetting--;
    pvscsi_process_completion_queue(s);
    assert(QTAILQ_EMPTY(&s->pending_queue));
    pvscsi_reset_state(s);
}
433
/* Kick the completion bottom half if there is anything to deliver */
static void
pvscsi_schedule_completion_processing(PVSCSIState *s)
{
    /* Try putting more complete requests on the ring. */
    if (!QTAILQ_EMPTY(&s->completion_queue)) {
        qemu_bh_schedule(s->completion_worker);
    }
}
442
/*
 * Move a finished request from pending_queue to completion_queue,
 * dropping our reference on the SCSI-layer request, and schedule the
 * completion bottom half.
 */
static void
pvscsi_complete_request(PVSCSIState *s, PVSCSIRequest *r)
{
    assert(!r->completed);

    trace_pvscsi_complete_request(r->cmp.context, r->cmp.dataLen,
                                  r->sense_key);
    if (r->sreq != NULL) {
        scsi_req_unref(r->sreq);
        r->sreq = NULL;
    }
    r->completed = 1;
    QTAILQ_REMOVE(&s->pending_queue, r, next);
    QTAILQ_INSERT_TAIL(&s->completion_queue, r, next);
    pvscsi_schedule_completion_processing(s);
}
459
/* SCSIBusInfo callback: hand the pre-built DMA SG list to the SCSI layer */
static QEMUSGList *pvscsi_get_sg_list(SCSIRequest *r)
{
    PVSCSIRequest *req = r->hba_private;

    trace_pvscsi_get_sg_list(req->sgl.nsg, req->sgl.size);

    return &req->sgl;
}
468
/* Fetch the next guest SG element and advance the cursor onto it */
static void
pvscsi_get_next_sg_elem(PVSCSISGState *sg)
{
    struct PVSCSISGElement elem;

    cpu_physical_memory_read(sg->elemAddr, (void *)&elem, sizeof(elem));
    if ((elem.flags & ~PVSCSI_KNOWN_FLAGS) != 0) {
        /*
            * There is PVSCSI_SGE_FLAG_CHAIN_ELEMENT flag described in
            * header file but its value is unknown. This flag requires
            * additional processing, so we put warning here to catch it
            * some day and make proper implementation
            */
        trace_pvscsi_get_next_sg_elem(elem.flags);
    }

    sg->elemAddr += sizeof(elem);
    sg->dataAddr = elem.addr;
    sg->resid = elem.length;
}
489
/*
 * Copy sense data to the guest-provided sense buffer (truncated to the
 * guest's senseLen) and record the sense key.  The index depends on
 * fixed vs descriptor sense format (bit 1 of byte 0).
 */
static void
pvscsi_write_sense(PVSCSIRequest *r, uint8_t *sense, int len)
{
    r->cmp.senseLen = MIN(r->req.senseLen, len);
    r->sense_key = sense[(sense[0] & 2) ? 1 : 2];
    cpu_physical_memory_write(r->req.senseAddr, sense, r->cmp.senseLen);
}
497
/*
 * SCSIBusInfo 'complete' callback: record status (and sense on
 * CHECK_CONDITION), tear down the SG list and queue the completion.
 */
static void
pvscsi_command_complete(SCSIRequest *req, uint32_t status, size_t resid)
{
    PVSCSIRequest *pvscsi_req = req->hba_private;
    PVSCSIState *s;

    if (!pvscsi_req) {
        trace_pvscsi_command_complete_not_found(req->tag);
        return;
    }
    s = pvscsi_req->dev;

    if (resid) {
        /* Short transfer. */
        trace_pvscsi_command_complete_data_run();
        pvscsi_req->cmp.hostStatus = BTSTAT_DATARUN;
    }

    pvscsi_req->cmp.scsiStatus = status;
    if (pvscsi_req->cmp.scsiStatus == CHECK_CONDITION) {
        uint8_t sense[SCSI_SENSE_BUF_SIZE];
        int sense_len =
            scsi_req_get_sense(pvscsi_req->sreq, sense, sizeof(sense));

        trace_pvscsi_command_complete_sense_len(sense_len);
        pvscsi_write_sense(pvscsi_req, sense, sense_len);
    }
    qemu_sglist_destroy(&pvscsi_req->sgl);
    pvscsi_complete_request(s, pvscsi_req);
}
528
/*
 * Post a device-status-changed message (hot add/remove) to the message
 * ring, if the guest set one up and it has room; otherwise drop it.
 */
static void
pvscsi_send_msg(PVSCSIState *s, SCSIDevice *dev, uint32_t msg_type)
{
    if (s->msg_ring_info_valid && pvscsi_ring_msg_has_room(&s->rings)) {
        PVSCSIMsgDescDevStatusChanged msg = {0};

        msg.type = msg_type;
        msg.bus = dev->channel;
        msg.target = dev->id;
        msg.lun[1] = dev->lun;      /* LUN encoded in byte 1, as in requests */

        pvscsi_msg_ring_put(s, (PVSCSIRingMsgDesc *)&msg);
        pvscsi_ring_flush_msg(&s->rings);
        pvscsi_raise_message_interrupt(s);
    }
}
545
/* HotplugHandler 'plug' callback: notify the guest of a new device */
static void
pvscsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp)
{
    PVSCSIState *s = PVSCSI(hotplug_dev);

    pvscsi_send_msg(s, SCSI_DEVICE(dev), PVSCSI_MSG_DEV_ADDED);
}
553
/* HotplugHandler 'unplug' callback: notify the guest, then detach */
static void
pvscsi_hot_unplug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp)
{
    PVSCSIState *s = PVSCSI(hotplug_dev);

    pvscsi_send_msg(s, SCSI_DEVICE(dev), PVSCSI_MSG_DEV_REMOVED);
    qdev_simple_device_unplug_cb(hotplug_dev, dev, errp);
}
562
/*
 * SCSIBusInfo 'cancel' callback: complete the request with BUSRESET
 * during an adapter/bus reset, ABORTQUEUE otherwise.
 */
static void
pvscsi_request_cancelled(SCSIRequest *req)
{
    PVSCSIRequest *pvscsi_req = req->hba_private;
    PVSCSIState *s = pvscsi_req->dev;

    if (pvscsi_req->completed) {
        return;
    }

    if (pvscsi_req->dev->resetting) {
        pvscsi_req->cmp.hostStatus = BTSTAT_BUSRESET;
    } else {
        pvscsi_req->cmp.hostStatus = BTSTAT_ABORTQUEUE;
    }

    pvscsi_complete_request(s, pvscsi_req);
}
581
/*
 * Resolve a guest-supplied (target, 8-byte LUN) pair to a SCSIDevice.
 * Only single-level LUNs encoded in byte 1 are supported; any other
 * non-zero LUN byte means "no such device".
 * NOTE(review): the bound is 'target > PVSCSI_MAX_DEVS', which lets
 * target == PVSCSI_MAX_DEVS through to scsi_device_find — confirm
 * whether targets are 0..63 or 0..64 against the bus's max_target.
 */
static SCSIDevice*
pvscsi_device_find(PVSCSIState *s, int channel, int target,
                   uint8_t *requested_lun, uint8_t *target_lun)
{
    if (requested_lun[0] || requested_lun[2] || requested_lun[3] ||
        requested_lun[4] || requested_lun[5] || requested_lun[6] ||
        requested_lun[7] || (target > PVSCSI_MAX_DEVS)) {
        return NULL;
    } else {
        *target_lun = requested_lun[1];
        return scsi_device_find(&s->bus, channel, target, *target_lun);
    }
}
595
/*
 * Allocate a PVSCSIRequest for a ring descriptor, queue it on
 * pending_queue, and resolve the addressed device (returned via *d;
 * NULL when the target/LUN does not exist).
 */
static PVSCSIRequest *
pvscsi_queue_pending_descriptor(PVSCSIState *s, SCSIDevice **d,
                                struct PVSCSIRingReqDesc *descr)
{
    PVSCSIRequest *pvscsi_req;
    uint8_t lun;

    pvscsi_req = g_malloc0(sizeof(*pvscsi_req));
    pvscsi_req->dev = s;
    pvscsi_req->req = *descr;
    pvscsi_req->cmp.context = pvscsi_req->req.context;
    QTAILQ_INSERT_TAIL(&s->pending_queue, pvscsi_req, next);

    *d = pvscsi_device_find(s, descr->bus, descr->target, descr->lun, &lun);
    if (*d) {
        pvscsi_req->lun = lun;
    }

    return pvscsi_req;
}
616
617static void
618pvscsi_convert_sglist(PVSCSIRequest *r)
619{
620 int chunk_size;
621 uint64_t data_length = r->req.dataLen;
622 PVSCSISGState sg = r->sg;
623 while (data_length) {
624 while (!sg.resid) {
625 pvscsi_get_next_sg_elem(&sg);
626 trace_pvscsi_convert_sglist(r->req.context, r->sg.dataAddr,
627 r->sg.resid);
628 }
629 assert(data_length > 0);
630 chunk_size = MIN((unsigned) data_length, sg.resid);
631 if (chunk_size) {
632 qemu_sglist_add(&r->sgl, sg.dataAddr, chunk_size);
633 }
634
635 sg.dataAddr += chunk_size;
636 data_length -= chunk_size;
637 sg.resid -= chunk_size;
638 }
639}
640
/*
 * Build the DMA SG list for a request: either translate the guest SG
 * chain, or add the single flat (dataAddr, dataLen) region.
 */
static void
pvscsi_build_sglist(PVSCSIState *s, PVSCSIRequest *r)
{
    PCIDevice *d = PCI_DEVICE(s);

    pci_dma_sglist_init(&r->sgl, d, 1);
    if (r->req.flags & PVSCSI_FLAG_CMD_WITH_SG_LIST) {
        pvscsi_convert_sglist(r);
    } else {
        qemu_sglist_add(&r->sgl, r->req.dataAddr, r->req.dataLen);
    }
}
653
/*
 * Handle one request descriptor: resolve the device, validate the
 * transfer direction against the parsed CDB, build the SG list and
 * hand the request to the SCSI layer.  Failures complete the request
 * immediately with an appropriate host status.
 */
static void
pvscsi_process_request_descriptor(PVSCSIState *s,
                                  struct PVSCSIRingReqDesc *descr)
{
    SCSIDevice *d;
    PVSCSIRequest *r = pvscsi_queue_pending_descriptor(s, &d, descr);
    int64_t n;

    trace_pvscsi_process_req_descr(descr->cdb[0], descr->context);

    if (!d) {
        r->cmp.hostStatus = BTSTAT_SELTIMEO;
        trace_pvscsi_process_req_descr_unknown_device();
        pvscsi_complete_request(s, r);
        return;
    }

    if (descr->flags & PVSCSI_FLAG_CMD_WITH_SG_LIST) {
        r->sg.elemAddr = descr->dataAddr;
    }

    r->sreq = scsi_req_new(d, descr->context, r->lun, descr->cdb, r);
    /* Guest-declared direction must match what the CDB implies */
    if (r->sreq->cmd.mode == SCSI_XFER_FROM_DEV &&
        (descr->flags & PVSCSI_FLAG_CMD_DIR_TODEVICE)) {
        r->cmp.hostStatus = BTSTAT_BADMSG;
        trace_pvscsi_process_req_descr_invalid_dir();
        scsi_req_cancel(r->sreq);
        return;
    }
    if (r->sreq->cmd.mode == SCSI_XFER_TO_DEV &&
        (descr->flags & PVSCSI_FLAG_CMD_DIR_TOHOST)) {
        r->cmp.hostStatus = BTSTAT_BADMSG;
        trace_pvscsi_process_req_descr_invalid_dir();
        scsi_req_cancel(r->sreq);
        return;
    }

    pvscsi_build_sglist(s, r);
    n = scsi_req_enqueue(r->sreq);

    if (n) {
        scsi_req_continue(r->sreq);
    }
}
698
/*
 * Drain the request ring: read and process every descriptor the guest
 * has produced, then publish our consumer index.
 */
static void
pvscsi_process_io(PVSCSIState *s)
{
    PVSCSIRingReqDesc descr;
    hwaddr next_descr_pa;

    assert(s->rings_info_valid);
    while ((next_descr_pa = pvscsi_ring_pop_req_descr(&s->rings)) != 0) {

        /* Only read after production index verification */
        smp_rmb();

        trace_pvscsi_process_io(next_descr_pa);
        cpu_physical_memory_read(next_descr_pa, &descr, sizeof(descr));
        pvscsi_process_request_descriptor(s, &descr);
    }

    pvscsi_ring_flush_req(&s->rings);
}
718
719static void
720pvscsi_dbg_dump_tx_rings_config(PVSCSICmdDescSetupRings *rc)
721{
722 int i;
723 trace_pvscsi_tx_rings_ppn("Rings State", rc->ringsStatePPN);
724
725 trace_pvscsi_tx_rings_num_pages("Request Ring", rc->reqRingNumPages);
726 for (i = 0; i < rc->reqRingNumPages; i++) {
727 trace_pvscsi_tx_rings_ppn("Request Ring", rc->reqRingPPNs[i]);
728 }
729
730 trace_pvscsi_tx_rings_num_pages("Confirm Ring", rc->cmpRingNumPages);
731 for (i = 0; i < rc->cmpRingNumPages; i++) {
732 trace_pvscsi_tx_rings_ppn("Confirm Ring", rc->reqRingPPNs[i]);
733 }
734}
735
/* PVSCSI_CMD_CONFIG: not implemented; always reported as failed */
static uint64_t
pvscsi_on_cmd_config(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_CONFIG");
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}
742
/* PVSCSI_CMD_DEVICE_UNPLUG: not implemented; always reported as failed */
static uint64_t
pvscsi_on_cmd_unplug(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_DEVICE_UNPLUG");
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}
749
/* PVSCSI_CMD_ISSUE_SCSI: not implemented; always reported as failed */
static uint64_t
pvscsi_on_issue_scsi(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_noimpl("PVSCSI_CMD_ISSUE_SCSI");
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}
756
757static uint64_t
758pvscsi_on_cmd_setup_rings(PVSCSIState *s)
759{
760 PVSCSICmdDescSetupRings *rc =
761 (PVSCSICmdDescSetupRings *) s->curr_cmd_data;
762
763 trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_SETUP_RINGS");
764
765 pvscsi_dbg_dump_tx_rings_config(rc);
766 pvscsi_ring_init_data(&s->rings, rc);
767 s->rings_info_valid = TRUE;
768 return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
769}
770
/*
 * PVSCSI_CMD_ABORT_CMD: find the pending request with the given
 * context cookie and cancel it (completion carries ABORTQUEUE).
 * r is NULL after the loop if no match was found.
 */
static uint64_t
pvscsi_on_cmd_abort(PVSCSIState *s)
{
    PVSCSICmdDescAbortCmd *cmd = (PVSCSICmdDescAbortCmd *) s->curr_cmd_data;
    PVSCSIRequest *r, *next;

    trace_pvscsi_on_cmd_abort(cmd->context, cmd->target);

    QTAILQ_FOREACH_SAFE(r, &s->pending_queue, next, next) {
        if (r->req.context == cmd->context) {
            break;
        }
    }
    if (r) {
        assert(!r->completed);
        r->cmp.hostStatus = BTSTAT_ABORTQUEUE;
        scsi_req_cancel(r->sreq);
    }

    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}
792
/* Fallback handler for unrecognised command IDs */
static uint64_t
pvscsi_on_cmd_unknown(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_unknown_data(s->curr_cmd_data[0]);
    return PVSCSI_COMMAND_PROCESSING_FAILED;
}
799
/*
 * PVSCSI_CMD_RESET_DEVICE: reset a single target/LUN.  'resetting' is
 * held across the reset so cancelled requests report BTSTAT_BUSRESET.
 */
static uint64_t
pvscsi_on_cmd_reset_device(PVSCSIState *s)
{
    uint8_t target_lun = 0;
    struct PVSCSICmdDescResetDevice *cmd =
        (struct PVSCSICmdDescResetDevice *) s->curr_cmd_data;
    SCSIDevice *sdev;

    sdev = pvscsi_device_find(s, 0, cmd->target, cmd->lun, &target_lun);

    trace_pvscsi_on_cmd_reset_dev(cmd->target, (int) target_lun, sdev);

    if (sdev != NULL) {
        s->resetting++;
        device_reset(&sdev->qdev);
        s->resetting--;
        return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
    }

    return PVSCSI_COMMAND_PROCESSING_FAILED;
}
821
/* PVSCSI_CMD_RESET_BUS: reset every device on the bus */
static uint64_t
pvscsi_on_cmd_reset_bus(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_RESET_BUS");

    s->resetting++;
    qbus_reset_all_fn(&s->bus);
    s->resetting--;
    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}
832
833static uint64_t
834pvscsi_on_cmd_setup_msg_ring(PVSCSIState *s)
835{
836 PVSCSICmdDescSetupMsgRing *rc =
837 (PVSCSICmdDescSetupMsgRing *) s->curr_cmd_data;
838
839 trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_SETUP_MSG_RING");
840
841 if (!s->use_msg) {
842 return PVSCSI_COMMAND_PROCESSING_FAILED;
843 }
844
845 if (s->rings_info_valid) {
846 pvscsi_ring_init_msg(&s->rings, rc);
847 s->msg_ring_info_valid = TRUE;
848 }
849 return sizeof(PVSCSICmdDescSetupMsgRing) / sizeof(uint32_t);
850}
851
/* PVSCSI_CMD_ADAPTER_RESET: full reset back to power-on state */
static uint64_t
pvscsi_on_cmd_adapter_reset(PVSCSIState *s)
{
    trace_pvscsi_on_cmd_arrived("PVSCSI_CMD_ADAPTER_RESET");

    pvscsi_reset_adapter(s);
    return PVSCSI_COMMAND_PROCESSING_SUCCEEDED;
}
860
/*
 * Command dispatch table, indexed by PVSCSI_CMD_* id.  data_size is the
 * payload (in bytes) that must arrive via COMMAND_DATA writes before
 * handler_fn runs; see pvscsi_do_command_processing().
 */
static const struct {
    int       data_size;
    uint64_t  (*handler_fn)(PVSCSIState *s);
} pvscsi_commands[] = {
    [PVSCSI_CMD_FIRST] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_unknown,
    },

    /* Not implemented, data size defined based on what arrives on windows */
    [PVSCSI_CMD_CONFIG] = {
        .data_size = 6 * sizeof(uint32_t),
        .handler_fn = pvscsi_on_cmd_config,
    },

    /* Command not implemented, data size is unknown */
    [PVSCSI_CMD_ISSUE_SCSI] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_issue_scsi,
    },

    /* Command not implemented, data size is unknown */
    [PVSCSI_CMD_DEVICE_UNPLUG] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_unplug,
    },

    [PVSCSI_CMD_SETUP_RINGS] = {
        .data_size = sizeof(PVSCSICmdDescSetupRings),
        .handler_fn = pvscsi_on_cmd_setup_rings,
    },

    [PVSCSI_CMD_RESET_DEVICE] = {
        .data_size = sizeof(struct PVSCSICmdDescResetDevice),
        .handler_fn = pvscsi_on_cmd_reset_device,
    },

    [PVSCSI_CMD_RESET_BUS] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_reset_bus,
    },

    [PVSCSI_CMD_SETUP_MSG_RING] = {
        .data_size = sizeof(PVSCSICmdDescSetupMsgRing),
        .handler_fn = pvscsi_on_cmd_setup_msg_ring,
    },

    [PVSCSI_CMD_ADAPTER_RESET] = {
        .data_size = 0,
        .handler_fn = pvscsi_on_cmd_adapter_reset,
    },

    [PVSCSI_CMD_ABORT_CMD] = {
        .data_size = sizeof(struct PVSCSICmdDescAbortCmd),
        .handler_fn = pvscsi_on_cmd_abort,
    },
};
918
/*
 * Run the current command's handler once its full payload has arrived
 * (zero-payload commands run immediately), then reset the collector.
 */
static void
pvscsi_do_command_processing(PVSCSIState *s)
{
    size_t bytes_arrived = s->curr_cmd_data_cntr * sizeof(uint32_t);

    assert(s->curr_cmd < PVSCSI_CMD_LAST);
    if (bytes_arrived >= pvscsi_commands[s->curr_cmd].data_size) {
        s->reg_command_status = pvscsi_commands[s->curr_cmd].handler_fn(s);
        s->curr_cmd = PVSCSI_CMD_FIRST;
        s->curr_cmd_data_cntr = 0;
    }
}
931
/* Accumulate one 32-bit payload word for the current command */
static void
pvscsi_on_command_data(PVSCSIState *s, uint32_t value)
{
    size_t bytes_arrived = s->curr_cmd_data_cntr * sizeof(uint32_t);

    assert(bytes_arrived < sizeof(s->curr_cmd_data));
    s->curr_cmd_data[s->curr_cmd_data_cntr++] = value;

    pvscsi_do_command_processing(s);
}
942
/*
 * Start a new command: latch its id (unknown ids map to CMD_FIRST,
 * whose handler reports failure), reset the payload collector, and
 * process immediately in case the command needs no payload.
 */
static void
pvscsi_on_command(PVSCSIState *s, uint64_t cmd_id)
{
    if ((cmd_id > PVSCSI_CMD_FIRST) && (cmd_id < PVSCSI_CMD_LAST)) {
        s->curr_cmd = cmd_id;
    } else {
        s->curr_cmd = PVSCSI_CMD_FIRST;
        trace_pvscsi_on_cmd_unknown(cmd_id);
    }

    s->curr_cmd_data_cntr = 0;
    s->reg_command_status = PVSCSI_COMMAND_NOT_ENOUGH_DATA;

    pvscsi_do_command_processing(s);
}
958
/* MMIO write handler for BAR 0 registers */
static void
pvscsi_io_write(void *opaque, hwaddr addr,
                uint64_t val, unsigned size)
{
    PVSCSIState *s = opaque;

    switch (addr) {
    case PVSCSI_REG_OFFSET_COMMAND:
        pvscsi_on_command(s, val);
        break;

    case PVSCSI_REG_OFFSET_COMMAND_DATA:
        pvscsi_on_command_data(s, (uint32_t) val);
        break;

    case PVSCSI_REG_OFFSET_INTR_STATUS:
        /* Write-1-to-clear interrupt causes */
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_INTR_STATUS", val);
        s->reg_interrupt_status &= ~val;
        pvscsi_update_irq_status(s);
        pvscsi_schedule_completion_processing(s);
        break;

    case PVSCSI_REG_OFFSET_INTR_MASK:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_INTR_MASK", val);
        s->reg_interrupt_enabled = val;
        pvscsi_update_irq_status(s);
        break;

    case PVSCSI_REG_OFFSET_KICK_NON_RW_IO:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_KICK_NON_RW_IO", val);
        pvscsi_process_io(s);
        break;

    case PVSCSI_REG_OFFSET_KICK_RW_IO:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_KICK_RW_IO", val);
        pvscsi_process_io(s);
        break;

    case PVSCSI_REG_OFFSET_DEBUG:
        trace_pvscsi_io_write("PVSCSI_REG_OFFSET_DEBUG", val);
        break;

    default:
        trace_pvscsi_io_write_unknown(addr, size, val);
        break;
    }

}
1007
1008static uint64_t
1009pvscsi_io_read(void *opaque, hwaddr addr, unsigned size)
1010{
1011 PVSCSIState *s = opaque;
1012
1013 switch (addr) {
1014 case PVSCSI_REG_OFFSET_INTR_STATUS:
1015 trace_pvscsi_io_read("PVSCSI_REG_OFFSET_INTR_STATUS",
1016 s->reg_interrupt_status);
1017 return s->reg_interrupt_status;
1018
1019 case PVSCSI_REG_OFFSET_INTR_MASK:
1020 trace_pvscsi_io_read("PVSCSI_REG_OFFSET_INTR_MASK",
1021 s->reg_interrupt_status);
1022 return s->reg_interrupt_enabled;
1023
1024 case PVSCSI_REG_OFFSET_COMMAND_STATUS:
1025 trace_pvscsi_io_read("PVSCSI_REG_OFFSET_COMMAND_STATUS",
1026 s->reg_interrupt_status);
1027 return s->reg_command_status;
1028
1029 default:
1030 trace_pvscsi_io_read_unknown(addr, size);
1031 return 0;
1032 }
1033}
1034
1035
1036static bool
1037pvscsi_init_msi(PVSCSIState *s)
1038{
1039 int res;
1040 PCIDevice *d = PCI_DEVICE(s);
1041
836fc48c 1042 res = msi_init(d, PVSCSI_MSI_OFFSET(s), PVSCSI_MSIX_NUM_VECTORS,
881d588a
DF
1043 PVSCSI_USE_64BIT, PVSCSI_PER_VECTOR_MASK);
1044 if (res < 0) {
1045 trace_pvscsi_init_msi_fail(res);
1046 s->msi_used = false;
1047 } else {
1048 s->msi_used = true;
1049 }
1050
1051 return s->msi_used;
1052}
1053
/* Remove the MSI capability if it was successfully installed */
static void
pvscsi_cleanup_msi(PVSCSIState *s)
{
    PCIDevice *d = PCI_DEVICE(s);

    if (s->msi_used) {
        msi_uninit(d);
    }
}
1063
/* Register space access ops: all accesses performed as 32-bit LE */
static const MemoryRegionOps pvscsi_ops = {
        .read = pvscsi_io_read,
        .write = pvscsi_io_write,
        .endianness = DEVICE_LITTLE_ENDIAN,
        .impl = {
                .min_access_size = 4,
                .max_access_size = 4,
        },
};
1073
/* SCSI bus callbacks: tagged queueing, 64 targets, single channel/LUN */
static const struct SCSIBusInfo pvscsi_scsi_info = {
        .tcq = true,
        .max_target = PVSCSI_MAX_DEVS,
        .max_channel = 0,
        .max_lun = 0,

        .get_sg_list = pvscsi_get_sg_list,
        .complete = pvscsi_command_complete,
        .cancel = pvscsi_request_cancelled,
};
1084
/*
 * PCI realize: program config space (legacy or current layout per
 * compat flags), map BAR 0, set up MSI, the completion bottom half
 * and the SCSI bus, then reset software state.  Returns 0 on success.
 */
static int
pvscsi_init(PCIDevice *pci_dev)
{
    PVSCSIState *s = PVSCSI(pci_dev);

    trace_pvscsi_state("init");

    /* PCI subsystem ID, subsystem vendor ID, revision */
    if (PVSCSI_USE_OLD_PCI_CONFIGURATION(s)) {
        pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, 0x1000);
    } else {
        pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID,
                     PCI_VENDOR_ID_VMWARE);
        pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID,
                     PCI_DEVICE_ID_VMWARE_PVSCSI);
        pci_config_set_revision(pci_dev->config, 0x2);
    }

    /* PCI latency timer = 255 */
    pci_dev->config[PCI_LATENCY_TIMER] = 0xff;

    /* Interrupt pin A */
    pci_config_set_interrupt_pin(pci_dev->config, 1);

    memory_region_init_io(&s->io_space, OBJECT(s), &pvscsi_ops, s,
                          "pvscsi-io", PVSCSI_MEM_SPACE_SIZE);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->io_space);

    pvscsi_init_msi(s);

    s->completion_worker = qemu_bh_new(pvscsi_process_completion_queue, s);
    if (!s->completion_worker) {
        pvscsi_cleanup_msi(s);
        return -ENOMEM;
    }

    scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(pci_dev),
                 &pvscsi_scsi_info, NULL);
    /* override default SCSI bus hotplug-handler, with pvscsi's one */
    qbus_set_hotplug_handler(BUS(&s->bus), DEVICE(s), &error_abort);
    pvscsi_reset_state(s);

    return 0;
}
1129
1130static void
1131pvscsi_uninit(PCIDevice *pci_dev)
1132{
1133 PVSCSIState *s = PVSCSI(pci_dev);
1134
1135 trace_pvscsi_state("uninit");
1136 qemu_bh_delete(s->completion_worker);
1137
1138 pvscsi_cleanup_msi(s);
881d588a
DF
1139}
1140
1141static void
1142pvscsi_reset(DeviceState *dev)
1143{
1144 PCIDevice *d = PCI_DEVICE(dev);
1145 PVSCSIState *s = PVSCSI(d);
1146
1147 trace_pvscsi_state("reset");
1148 pvscsi_reset_adapter(s);
1149}
1150
1151static void
1152pvscsi_pre_save(void *opaque)
1153{
1154 PVSCSIState *s = (PVSCSIState *) opaque;
1155
1156 trace_pvscsi_state("presave");
1157
1158 assert(QTAILQ_EMPTY(&s->pending_queue));
1159 assert(QTAILQ_EMPTY(&s->completion_queue));
1160}
1161
/*
 * Migration post_load hook.  No state fix-up is needed beyond what the
 * VMState field list restores; both parameters are unused.
 */
static int
pvscsi_post_load(void *opaque, int version_id)
{
    trace_pvscsi_state("postload");
    return 0;
}
1168
/*
 * Migration stream description.  NOTE: the field order and types below
 * are migration ABI — do not reorder or retype without bumping
 * version_id and keeping minimum_version_id compatible.
 */
static const VMStateDescription vmstate_pvscsi = {
    .name = "pvscsi",
    .version_id = 0,
    .minimum_version_id = 0,
    .pre_save = pvscsi_pre_save,
    .post_load = pvscsi_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, PVSCSIState),
        VMSTATE_UINT8(msi_used, PVSCSIState),
        VMSTATE_UINT32(resetting, PVSCSIState),
        VMSTATE_UINT64(reg_interrupt_status, PVSCSIState),
        VMSTATE_UINT64(reg_interrupt_enabled, PVSCSIState),
        VMSTATE_UINT64(reg_command_status, PVSCSIState),
        /* In-progress multi-word command state (PVSCSI_REG_OFFSET_COMMAND) */
        VMSTATE_UINT64(curr_cmd, PVSCSIState),
        VMSTATE_UINT32(curr_cmd_data_cntr, PVSCSIState),
        VMSTATE_UINT32_ARRAY(curr_cmd_data, PVSCSIState,
                             ARRAY_SIZE(((PVSCSIState *)NULL)->curr_cmd_data)),
        VMSTATE_UINT8(rings_info_valid, PVSCSIState),
        VMSTATE_UINT8(msg_ring_info_valid, PVSCSIState),
        VMSTATE_UINT8(use_msg, PVSCSIState),

        /* Guest-visible ring state: guest-physical page addresses and
         * producer/consumer pointers for the request/completion rings. */
        VMSTATE_UINT64(rings.rs_pa, PVSCSIState),
        VMSTATE_UINT32(rings.txr_len_mask, PVSCSIState),
        VMSTATE_UINT32(rings.rxr_len_mask, PVSCSIState),
        VMSTATE_UINT64_ARRAY(rings.req_ring_pages_pa, PVSCSIState,
                             PVSCSI_SETUP_RINGS_MAX_NUM_PAGES),
        VMSTATE_UINT64_ARRAY(rings.cmp_ring_pages_pa, PVSCSIState,
                             PVSCSI_SETUP_RINGS_MAX_NUM_PAGES),
        VMSTATE_UINT64(rings.consumed_ptr, PVSCSIState),
        VMSTATE_UINT64(rings.filled_cmp_ptr, PVSCSIState),

        VMSTATE_END_OF_LIST()
    }
};
1203
881d588a
DF
static Property pvscsi_properties[] = {
    /* Enable the event message ring (default on). */
    DEFINE_PROP_UINT8("use_msg", PVSCSIState, use_msg, 1),
    /* Compat knob: expose the pre-2.x PCI config layout (subsystem ID
     * only); set by machine compat properties for old machine types. */
    DEFINE_PROP_BIT("x-old-pci-configuration", PVSCSIState, compat_flags,
                    PVSCSI_COMPAT_OLD_PCI_CONFIGURATION_BIT, false),
    DEFINE_PROP_END_OF_LIST(),
};
1210
/*
 * QOM class initializer: wire up the PCI device callbacks, identity
 * registers, migration description, properties and the hotplug handler
 * interface implemented by this device.
 */
static void pvscsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    k->init = pvscsi_init;
    k->exit = pvscsi_uninit;
    k->vendor_id = PCI_VENDOR_ID_VMWARE;
    k->device_id = PCI_DEVICE_ID_VMWARE_PVSCSI;
    k->class_id = PCI_CLASS_STORAGE_SCSI;
    k->subsystem_id = 0x1000;
    dc->reset = pvscsi_reset;
    dc->vmsd = &vmstate_pvscsi;
    dc->props = pvscsi_properties;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    /* The device acts as hotplug controller for its own SCSI bus. */
    hc->unplug = pvscsi_hot_unplug;
    hc->plug = pvscsi_hotplug;
}
1230
/* QOM type registration data for the pvscsi PCI device. */
static const TypeInfo pvscsi_info = {
    .name = TYPE_PVSCSI,
    .parent = TYPE_PCI_DEVICE,
    .class_size = sizeof(PVSCSIClass),
    .instance_size = sizeof(PVSCSIState),
    .class_init = pvscsi_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};
1242
/* Register the pvscsi QOM type at module load time. */
static void
pvscsi_register_types(void)
{
    type_register_static(&pvscsi_info);
}

type_init(pvscsi_register_types);
This page took 0.297066 seconds and 4 git commands to generate.