* and is not licensed separately. See file COPYING for details.
*
* TODO (sorted by decreasing priority)
- * -- Kill first_open (Al Viro fixed the block layer now)
* -- set readonly flag for CDs, set removable flag for CF readers
* -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
* -- special case some senses, e.g. 3a/0 -> no media present, reduce retries
* -- verify the 13 conditions and do bulk resets
* -- kill last_pipe and simply do two-state clearing on both pipes
- * -- verify protocol (bulk) from USB descriptors (maybe...)
* -- highmem
* -- move top_sense and work_bcs into separate allocations (if they survive)
* for cache purists and esoteric architectures.
#define UB_DIR_ILLEGAL2 2
#define UB_DIR_WRITE 3
#define UB_DIR_CHAR(c) (((c)==UB_DIR_WRITE)? 'w': \
(((c)==UB_DIR_READ)? 'r': 'n'))
UB_CMDST_DONE /* Final state */
};
-static char *ub_scsi_cmd_stname[] = {
- ". ",
- "Cmd",
- "dat",
- "c2s",
- "sts",
- "clr",
- "crs",
- "Sen",
- "fin"
-};
-
struct ub_scsi_cmd {
unsigned char cdb[UB_MAX_CDB_SIZE];
unsigned char cdb_len;
unsigned char dir; /* 0 - none, 1 - read, 3 - write. */
- unsigned char trace_index;
enum ub_scsi_cmd_state state;
unsigned int tag;
struct ub_scsi_cmd *next;
unsigned int bshift; /* Shift between 512 and hard sects */
};
-/*
- * The SCSI command tracing structure.
- */
-
-#define SCMD_ST_HIST_SZ 8
-#define SCMD_TRACE_SZ 63 /* Less than 4KB of 61-byte lines */
-
-struct ub_scsi_cmd_trace {
- int hcur;
- unsigned int tag;
- unsigned int req_size, act_size;
- unsigned char op;
- unsigned char dir;
- unsigned char key, asc, ascq;
- char st_hst[SCMD_ST_HIST_SZ];
-};
-
-struct ub_scsi_trace {
- int cur;
- struct ub_scsi_cmd_trace vec[SCMD_TRACE_SZ];
-};
-
/*
* This is a direct take-off from linux/include/completion.h
* The difference is that I do not wait on this thing, just poll.
int changed; /* Media was changed */
int removable;
int readonly;
- int first_open; /* Kludge. See ub_bd_open. */
struct ub_request urq;
* The USB device instance.
*/
struct ub_dev {
- spinlock_t lock;
+ spinlock_t *lock;
atomic_t poison; /* The USB device is disconnected */
int openc; /* protected by ub_lock! */
/* kref is too implicit for our taste */
wait_queue_head_t reset_wait;
int sg_stat[6];
- struct ub_scsi_trace tr;
};
/*
static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
int stalled_pipe);
static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
-static void ub_reset_enter(struct ub_dev *sc);
+static void ub_reset_enter(struct ub_dev *sc, int try);
static void ub_reset_task(void *arg);
static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun);
static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
struct ub_capacity *ret);
+static int ub_sync_reset(struct ub_dev *sc);
+static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe);
static int ub_probe_lun(struct ub_dev *sc, int lnum);
/*
#define UB_MAX_HOSTS 26
static char ub_hostv[UB_MAX_HOSTS];
-static DEFINE_SPINLOCK(ub_lock); /* Locks globals and ->openc */
-
-/*
- * The SCSI command tracing procedures.
- */
+#define UB_QLOCK_NUM 5
+static spinlock_t ub_qlockv[UB_QLOCK_NUM];
+static int ub_qlock_next = 0;
-static void ub_cmdtr_new(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
-{
- int n;
- struct ub_scsi_cmd_trace *t;
-
- if ((n = sc->tr.cur + 1) == SCMD_TRACE_SZ) n = 0;
- t = &sc->tr.vec[n];
-
- memset(t, 0, sizeof(struct ub_scsi_cmd_trace));
- t->tag = cmd->tag;
- t->op = cmd->cdb[0];
- t->dir = cmd->dir;
- t->req_size = cmd->len;
- t->st_hst[0] = cmd->state;
-
- sc->tr.cur = n;
- cmd->trace_index = n;
-}
-
-static void ub_cmdtr_state(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
-{
- int n;
- struct ub_scsi_cmd_trace *t;
-
- t = &sc->tr.vec[cmd->trace_index];
- if (t->tag == cmd->tag) {
- if ((n = t->hcur + 1) == SCMD_ST_HIST_SZ) n = 0;
- t->st_hst[n] = cmd->state;
- t->hcur = n;
- }
-}
-
-static void ub_cmdtr_act_len(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
-{
- struct ub_scsi_cmd_trace *t;
-
- t = &sc->tr.vec[cmd->trace_index];
- if (t->tag == cmd->tag)
- t->act_size = cmd->act_len;
-}
-
-static void ub_cmdtr_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
- unsigned char *sense)
-{
- struct ub_scsi_cmd_trace *t;
-
- t = &sc->tr.vec[cmd->trace_index];
- if (t->tag == cmd->tag) {
- t->key = sense[2] & 0x0F;
- t->asc = sense[12];
- t->ascq = sense[13];
- }
-}
-
-static ssize_t ub_diag_show(struct device *dev, struct device_attribute *attr,
- char *page)
-{
- struct usb_interface *intf;
- struct ub_dev *sc;
- struct list_head *p;
- struct ub_lun *lun;
- int cnt;
- unsigned long flags;
- int nc, nh;
- int i, j;
- struct ub_scsi_cmd_trace *t;
-
- intf = to_usb_interface(dev);
- sc = usb_get_intfdata(intf);
- if (sc == NULL)
- return 0;
-
- cnt = 0;
- spin_lock_irqsave(&sc->lock, flags);
-
- cnt += sprintf(page + cnt,
- "poison %d reset %d\n",
- atomic_read(&sc->poison), sc->reset);
- cnt += sprintf(page + cnt,
- "qlen %d qmax %d\n",
- sc->cmd_queue.qlen, sc->cmd_queue.qmax);
- cnt += sprintf(page + cnt,
- "sg %d %d %d %d %d .. %d\n",
- sc->sg_stat[0],
- sc->sg_stat[1],
- sc->sg_stat[2],
- sc->sg_stat[3],
- sc->sg_stat[4],
- sc->sg_stat[5]);
-
- list_for_each (p, &sc->luns) {
- lun = list_entry(p, struct ub_lun, link);
- cnt += sprintf(page + cnt,
- "lun %u changed %d removable %d readonly %d\n",
- lun->num, lun->changed, lun->removable, lun->readonly);
- }
-
- if ((nc = sc->tr.cur + 1) == SCMD_TRACE_SZ) nc = 0;
- for (j = 0; j < SCMD_TRACE_SZ; j++) {
- t = &sc->tr.vec[nc];
-
- cnt += sprintf(page + cnt, "%08x %02x", t->tag, t->op);
- if (t->op == REQUEST_SENSE) {
- cnt += sprintf(page + cnt, " [sense %x %02x %02x]",
- t->key, t->asc, t->ascq);
- } else {
- cnt += sprintf(page + cnt, " %c", UB_DIR_CHAR(t->dir));
- cnt += sprintf(page + cnt, " [%5d %5d]",
- t->req_size, t->act_size);
- }
- if ((nh = t->hcur + 1) == SCMD_ST_HIST_SZ) nh = 0;
- for (i = 0; i < SCMD_ST_HIST_SZ; i++) {
- cnt += sprintf(page + cnt, " %s",
- ub_scsi_cmd_stname[(int)t->st_hst[nh]]);
- if (++nh == SCMD_ST_HIST_SZ) nh = 0;
- }
- cnt += sprintf(page + cnt, "\n");
-
- if (++nc == SCMD_TRACE_SZ) nc = 0;
- }
-
- spin_unlock_irqrestore(&sc->lock, flags);
- return cnt;
-}
-
-static DEVICE_ATTR(diag, S_IRUGO, ub_diag_show, NULL); /* N.B. World readable */
+static DEFINE_SPINLOCK(ub_lock); /* Locks globals and ->openc */
/*
* The id allocator.
spin_unlock_irqrestore(&ub_lock, flags);
}
+/*
+ * This is necessitated by the fact that blk_cleanup_queue does not
+ * necessarily destroy the queue. Instead, it may merely decrease q->refcnt.
+ * Since our blk_init_queue() passes a spinlock common with ub_dev,
+ * we have lifetime issues when ub_cleanup frees ub_dev.
+ */
+static spinlock_t *ub_next_lock(void)
+{
+ unsigned long flags;
+ spinlock_t *ret;
+
+ spin_lock_irqsave(&ub_lock, flags);
+ ret = &ub_qlockv[ub_qlock_next];
+ ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM;
+ spin_unlock_irqrestore(&ub_lock, flags);
+ return ret;
+}
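
/*
 * For illustration, a sketch of how the shared lock is meant to be used
 * (matching the probe code later in this patch): the lock comes from the
 * static ub_qlockv[] pool, so the request queue can keep taking it even
 * after ub_cleanup() has freed the ub_dev that registered the queue.
 *
 *	sc->lock = ub_next_lock();
 *	...
 *	q = blk_init_queue(ub_request_fn, sc->lock);
 */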
+
/*
* Downcount for deallocation. This rides on two assumptions:
* - once something is poisoned, its refcount cannot grow
if (atomic_read(&sc->poison))
return -ENXIO;
- ub_reset_enter(sc);
+ ub_reset_enter(sc, urq->current_try);
if (urq->current_try >= 3)
return -EIO;
* No exceptions.
*
* Host is assumed locked.
- *
- * XXX We only support Bulk for the moment.
*/
static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
{
add_timer(&sc->work_timer);
cmd->state = UB_CMDST_CMD;
- ub_cmdtr_state(sc, cmd);
return 0;
}
struct ub_dev *sc = (struct ub_dev *) arg;
unsigned long flags;
- spin_lock_irqsave(&sc->lock, flags);
- usb_unlink_urb(&sc->work_urb);
- spin_unlock_irqrestore(&sc->lock, flags);
+ spin_lock_irqsave(sc->lock, flags);
+ if (!ub_is_completed(&sc->work_done))
+ usb_unlink_urb(&sc->work_urb);
+ spin_unlock_irqrestore(sc->lock, flags);
}
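
/*
 * The timeout handler above only polls sc->work_done under the lock and
 * never sleeps on it.  A minimal sketch of such a poll-only completion
 * (per the "direct take-off from completion.h" note earlier); the exact
 * field and helper names are assumptions here, only ub_is_completed()
 * is visible in this patch:
 */
struct ub_completion {
	unsigned int done;
	spinlock_t lock;
};

static inline void ub_init_completion(struct ub_completion *x)
{
	x->done = 0;
	spin_lock_init(&x->lock);
}

static void ub_complete(struct ub_completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->lock, flags);
	x->done++;
	spin_unlock_irqrestore(&x->lock, flags);
}

static int ub_is_completed(struct ub_completion *x)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&x->lock, flags);
	ret = x->done;
	spin_unlock_irqrestore(&x->lock, flags);
	return ret;
}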
/*
struct ub_dev *sc = (struct ub_dev *) _dev;
unsigned long flags;
- spin_lock_irqsave(&sc->lock, flags);
- del_timer(&sc->work_timer);
+ spin_lock_irqsave(sc->lock, flags);
ub_scsi_dispatch(sc);
- spin_unlock_irqrestore(&sc->lock, flags);
+ spin_unlock_irqrestore(sc->lock, flags);
}
static void ub_scsi_dispatch(struct ub_dev *sc)
ub_cmdq_pop(sc);
(*cmd->done)(sc, cmd);
} else if (cmd->state == UB_CMDST_INIT) {
- ub_cmdtr_new(sc, cmd);
if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
break;
cmd->error = rc;
cmd->state = UB_CMDST_DONE;
- ub_cmdtr_state(sc, cmd);
} else {
if (!ub_is_completed(&sc->work_done))
break;
+ del_timer(&sc->work_timer);
ub_scsi_urb_compl(sc, cmd);
}
}
return;
}
cmd->state = UB_CMDST_CLEAR;
- ub_cmdtr_state(sc, cmd);
return;
case -ESHUTDOWN: /* unplug */
case -EILSEQ: /* unplug timeout on uhci */
return;
}
cmd->state = UB_CMDST_CLR2STS;
- ub_cmdtr_state(sc, cmd);
return;
}
if (urb->status == -EOVERFLOW) {
if (urb->status != 0 ||
len != cmd->sgv[cmd->current_sg].length) {
cmd->act_len += len;
- ub_cmdtr_act_len(sc, cmd);
cmd->error = -EIO;
ub_state_stat(sc, cmd);
}
cmd->act_len += urb->actual_length;
- ub_cmdtr_act_len(sc, cmd);
if (++cmd->current_sg < cmd->nsg) {
ub_data_start(sc, cmd);
cmd->error = -EIO; /* A cheap trick... */
cmd->state = UB_CMDST_CLRRS;
- ub_cmdtr_state(sc, cmd);
return;
}
return;
}
cmd->state = UB_CMDST_DONE;
- ub_cmdtr_state(sc, cmd);
ub_cmdq_pop(sc);
(*cmd->done)(sc, cmd);
add_timer(&sc->work_timer);
cmd->state = UB_CMDST_DATA;
- ub_cmdtr_state(sc, cmd);
}
/*
cmd->error = rc;
cmd->state = UB_CMDST_DONE;
- ub_cmdtr_state(sc, cmd);
ub_cmdq_pop(sc);
(*cmd->done)(sc, cmd);
}
cmd->stat_count = 0;
cmd->state = UB_CMDST_STAT;
- ub_cmdtr_state(sc, cmd);
}
/*
return;
cmd->state = UB_CMDST_STAT;
- ub_cmdtr_state(sc, cmd);
}
/*
scmd->tag = sc->tagcnt++;
cmd->state = UB_CMDST_SENSE;
- ub_cmdtr_state(sc, cmd);
ub_cmdq_insert(sc, scmd);
return;
unsigned char *sense = sc->top_sense;
struct ub_scsi_cmd *cmd;
- /*
- * Ignoring scmd->act_len, because the buffer was pre-zeroed.
- */
- ub_cmdtr_sense(sc, scmd, sense);
-
/*
* Find the command which triggered the unit attention or a check,
* save the sense into it, and advance its state machine.
return;
}
+ /*
+ * Ignoring scmd->act_len, because the buffer was pre-zeroed.
+ */
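	/*
	 * Fixed-format sense data: the sense key is the low nibble of byte 2,
	 * the additional sense code and qualifier are bytes 12 and 13
	 * (e.g. 3a/00 is "medium not present", cf. the TODO at the top).
	 */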
cmd->key = sense[2] & 0x0F;
cmd->asc = sense[12];
cmd->ascq = sense[13];
/*
* Reset management
+ * XXX Move usb_reset_device to khubd. Hogging kevent is not a good thing.
+ * XXX Make ub_sync_reset asynchronous.
*/
-static void ub_reset_enter(struct ub_dev *sc)
+static void ub_reset_enter(struct ub_dev *sc, int try)
{
if (sc->reset) {
/* This happens often on multi-LUN devices. */
return;
}
- sc->reset = 1;
+ sc->reset = try + 1;
#if 0 /* Not needed because the disconnect waits for us. */
unsigned long flags;
if (atomic_read(&sc->poison)) {
printk(KERN_NOTICE "%s: Not resetting disconnected device\n",
sc->name); /* P3 This floods. Remove soon. XXX */
+ } else if ((sc->reset & 1) == 0) {
+ ub_sync_reset(sc);
+ msleep(700); /* usb-storage sleeps 6s (!) */
+ ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
+ ub_probe_clear_stall(sc, sc->send_bulk_pipe);
} else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
printk(KERN_NOTICE "%s: Not resetting multi-interface device\n",
sc->name); /* P3 This floods. Remove soon. XXX */
* queues of resets or anything. We do need a spinlock though,
* to interact with block layer.
*/
- spin_lock_irqsave(&sc->lock, flags);
+ spin_lock_irqsave(sc->lock, flags);
sc->reset = 0;
tasklet_schedule(&sc->tasklet);
list_for_each(p, &sc->luns) {
blk_start_queue(lun->disk->queue);
}
wake_up(&sc->reset_wait);
- spin_unlock_irqrestore(&sc->lock, flags);
+ spin_unlock_irqrestore(sc->lock, flags);
}
/*
sc->openc++;
spin_unlock_irqrestore(&ub_lock, flags);
- /*
- * This is a workaround for a specific problem in our block layer.
- * In 2.6.9, register_disk duplicates the code from rescan_partitions.
- * However, if we do add_disk with a device which persistently reports
- * a changed media, add_disk calls register_disk, which does do_open,
- * which will call rescan_paritions for changed media. After that,
- * register_disk attempts to do it all again and causes double kobject
- * registration and a eventually an oops on module removal.
- *
- * The bottom line is, Al Viro says that we should not allow
- * bdev->bd_invalidated to be set when doing add_disk no matter what.
- */
- if (lun->first_open) {
- lun->first_open = 0;
- if (lun->changed) {
- rc = -ENOMEDIUM;
- goto err_open;
- }
- }
-
if (lun->removable || lun->readonly)
check_disk_change(inode->i_bdev);
init_completion(&compl);
rc = -ENOMEM;
- if ((cmd = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
+ if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
goto err_alloc;
- memset(cmd, 0, ALLOC_SIZE);
cmd->cdb[0] = TEST_UNIT_READY;
cmd->cdb_len = 6;
cmd->done = ub_probe_done;
cmd->back = &compl;
- spin_lock_irqsave(&sc->lock, flags);
+ spin_lock_irqsave(sc->lock, flags);
cmd->tag = sc->tagcnt++;
rc = ub_submit_scsi(sc, cmd);
- spin_unlock_irqrestore(&sc->lock, flags);
+ spin_unlock_irqrestore(sc->lock, flags);
if (rc != 0) {
printk("ub: testing ready: submit error (%d)\n", rc); /* P3 */
init_completion(&compl);
rc = -ENOMEM;
- if ((cmd = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
+ if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
goto err_alloc;
- memset(cmd, 0, ALLOC_SIZE);
p = (char *)cmd + sizeof(struct ub_scsi_cmd);
cmd->cdb[0] = 0x25;
cmd->done = ub_probe_done;
cmd->back = &compl;
- spin_lock_irqsave(&sc->lock, flags);
+ spin_lock_irqsave(sc->lock, flags);
cmd->tag = sc->tagcnt++;
rc = ub_submit_scsi(sc, cmd);
- spin_unlock_irqrestore(&sc->lock, flags);
+ spin_unlock_irqrestore(sc->lock, flags);
if (rc != 0) {
printk("ub: reading capacity: submit error (%d)\n", rc); /* P3 */
complete(cop);
}
+/*
+ * Reset with a Bulk reset.
+ */
+static int ub_sync_reset(struct ub_dev *sc)
+{
+ int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
+ struct usb_ctrlrequest *cr;
+ struct completion compl;
+ struct timer_list timer;
+ int rc;
+
+ init_completion(&compl);
+
+ cr = &sc->work_cr;
+ cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
+ cr->bRequest = US_BULK_RESET_REQUEST;
+ cr->wValue = cpu_to_le16(0);
+ cr->wIndex = cpu_to_le16(ifnum);
+ cr->wLength = cpu_to_le16(0);
+
+ usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
+ (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
+ sc->work_urb.actual_length = 0;
+ sc->work_urb.error_count = 0;
+ sc->work_urb.status = 0;
+
+ if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
+ printk(KERN_WARNING
+ "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc);
+ return rc;
+ }
+
+ init_timer(&timer);
+ timer.function = ub_probe_timeout;
+ timer.data = (unsigned long) &compl;
+ timer.expires = jiffies + UB_CTRL_TIMEOUT;
+ add_timer(&timer);
+
+ wait_for_completion(&compl);
+
+ del_timer_sync(&timer);
+ usb_kill_urb(&sc->work_urb);
+
+ return sc->work_urb.status;
+}
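
/*
 * For reference: US_BULK_RESET_REQUEST above is the class-specific
 * Bulk-Only Mass Storage Reset (bRequest 0xFF) sent to the interface with
 * wValue and wLength zero.  The Bulk-Only Transport reset recovery then
 * clears the halt condition on both bulk pipes, which is what
 * ub_reset_task does with ub_probe_clear_stall() after calling
 * ub_sync_reset().
 */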
+
/*
* Get number of LUNs by the way of Bulk GetMaxLUN command.
*/
return -ENXIO;
rc = -ENOMEM;
- if ((sc = kmalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
+ if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
goto err_core;
- memset(sc, 0, sizeof(struct ub_dev));
- spin_lock_init(&sc->lock);
+ sc->lock = ub_next_lock();
INIT_LIST_HEAD(&sc->luns);
usb_init_urb(&sc->work_urb);
tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
if (ub_get_pipes(sc, sc->dev, intf) != 0)
goto err_dev_desc;
- if (device_create_file(&sc->intf->dev, &dev_attr_diag) != 0)
- goto err_diag;
-
/*
* At this point, all USB initialization is done, do upper layer.
* We really hate halfway initialized structures, so from the
nluns = 1;
for (i = 0; i < 3; i++) {
- if ((rc = ub_sync_getmaxlun(sc)) < 0) {
- /*
- * This segment is taken from usb-storage. They say
- * that ZIP-100 needs this, but my own ZIP-100 works
- * fine without this.
- * Still, it does not seem to hurt anything.
- */
- if (rc == -EPIPE) {
- ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
- ub_probe_clear_stall(sc, sc->send_bulk_pipe);
- }
+ if ((rc = ub_sync_getmaxlun(sc)) < 0)
break;
- }
if (rc != 0) {
nluns = rc;
break;
}
return 0;
- /* device_remove_file(&sc->intf->dev, &dev_attr_diag); */
-err_diag:
err_dev_desc:
usb_set_intfdata(intf, NULL);
// usb_put_intf(sc->intf);
int rc;
rc = -ENOMEM;
- if ((lun = kmalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
+ if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
goto err_alloc;
- memset(lun, 0, sizeof(struct ub_lun));
lun->num = lnum;
rc = -ENOSR;
lun->removable = 1; /* XXX Query this from the device */
lun->changed = 1; /* ub_revalidate clears only */
- lun->first_open = 1;
ub_revalidate(sc, lun);
rc = -ENOMEM;
disk->driverfs_dev = &sc->intf->dev;
rc = -ENOMEM;
- if ((q = blk_init_queue(ub_request_fn, &sc->lock)) == NULL)
+ if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
goto err_blkqinit;
disk->queue = q;
* and the whole queue drains. So, we just use this code to
* print warnings.
*/
- spin_lock_irqsave(&sc->lock, flags);
+ spin_lock_irqsave(sc->lock, flags);
{
struct ub_scsi_cmd *cmd;
int cnt = 0;
while ((cmd = ub_cmdq_peek(sc)) != NULL) {
cmd->error = -ENOTCONN;
cmd->state = UB_CMDST_DONE;
- ub_cmdtr_state(sc, cmd);
ub_cmdq_pop(sc);
(*cmd->done)(sc, cmd);
cnt++;
"%d was queued after shutdown\n", sc->name, cnt);
}
}
- spin_unlock_irqrestore(&sc->lock, flags);
+ spin_unlock_irqrestore(sc->lock, flags);
/*
* Unregister the upper layer.
}
/*
- * Taking a lock on a structure which is about to be freed
- * is very nonsensual. Here it is largely a way to do a debug freeze,
- * and a bracket which shows where the nonsensual code segment ends.
- *
* Testing for -EINPROGRESS is always a bug, so we are bending
* the rules a little.
*/
- spin_lock_irqsave(&sc->lock, flags);
+ spin_lock_irqsave(sc->lock, flags);
if (sc->work_urb.status == -EINPROGRESS) { /* janitors: ignore */
printk(KERN_WARNING "%s: "
"URB is active after disconnect\n", sc->name);
}
- spin_unlock_irqrestore(&sc->lock, flags);
+ spin_unlock_irqrestore(sc->lock, flags);
/*
* There is virtually no chance that other CPU runs times so long
* and no URBs left in transit.
*/
- device_remove_file(&sc->intf->dev, &dev_attr_diag);
usb_set_intfdata(intf, NULL);
// usb_put_intf(sc->intf);
sc->intf = NULL;
static int __init ub_init(void)
{
int rc;
+ int i;
+
+ for (i = 0; i < UB_QLOCK_NUM; i++)
+ spin_lock_init(&ub_qlockv[i]);
if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
goto err_regblkdev;