printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL),
inb(HD_CURRENT) & 0xf, inb(HD_SECTOR));
if (CURRENT)
- printk(", sector=%ld", CURRENT->sector);
+ printk(", sector=%ld", blk_rq_pos(CURRENT));
}
printk("\n");
}
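
For reference, a minimal sketch (not part of the patch; hd_show_req() is a made-up name) of how the struct request fields read above map onto the block-layer accessors this conversion switches to:

#include <linux/kernel.h>
#include <linux/blkdev.h>

/* Made-up debug helper illustrating the field -> accessor mapping:
 *   req->sector             -> blk_rq_pos(req)          next sector
 *   req->nr_sectors         -> blk_rq_sectors(req)      sectors left in the request
 *   req->current_nr_sectors -> blk_rq_cur_sectors(req)  sectors left in the current segment
 */
static void hd_show_req(struct request *req)
{
	printk(KERN_DEBUG "req: pos=%llu, left=%u, cur=%u\n",
	       (unsigned long long)blk_rq_pos(req),
	       blk_rq_sectors(req), blk_rq_cur_sectors(req));
}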
if (req != NULL) {
struct hd_i_struct *disk = req->rq_disk->private_data;
if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) {
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
disk->special_op = disk->recalibrate = 1;
} else if (req->errors % RESET_FREQ == 0)
reset = 1;
bad_rw_intr();
hd_request();
return;
+
ok_to_read:
req = CURRENT;
insw(HD_DATA, req->buffer, 256);
- req->sector++;
- req->buffer += 512;
- req->errors = 0;
- i = --req->nr_sectors;
- --req->current_nr_sectors;
#ifdef DEBUG
- printk("%s: read: sector %ld, remaining = %ld, buffer=%p\n",
- req->rq_disk->disk_name, req->sector, req->nr_sectors,
- req->buffer+512);
+ printk("%s: read: sector %ld, remaining = %u, buffer=%p\n",
+ req->rq_disk->disk_name, blk_rq_pos(req) + 1,
+ blk_rq_sectors(req) - 1, req->buffer+512);
#endif
- if (req->current_nr_sectors <= 0)
- end_request(req, 1);
- if (i > 0) {
+ if (__blk_end_request(req, 0, 512)) {
SET_HANDLER(&read_intr);
return;
}
+
(void) inb_p(HD_STATUS);
#if (HD_DELAY > 0)
last_req = read_timer();
#endif
- if (elv_next_request(QUEUE))
- hd_request();
- return;
+ hd_request();
}
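
A hedged sketch of the completion idiom read_intr() (and write_intr() below) now relies on: __blk_end_request() completes the given number of bytes, advances the request's position and buffer bookkeeping internally, and returns true while the request still has data pending, which is why the hand-rolled sector/buffer/nr_sectors updates and the explicit end_request() call disappear. The helper name is made up:

#include <linux/blkdev.h>

/* Complete one 512-byte sector of @req without error.  Must be called with
 * the queue lock held (the __ variants do not take it themselves).
 * Returns true  -> request not finished, keep the read/write handler armed;
 *         false -> request fully completed, move on to the next request.
 */
static bool hd_sector_done(struct request *req)
{
	return __blk_end_request(req, 0, 512);
}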
static void write_intr(void)
continue;
if (!OK_STATUS(i))
break;
- if ((req->nr_sectors <= 1) || (i & DRQ_STAT))
+ if ((blk_rq_sectors(req) <= 1) || (i & DRQ_STAT))
goto ok_to_write;
} while (--retries > 0);
dump_status("write_intr", i);
bad_rw_intr();
hd_request();
return;
+
ok_to_write:
- req->sector++;
- i = --req->nr_sectors;
- --req->current_nr_sectors;
- req->buffer += 512;
- if (!i || (req->bio && req->current_nr_sectors <= 0))
- end_request(req, 1);
- if (i > 0) {
+ if (__blk_end_request(req, 0, 512)) {
SET_HANDLER(&write_intr);
outsw(HD_DATA, req->buffer, 256);
- local_irq_enable();
- } else {
+ return;
+ }
+
#if (HD_DELAY > 0)
- last_req = read_timer();
+ last_req = read_timer();
#endif
- hd_request();
- }
- return;
+ hd_request();
}
static void hd_times_out(unsigned long dummy)
if (!CURRENT)
return;
- disable_irq(HD_IRQ);
- local_irq_enable();
+ spin_lock_irq(hd_queue->queue_lock);
reset = 1;
name = CURRENT->rq_disk->disk_name;
printk("%s: timeout\n", name);
#ifdef DEBUG
printk("%s: too many errors\n", name);
#endif
- end_request(CURRENT, 0);
+ __blk_end_request_cur(CURRENT, -EIO);
}
- local_irq_disable();
hd_request();
- enable_irq(HD_IRQ);
+ spin_unlock_irq(hd_queue->queue_lock);
}
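
The timeout path stops masking HD_IRQ and toggling local interrupt state; it now serializes against do_hd_request() and hd_interrupt() by taking the queue lock. A minimal sketch under that assumption (the callback name is made up; hd_queue is the driver's request queue, as in hd.c):

#include <linux/blkdev.h>
#include <linux/spinlock.h>

static struct request_queue *hd_queue;	/* set up at init time, as in hd.c */

/* Made-up timer callback: it runs in timer (softirq) context with IRQs
 * enabled, so it uses the irq-disabling lock variant to exclude the
 * hard-IRQ handler, which takes the same lock.
 */
static void my_times_out(unsigned long dummy)
{
	spin_lock_irq(hd_queue->queue_lock);
	/* ... fail or retry the in-flight request, then restart the queue ... */
	spin_unlock_irq(hd_queue->queue_lock);
}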
static int do_special_op(struct hd_i_struct *disk, struct request *req)
}
if (disk->head > 16) {
printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name);
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
}
disk->special_op = 0;
return 1;
return;
repeat:
del_timer(&device_timer);
- local_irq_enable();
req = CURRENT;
if (!req) {
}
if (reset) {
- local_irq_disable();
reset_hd();
return;
}
disk = req->rq_disk->private_data;
- block = req->sector;
- nsect = req->nr_sectors;
+ block = blk_rq_pos(req);
+ nsect = blk_rq_sectors(req);
if (block >= get_capacity(req->rq_disk) ||
((block+nsect) > get_capacity(req->rq_disk))) {
printk("%s: bad access: block=%d, count=%d\n",
req->rq_disk->disk_name, block, nsect);
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
goto repeat;
}
break;
default:
printk("unknown hd-command\n");
- end_request(req, 0);
+ __blk_end_request_cur(req, -EIO);
break;
}
}
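
Each error path above swaps end_request(req, 0) for __blk_end_request_cur(req, -EIO): the old 0/1 "uptodate" flag becomes a negative errno, and, like end_request() before it, only the chunk currently being worked on is failed. A hedged sketch with a made-up helper name:

#include <linux/blkdev.h>

/* Fail just the current chunk of @req with -EIO, queue lock held.
 * Returns true if the request still has further chunks pending.
 */
static bool hd_fail_cur(struct request *req)
{
	return __blk_end_request_cur(req, -EIO);
}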
static void do_hd_request(struct request_queue *q)
{
- disable_irq(HD_IRQ);
hd_request();
- enable_irq(HD_IRQ);
}
static irqreturn_t hd_interrupt(int irq, void *dev_id)
{
void (*handler)(void) = do_hd;
+ spin_lock(hd_queue->queue_lock);
+
do_hd = NULL;
del_timer(&device_timer);
if (!handler)
handler = unexpected_hd_interrupt;
handler();
- local_irq_enable();
+
+ spin_unlock(hd_queue->queue_lock);
+
return IRQ_HANDLED;
}
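
A note on the lock variants: hd_interrupt() runs in hard-IRQ context with interrupts already disabled on the local CPU, so plain spin_lock()/spin_unlock() is enough there, while hd_times_out() runs from the timer and uses spin_lock_irq()/spin_unlock_irq() so the interrupt handler cannot nest on the same lock. do_hd_request() itself is entered by the block layer with the queue lock already held, which is why the disable_irq(HD_IRQ)/enable_irq(HD_IRQ) bracket around hd_request() is no longer needed.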