1 // SPDX-License-Identifier: GPL-2.0-only
3 * ms_block.c - Sony MemoryStick (legacy) storage support
7 * Minor portions of the driver were copied from mspro_block.c which is
10 #define DRIVER_NAME "ms_block"
11 #define pr_fmt(fmt) DRIVER_NAME ": " fmt
13 #include <linux/module.h>
14 #include <linux/blk-mq.h>
15 #include <linux/memstick.h>
16 #include <linux/idr.h>
17 #include <linux/hdreg.h>
18 #include <linux/delay.h>
19 #include <linux/slab.h>
20 #include <linux/random.h>
21 #include <linux/bitmap.h>
22 #include <linux/scatterlist.h>
23 #include <linux/jiffies.h>
24 #include <linux/workqueue.h>
25 #include <linux/mutex.h>
29 static int cache_flush_timeout = 1000;
30 static bool verify_writes;
 * Copies a section of 'sg_from', starting at offset 'offset' and with length
 * 'len', to another scatterlist of 'to_nents' entries
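 * Returns the number of bytes actually copied, which may be less than 'len'
 * if 'sg_to' runs out of entries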
36 static size_t msb_sg_copy(struct scatterlist *sg_from,
37 struct scatterlist *sg_to, int to_nents, size_t offset, size_t len)
42 if (offset >= sg_from->length) {
43 if (sg_is_last(sg_from))
46 offset -= sg_from->length;
47 sg_from = sg_next(sg_from);
51 copied = min(len, sg_from->length - offset);
52 sg_set_page(sg_to, sg_page(sg_from),
53 copied, sg_from->offset + offset);
58 if (sg_is_last(sg_from) || !len)
61 sg_to = sg_next(sg_to);
63 sg_from = sg_next(sg_from);
66 while (len > sg_from->length && to_nents--) {
67 len -= sg_from->length;
68 copied += sg_from->length;
70 sg_set_page(sg_to, sg_page(sg_from),
71 sg_from->length, sg_from->offset);
73 if (sg_is_last(sg_from) || !len)
76 sg_from = sg_next(sg_from);
77 sg_to = sg_next(sg_to);
80 if (len && to_nents) {
81 sg_set_page(sg_to, sg_page(sg_from), len, sg_from->offset);
 * Compares a section of 'sg', starting at offset 'offset' and with length
 * 'len', to a linear buffer of length 'len' at address 'buffer'
 * Returns 0 if equal and -1 otherwise
94 static int msb_sg_compare_to_buffer(struct scatterlist *sg,
95 size_t offset, u8 *buffer, size_t len)
97 int retval = 0, cmplen;
98 struct sg_mapping_iter miter;
100 sg_miter_start(&miter, sg, sg_nents(sg),
101 SG_MITER_ATOMIC | SG_MITER_FROM_SG);
103 while (sg_miter_next(&miter) && len > 0) {
104 if (offset >= miter.length) {
105 offset -= miter.length;
109 cmplen = min(miter.length - offset, len);
110 retval = memcmp(miter.addr + offset, buffer, cmplen) ? -1 : 0;
122 sg_miter_stop(&miter);
/* Get the zone in which the block with logical address 'lba' lives
 * Flash is broken into zones.
 * Each zone consists of 512 eraseblocks; the first zone uses 494 of them
 * and every following zone uses 496.
 * Therefore zone #0 hosts blocks 0-493, zone #1 blocks 494-989, etc...
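 * (so e.g. msb_get_zone_from_lba(493) == 0 and msb_get_zone_from_lba(494) == 1)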
133 static int msb_get_zone_from_lba(int lba)
137 return ((lba - 494) / 496) + 1;
140 /* Get zone of physical block. Trivial */
141 static int msb_get_zone_from_pba(int pba)
143 return pba / MS_BLOCKS_IN_ZONE;
146 /* Debug test to validate free block counts */
147 static int msb_validate_used_block_bitmap(struct msb_data *msb)
149 int total_free_blocks = 0;
155 for (i = 0; i < msb->zone_count; i++)
156 total_free_blocks += msb->free_block_count[i];
158 if (msb->block_count - bitmap_weight(msb->used_blocks_bitmap,
159 msb->block_count) == total_free_blocks)
162 pr_err("BUG: free block counts don't match the bitmap");
163 msb->read_only = true;
167 /* Mark physical block as used */
168 static void msb_mark_block_used(struct msb_data *msb, int pba)
170 int zone = msb_get_zone_from_pba(pba);
172 if (test_bit(pba, msb->used_blocks_bitmap)) {
174 "BUG: attempt to mark already used pba %d as used", pba);
175 msb->read_only = true;
179 if (msb_validate_used_block_bitmap(msb))
182 /* No races because all IO is single threaded */
183 __set_bit(pba, msb->used_blocks_bitmap);
184 msb->free_block_count[zone]--;
187 /* Mark physical block as free */
188 static void msb_mark_block_unused(struct msb_data *msb, int pba)
190 int zone = msb_get_zone_from_pba(pba);
192 if (!test_bit(pba, msb->used_blocks_bitmap)) {
		pr_err("BUG: attempt to mark already unused pba %d as unused", pba);
194 msb->read_only = true;
198 if (msb_validate_used_block_bitmap(msb))
201 /* No races because all IO is single threaded */
202 __clear_bit(pba, msb->used_blocks_bitmap);
203 msb->free_block_count[zone]++;
206 /* Invalidate current register window */
207 static void msb_invalidate_reg_window(struct msb_data *msb)
209 msb->reg_addr.w_offset = offsetof(struct ms_register, id);
210 msb->reg_addr.w_length = sizeof(struct ms_id_register);
211 msb->reg_addr.r_offset = offsetof(struct ms_register, id);
212 msb->reg_addr.r_length = sizeof(struct ms_id_register);
213 msb->addr_valid = false;
216 /* Start a state machine */
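/*
 * The handler passed as state_func is installed as card->next_request and is
 * re-invoked by the memstick core after each completed request, queuing
 * follow-up requests until it calls msb_exit_state_machine(), which
 * completes mrq_complete and wakes us up
 */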
217 static int msb_run_state_machine(struct msb_data *msb, int (*state_func)
218 (struct memstick_dev *card, struct memstick_request **req))
220 struct memstick_dev *card = msb->card;
222 WARN_ON(msb->state != -1);
223 msb->int_polling = false;
227 memset(&card->current_mrq, 0, sizeof(card->current_mrq));
229 card->next_request = state_func;
230 memstick_new_req(card->host);
231 wait_for_completion(&card->mrq_complete);
233 WARN_ON(msb->state != -1);
234 return msb->exit_error;
/* State machines call this to exit */
238 static int msb_exit_state_machine(struct msb_data *msb, int error)
240 WARN_ON(msb->state == -1);
243 msb->exit_error = error;
244 msb->card->next_request = h_msb_default_bad;
246 /* Invalidate reg window on errors */
248 msb_invalidate_reg_window(msb);
250 complete(&msb->card->mrq_complete);
254 /* read INT register */
255 static int msb_read_int_reg(struct msb_data *msb, long timeout)
257 struct memstick_request *mrq = &msb->card->current_mrq;
259 WARN_ON(msb->state == -1);
261 if (!msb->int_polling) {
262 msb->int_timeout = jiffies +
263 msecs_to_jiffies(timeout == -1 ? 500 : timeout);
264 msb->int_polling = true;
265 } else if (time_after(jiffies, msb->int_timeout)) {
266 mrq->data[0] = MEMSTICK_INT_CMDNAK;
270 if ((msb->caps & MEMSTICK_CAP_AUTO_GET_INT) &&
271 mrq->need_card_int && !mrq->error) {
272 mrq->data[0] = mrq->int_reg;
273 mrq->need_card_int = false;
276 memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
281 /* Read a register */
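/*
 * If the register window has to be moved first, this queues a
 * SET_RW_REG_ADRS request instead and returns 0, so that the caller stays
 * in its current state and retries; msb_write_regs() below works the same way
 */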
282 static int msb_read_regs(struct msb_data *msb, int offset, int len)
284 struct memstick_request *req = &msb->card->current_mrq;
286 if (msb->reg_addr.r_offset != offset ||
287 msb->reg_addr.r_length != len || !msb->addr_valid) {
289 msb->reg_addr.r_offset = offset;
290 msb->reg_addr.r_length = len;
291 msb->addr_valid = true;
293 memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
294 &msb->reg_addr, sizeof(msb->reg_addr));
298 memstick_init_req(req, MS_TPC_READ_REG, NULL, len);
302 /* Write a card register */
303 static int msb_write_regs(struct msb_data *msb, int offset, int len, void *buf)
305 struct memstick_request *req = &msb->card->current_mrq;
307 if (msb->reg_addr.w_offset != offset ||
308 msb->reg_addr.w_length != len || !msb->addr_valid) {
310 msb->reg_addr.w_offset = offset;
311 msb->reg_addr.w_length = len;
312 msb->addr_valid = true;
314 memstick_init_req(req, MS_TPC_SET_RW_REG_ADRS,
315 &msb->reg_addr, sizeof(msb->reg_addr));
319 memstick_init_req(req, MS_TPC_WRITE_REG, buf, len);
323 /* Handler for absence of IO */
324 static int h_msb_default_bad(struct memstick_dev *card,
325 struct memstick_request **mrq)
 * This function is a handler for reads of one page from the device.
 * Writes output to msb->current_sg, takes the sector address from
 * msb->regs.param. Can also be used to read extra data only; set the
 * params accordingly.
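 * The MSB_RP_* states below send the block/page address, issue
 * MS_CMD_BLOCK_READ, poll the INT register until CED, read the status
 * register if INT_ERR is set, then read the OOB and finally the data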
335 static int h_msb_read_page(struct memstick_dev *card,
336 struct memstick_request **out_mrq)
338 struct msb_data *msb = memstick_get_drvdata(card);
339 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
340 struct scatterlist sg[2];
344 dbg("read_page, unknown error");
345 return msb_exit_state_machine(msb, mrq->error);
348 switch (msb->state) {
349 case MSB_RP_SEND_BLOCK_ADDRESS:
		/* msb_write_regs sometimes "fails" because it needs to update
		 * the reg window, and thus returns a request for that.
		 * Then we stay in this state and retry
354 if (!msb_write_regs(msb,
355 offsetof(struct ms_register, param),
356 sizeof(struct ms_param_register),
357 (unsigned char *)&msb->regs.param))
360 msb->state = MSB_RP_SEND_READ_COMMAND;
363 case MSB_RP_SEND_READ_COMMAND:
364 command = MS_CMD_BLOCK_READ;
365 memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
366 msb->state = MSB_RP_SEND_INT_REQ;
369 case MSB_RP_SEND_INT_REQ:
370 msb->state = MSB_RP_RECEIVE_INT_REQ_RESULT;
		/* If we don't actually need to send the INT read request
		 * (only needed in serial mode), just fall through
374 if (msb_read_int_reg(msb, -1))
378 case MSB_RP_RECEIVE_INT_REQ_RESULT:
379 intreg = mrq->data[0];
380 msb->regs.status.interrupt = intreg;
382 if (intreg & MEMSTICK_INT_CMDNAK)
383 return msb_exit_state_machine(msb, -EIO);
385 if (!(intreg & MEMSTICK_INT_CED)) {
386 msb->state = MSB_RP_SEND_INT_REQ;
390 msb->int_polling = false;
391 msb->state = (intreg & MEMSTICK_INT_ERR) ?
392 MSB_RP_SEND_READ_STATUS_REG : MSB_RP_SEND_OOB_READ;
395 case MSB_RP_SEND_READ_STATUS_REG:
		/* read the status register to understand the source of the INT_ERR */
397 if (!msb_read_regs(msb,
398 offsetof(struct ms_register, status),
399 sizeof(struct ms_status_register)))
402 msb->state = MSB_RP_RECEIVE_STATUS_REG;
405 case MSB_RP_RECEIVE_STATUS_REG:
406 msb->regs.status = *(struct ms_status_register *)mrq->data;
407 msb->state = MSB_RP_SEND_OOB_READ;
410 case MSB_RP_SEND_OOB_READ:
411 if (!msb_read_regs(msb,
412 offsetof(struct ms_register, extra_data),
413 sizeof(struct ms_extra_data_register)))
416 msb->state = MSB_RP_RECEIVE_OOB_READ;
419 case MSB_RP_RECEIVE_OOB_READ:
420 msb->regs.extra_data =
421 *(struct ms_extra_data_register *) mrq->data;
422 msb->state = MSB_RP_SEND_READ_DATA;
425 case MSB_RP_SEND_READ_DATA:
		/* Skip this state if we only read the OOB */
427 if (msb->regs.param.cp == MEMSTICK_CP_EXTRA) {
428 msb->state = MSB_RP_RECEIVE_READ_DATA;
432 sg_init_table(sg, ARRAY_SIZE(sg));
433 msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
434 msb->current_sg_offset,
437 memstick_init_req_sg(mrq, MS_TPC_READ_LONG_DATA, sg);
438 msb->state = MSB_RP_RECEIVE_READ_DATA;
441 case MSB_RP_RECEIVE_READ_DATA:
442 if (!(msb->regs.status.interrupt & MEMSTICK_INT_ERR)) {
443 msb->current_sg_offset += msb->page_size;
444 return msb_exit_state_machine(msb, 0);
447 if (msb->regs.status.status1 & MEMSTICK_UNCORR_ERROR) {
448 dbg("read_page: uncorrectable error");
449 return msb_exit_state_machine(msb, -EBADMSG);
452 if (msb->regs.status.status1 & MEMSTICK_CORR_ERROR) {
453 dbg("read_page: correctable error");
454 msb->current_sg_offset += msb->page_size;
455 return msb_exit_state_machine(msb, -EUCLEAN);
457 dbg("read_page: INT error, but no status error bits");
458 return msb_exit_state_machine(msb, -EIO);
 * Handler for writes of exactly one block.
 * Takes the address from msb->regs.param.
 * Writes the same extra data to every page of the block, also taken
 * from msb->regs.extra_data.
 * Returns -EBADMSG if the write fails due to an uncorrectable error, or -EIO
 * if the device refuses to take the command or something else goes wrong
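 * Pages are streamed one at a time: BREQ is polled before sending each data
 * chunk, and CED is polled after the last page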
473 static int h_msb_write_block(struct memstick_dev *card,
474 struct memstick_request **out_mrq)
476 struct msb_data *msb = memstick_get_drvdata(card);
477 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
478 struct scatterlist sg[2];
482 return msb_exit_state_machine(msb, mrq->error);
485 switch (msb->state) {
	/* HACK: the JMicron handling of TPCs between 8 and
	 * sizeof(memstick_request.data) bytes is broken due to a hardware
	 * bug in the PIO mode that is used for these TPCs.
	 * Therefore split the write
493 case MSB_WB_SEND_WRITE_PARAMS:
494 if (!msb_write_regs(msb,
495 offsetof(struct ms_register, param),
496 sizeof(struct ms_param_register),
500 msb->state = MSB_WB_SEND_WRITE_OOB;
503 case MSB_WB_SEND_WRITE_OOB:
504 if (!msb_write_regs(msb,
505 offsetof(struct ms_register, extra_data),
506 sizeof(struct ms_extra_data_register),
507 &msb->regs.extra_data))
509 msb->state = MSB_WB_SEND_WRITE_COMMAND;
513 case MSB_WB_SEND_WRITE_COMMAND:
514 command = MS_CMD_BLOCK_WRITE;
515 memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
516 msb->state = MSB_WB_SEND_INT_REQ;
519 case MSB_WB_SEND_INT_REQ:
520 msb->state = MSB_WB_RECEIVE_INT_REQ;
521 if (msb_read_int_reg(msb, -1))
525 case MSB_WB_RECEIVE_INT_REQ:
526 intreg = mrq->data[0];
527 msb->regs.status.interrupt = intreg;
529 /* errors mean out of here, and fast... */
530 if (intreg & (MEMSTICK_INT_CMDNAK))
531 return msb_exit_state_machine(msb, -EIO);
533 if (intreg & MEMSTICK_INT_ERR)
534 return msb_exit_state_machine(msb, -EBADMSG);
537 /* for last page we need to poll CED */
538 if (msb->current_page == msb->pages_in_block) {
539 if (intreg & MEMSTICK_INT_CED)
540 return msb_exit_state_machine(msb, 0);
541 msb->state = MSB_WB_SEND_INT_REQ;
546 /* for non-last page we need BREQ before writing next chunk */
547 if (!(intreg & MEMSTICK_INT_BREQ)) {
548 msb->state = MSB_WB_SEND_INT_REQ;
552 msb->int_polling = false;
553 msb->state = MSB_WB_SEND_WRITE_DATA;
556 case MSB_WB_SEND_WRITE_DATA:
557 sg_init_table(sg, ARRAY_SIZE(sg));
559 if (msb_sg_copy(msb->current_sg, sg, ARRAY_SIZE(sg),
560 msb->current_sg_offset,
561 msb->page_size) < msb->page_size)
562 return msb_exit_state_machine(msb, -EIO);
564 memstick_init_req_sg(mrq, MS_TPC_WRITE_LONG_DATA, sg);
565 mrq->need_card_int = 1;
566 msb->state = MSB_WB_RECEIVE_WRITE_CONFIRMATION;
569 case MSB_WB_RECEIVE_WRITE_CONFIRMATION:
571 msb->current_sg_offset += msb->page_size;
572 msb->state = MSB_WB_SEND_INT_REQ;
 * This function is used to send simple IO requests to the device, consisting
 * of a register write + command
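 * (used for overwrite flag updates and block erases, see
 * msb_set_overwrite_flag() and msb_erase_block())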
585 static int h_msb_send_command(struct memstick_dev *card,
586 struct memstick_request **out_mrq)
588 struct msb_data *msb = memstick_get_drvdata(card);
589 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
593 dbg("send_command: unknown error");
594 return msb_exit_state_machine(msb, mrq->error);
597 switch (msb->state) {
599 /* HACK: see h_msb_write_block */
600 case MSB_SC_SEND_WRITE_PARAMS: /* write param register*/
601 if (!msb_write_regs(msb,
602 offsetof(struct ms_register, param),
603 sizeof(struct ms_param_register),
606 msb->state = MSB_SC_SEND_WRITE_OOB;
609 case MSB_SC_SEND_WRITE_OOB:
610 if (!msb->command_need_oob) {
611 msb->state = MSB_SC_SEND_COMMAND;
615 if (!msb_write_regs(msb,
616 offsetof(struct ms_register, extra_data),
617 sizeof(struct ms_extra_data_register),
618 &msb->regs.extra_data))
621 msb->state = MSB_SC_SEND_COMMAND;
624 case MSB_SC_SEND_COMMAND:
625 memstick_init_req(mrq, MS_TPC_SET_CMD, &msb->command_value, 1);
626 msb->state = MSB_SC_SEND_INT_REQ;
629 case MSB_SC_SEND_INT_REQ:
630 msb->state = MSB_SC_RECEIVE_INT_REQ;
631 if (msb_read_int_reg(msb, -1))
635 case MSB_SC_RECEIVE_INT_REQ:
636 intreg = mrq->data[0];
638 if (intreg & MEMSTICK_INT_CMDNAK)
639 return msb_exit_state_machine(msb, -EIO);
640 if (intreg & MEMSTICK_INT_ERR)
641 return msb_exit_state_machine(msb, -EBADMSG);
643 if (!(intreg & MEMSTICK_INT_CED)) {
644 msb->state = MSB_SC_SEND_INT_REQ;
648 return msb_exit_state_machine(msb, 0);
654 /* Small handler for card reset */
655 static int h_msb_reset(struct memstick_dev *card,
656 struct memstick_request **out_mrq)
658 u8 command = MS_CMD_RESET;
659 struct msb_data *msb = memstick_get_drvdata(card);
660 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
663 return msb_exit_state_machine(msb, mrq->error);
665 switch (msb->state) {
667 memstick_init_req(mrq, MS_TPC_SET_CMD, &command, 1);
668 mrq->need_card_int = 0;
669 msb->state = MSB_RS_CONFIRM;
672 return msb_exit_state_machine(msb, 0);
677 /* This handler is used to do serial->parallel switch */
678 static int h_msb_parallel_switch(struct memstick_dev *card,
679 struct memstick_request **out_mrq)
681 struct msb_data *msb = memstick_get_drvdata(card);
682 struct memstick_request *mrq = *out_mrq = &card->current_mrq;
683 struct memstick_host *host = card->host;
686 dbg("parallel_switch: error");
687 msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
688 return msb_exit_state_machine(msb, mrq->error);
691 switch (msb->state) {
692 case MSB_PS_SEND_SWITCH_COMMAND:
693 /* Set the parallel interface on memstick side */
694 msb->regs.param.system |= MEMSTICK_SYS_PAM;
696 if (!msb_write_regs(msb,
697 offsetof(struct ms_register, param),
699 (unsigned char *)&msb->regs.param))
702 msb->state = MSB_PS_SWICH_HOST;
705 case MSB_PS_SWICH_HOST:
706 /* Set parallel interface on our side + send a dummy request
707 * to see if card responds
709 host->set_param(host, MEMSTICK_INTERFACE, MEMSTICK_PAR4);
710 memstick_init_req(mrq, MS_TPC_GET_INT, NULL, 1);
711 msb->state = MSB_PS_CONFIRM;
715 return msb_exit_state_machine(msb, 0);
721 static int msb_switch_to_parallel(struct msb_data *msb);
/* Reset the card, to guard against hw errors being treated as bad blocks */
724 static int msb_reset(struct msb_data *msb, bool full)
727 bool was_parallel = msb->regs.param.system & MEMSTICK_SYS_PAM;
728 struct memstick_dev *card = msb->card;
729 struct memstick_host *host = card->host;
733 msb->regs.param.system = MEMSTICK_SYS_BAMD;
736 error = host->set_param(host,
737 MEMSTICK_POWER, MEMSTICK_POWER_OFF);
741 msb_invalidate_reg_window(msb);
743 error = host->set_param(host,
744 MEMSTICK_POWER, MEMSTICK_POWER_ON);
748 error = host->set_param(host,
749 MEMSTICK_INTERFACE, MEMSTICK_SERIAL);
752 dbg("Failed to reset the host controller");
753 msb->read_only = true;
758 error = msb_run_state_machine(msb, h_msb_reset);
760 dbg("Failed to reset the card");
761 msb->read_only = true;
765 /* Set parallel mode */
767 msb_switch_to_parallel(msb);
771 /* Attempts to switch interface to parallel mode */
772 static int msb_switch_to_parallel(struct msb_data *msb)
776 error = msb_run_state_machine(msb, h_msb_parallel_switch);
778 pr_err("Switch to parallel failed");
779 msb->regs.param.system &= ~MEMSTICK_SYS_PAM;
780 msb_reset(msb, true);
784 msb->caps |= MEMSTICK_CAP_AUTO_GET_INT;
788 /* Changes overwrite flag on a page */
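/*
 * Used by msb_mark_bad()/msb_mark_page_bad() below to clear the BKST/PGST0
 * bits, and by msb_update_block() to mark a block whose update has started
 */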
789 static int msb_set_overwrite_flag(struct msb_data *msb,
790 u16 pba, u8 page, u8 flag)
795 msb->regs.param.block_address = cpu_to_be16(pba);
796 msb->regs.param.page_address = page;
797 msb->regs.param.cp = MEMSTICK_CP_OVERWRITE;
798 msb->regs.extra_data.overwrite_flag = flag;
799 msb->command_value = MS_CMD_BLOCK_WRITE;
800 msb->command_need_oob = true;
	dbg_verbose("changing overwrite flag to %02x for pba %d, page %d",
804 return msb_run_state_machine(msb, h_msb_send_command);
807 static int msb_mark_bad(struct msb_data *msb, int pba)
809 pr_notice("marking pba %d as bad", pba);
810 msb_reset(msb, true);
811 return msb_set_overwrite_flag(
812 msb, pba, 0, 0xFF & ~MEMSTICK_OVERWRITE_BKST);
815 static int msb_mark_page_bad(struct msb_data *msb, int pba, int page)
817 dbg("marking page %d of pba %d as bad", page, pba);
818 msb_reset(msb, true);
819 return msb_set_overwrite_flag(msb,
820 pba, page, ~MEMSTICK_OVERWRITE_PGST0);
823 /* Erases one physical block */
824 static int msb_erase_block(struct msb_data *msb, u16 pba)
831 dbg_verbose("erasing pba %d", pba);
833 for (try = 1; try < 3; try++) {
834 msb->regs.param.block_address = cpu_to_be16(pba);
835 msb->regs.param.page_address = 0;
836 msb->regs.param.cp = MEMSTICK_CP_BLOCK;
837 msb->command_value = MS_CMD_BLOCK_ERASE;
838 msb->command_need_oob = false;
841 error = msb_run_state_machine(msb, h_msb_send_command);
842 if (!error || msb_reset(msb, true))
847 pr_err("erase failed, marking pba %d as bad", pba);
848 msb_mark_bad(msb, pba);
851 dbg_verbose("erase success, marking pba %d as unused", pba);
852 msb_mark_block_unused(msb, pba);
853 __set_bit(pba, msb->erased_blocks_bitmap);
857 /* Reads one page from device */
858 static int msb_read_page(struct msb_data *msb,
859 u16 pba, u8 page, struct ms_extra_data_register *extra,
860 struct scatterlist *sg, int offset)
864 if (pba == MS_BLOCK_INVALID) {
866 struct sg_mapping_iter miter;
867 size_t len = msb->page_size;
869 dbg_verbose("read unmapped sector. returning 0xFF");
871 local_irq_save(flags);
872 sg_miter_start(&miter, sg, sg_nents(sg),
873 SG_MITER_ATOMIC | SG_MITER_TO_SG);
875 while (sg_miter_next(&miter) && len > 0) {
879 if (offset && offset >= miter.length) {
880 offset -= miter.length;
884 chunklen = min(miter.length - offset, len);
885 memset(miter.addr + offset, 0xFF, chunklen);
890 sg_miter_stop(&miter);
891 local_irq_restore(flags);
897 memset(extra, 0xFF, sizeof(*extra));
901 if (pba >= msb->block_count) {
902 pr_err("BUG: attempt to read beyond the end of the card at pba %d", pba);
906 for (try = 1; try < 3; try++) {
907 msb->regs.param.block_address = cpu_to_be16(pba);
908 msb->regs.param.page_address = page;
909 msb->regs.param.cp = MEMSTICK_CP_PAGE;
911 msb->current_sg = sg;
912 msb->current_sg_offset = offset;
913 error = msb_run_state_machine(msb, h_msb_read_page);
916 if (error == -EUCLEAN) {
917 pr_notice("correctable error on pba %d, page %d",
923 *extra = msb->regs.extra_data;
925 if (!error || msb_reset(msb, true))
931 if (error == -EBADMSG) {
932 pr_err("uncorrectable error on read of pba %d, page %d",
935 if (msb->regs.extra_data.overwrite_flag &
936 MEMSTICK_OVERWRITE_PGST0)
937 msb_mark_page_bad(msb, pba, page);
942 pr_err("read of pba %d, page %d failed with error %d",
/* Reads the OOB of a page only */
948 static int msb_read_oob(struct msb_data *msb, u16 pba, u16 page,
949 struct ms_extra_data_register *extra)
954 msb->regs.param.block_address = cpu_to_be16(pba);
955 msb->regs.param.page_address = page;
956 msb->regs.param.cp = MEMSTICK_CP_EXTRA;
	if (pba >= msb->block_count) {
959 pr_err("BUG: attempt to read beyond the end of card at pba %d", pba);
963 error = msb_run_state_machine(msb, h_msb_read_page);
964 *extra = msb->regs.extra_data;
966 if (error == -EUCLEAN) {
967 pr_notice("correctable error on pba %d, page %d",
975 /* Reads a block and compares it with data contained in scatterlist orig_sg */
976 static int msb_verify_block(struct msb_data *msb, u16 pba,
977 struct scatterlist *orig_sg, int offset)
979 struct scatterlist sg;
982 sg_init_one(&sg, msb->block_buffer, msb->block_size);
984 while (page < msb->pages_in_block) {
986 error = msb_read_page(msb, pba, page,
987 NULL, &sg, page * msb->page_size);
993 if (msb_sg_compare_to_buffer(orig_sg, offset,
994 msb->block_buffer, msb->block_size))
/* Writes exactly one block + OOB */
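/*
 * On a write (or verify) failure the block is erased and the write is
 * retried once before the error is returned to the caller
 */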
1000 static int msb_write_block(struct msb_data *msb,
1001 u16 pba, u32 lba, struct scatterlist *sg, int offset)
1003 int error, current_try = 1;
1005 BUG_ON(sg->length < msb->page_size);
1010 if (pba == MS_BLOCK_INVALID) {
1012 "BUG: write: attempt to write MS_BLOCK_INVALID block");
1016 if (pba >= msb->block_count || lba >= msb->logical_block_count) {
1018 "BUG: write: attempt to write beyond the end of device");
1022 if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
1023 pr_err("BUG: write: lba zone mismatch");
1027 if (pba == msb->boot_block_locations[0] ||
1028 pba == msb->boot_block_locations[1]) {
1029 pr_err("BUG: write: attempt to write to boot blocks!");
1038 msb->regs.param.cp = MEMSTICK_CP_BLOCK;
1039 msb->regs.param.page_address = 0;
1040 msb->regs.param.block_address = cpu_to_be16(pba);
1042 msb->regs.extra_data.management_flag = 0xFF;
1043 msb->regs.extra_data.overwrite_flag = 0xF8;
1044 msb->regs.extra_data.logical_address = cpu_to_be16(lba);
1046 msb->current_sg = sg;
1047 msb->current_sg_offset = offset;
1048 msb->current_page = 0;
1050 error = msb_run_state_machine(msb, h_msb_write_block);
		/* The block we just wrote to is assumed to have been erased,
		 * since its pba was erased. If it actually wasn't, the write
		 * will still "succeed" and just clear the bits that were set
		 * in the block, so read back and check that what we have
		 * written matches what we expect.
		 * Blocks that we erased ourselves are trusted
1059 if (!error && (verify_writes ||
1060 !test_bit(pba, msb->erased_blocks_bitmap)))
1061 error = msb_verify_block(msb, pba, sg, offset);
1066 if (current_try > 1 || msb_reset(msb, true))
1069 pr_err("write failed, trying to erase the pba %d", pba);
1070 error = msb_erase_block(msb, pba);
1079 /* Finds a free block for write replacement */
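/*
 * A random index into the zone's free blocks is picked, and the used-blocks
 * bitmap is scanned from the start of the zone to find it; this spreads
 * writes across the whole zone
 */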
1080 static u16 msb_get_free_block(struct msb_data *msb, int zone)
1083 int pba = zone * MS_BLOCKS_IN_ZONE;
1086 get_random_bytes(&pos, sizeof(pos));
1088 if (!msb->free_block_count[zone]) {
		pr_err("no free blocks in zone %d to use for a write (media is worn out), switching to RO mode", zone);
1090 msb->read_only = true;
1091 return MS_BLOCK_INVALID;
1094 pos %= msb->free_block_count[zone];
1096 dbg_verbose("have %d choices for a free block, selected randomly: %d",
1097 msb->free_block_count[zone], pos);
1099 pba = find_next_zero_bit(msb->used_blocks_bitmap,
1100 msb->block_count, pba);
1101 for (i = 0; i < pos; ++i)
1102 pba = find_next_zero_bit(msb->used_blocks_bitmap,
1103 msb->block_count, pba + 1);
1105 dbg_verbose("result of the free blocks scan: pba %d", pba);
1107 if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
1108 pr_err("BUG: can't get a free block");
1109 msb->read_only = true;
1110 return MS_BLOCK_INVALID;
1113 msb_mark_block_used(msb, pba);
1117 static int msb_update_block(struct msb_data *msb, u16 lba,
1118 struct scatterlist *sg, int offset)
1123 pba = msb->lba_to_pba_table[lba];
1124 dbg_verbose("start of a block update at lba %d, pba %d", lba, pba);
1126 if (pba != MS_BLOCK_INVALID) {
1127 dbg_verbose("setting the update flag on the block");
1128 msb_set_overwrite_flag(msb, pba, 0,
1129 0xFF & ~MEMSTICK_OVERWRITE_UDST);
1132 for (try = 0; try < 3; try++) {
1133 new_pba = msb_get_free_block(msb,
1134 msb_get_zone_from_lba(lba));
1136 if (new_pba == MS_BLOCK_INVALID) {
1141 dbg_verbose("block update: writing updated block to the pba %d",
1143 error = msb_write_block(msb, new_pba, lba, sg, offset);
1144 if (error == -EBADMSG) {
1145 msb_mark_bad(msb, new_pba);
1152 dbg_verbose("block update: erasing the old block");
1153 msb_erase_block(msb, pba);
1154 msb->lba_to_pba_table[lba] = new_pba;
1159 pr_err("block update error after %d tries, switching to r/o mode", try);
1160 msb->read_only = true;
/* Converts endianness in the boot block for easy use */
1166 static void msb_fix_boot_page_endianness(struct ms_boot_page *p)
1168 p->header.block_id = be16_to_cpu(p->header.block_id);
1169 p->header.format_reserved = be16_to_cpu(p->header.format_reserved);
1170 p->entry.disabled_block.start_addr
1171 = be32_to_cpu(p->entry.disabled_block.start_addr);
1172 p->entry.disabled_block.data_size
1173 = be32_to_cpu(p->entry.disabled_block.data_size);
1174 p->entry.cis_idi.start_addr
1175 = be32_to_cpu(p->entry.cis_idi.start_addr);
1176 p->entry.cis_idi.data_size
1177 = be32_to_cpu(p->entry.cis_idi.data_size);
1178 p->attr.block_size = be16_to_cpu(p->attr.block_size);
1179 p->attr.number_of_blocks = be16_to_cpu(p->attr.number_of_blocks);
1180 p->attr.number_of_effective_blocks
1181 = be16_to_cpu(p->attr.number_of_effective_blocks);
1182 p->attr.page_size = be16_to_cpu(p->attr.page_size);
1183 p->attr.memory_manufacturer_code
1184 = be16_to_cpu(p->attr.memory_manufacturer_code);
1185 p->attr.memory_device_code = be16_to_cpu(p->attr.memory_device_code);
1186 p->attr.implemented_capacity
1187 = be16_to_cpu(p->attr.implemented_capacity);
1188 p->attr.controller_number = be16_to_cpu(p->attr.controller_number);
1189 p->attr.controller_function = be16_to_cpu(p->attr.controller_function);
1192 static int msb_read_boot_blocks(struct msb_data *msb)
1195 struct scatterlist sg;
1196 struct ms_extra_data_register extra;
1197 struct ms_boot_page *page;
1199 msb->boot_block_locations[0] = MS_BLOCK_INVALID;
1200 msb->boot_block_locations[1] = MS_BLOCK_INVALID;
1201 msb->boot_block_count = 0;
1203 dbg_verbose("Start of a scan for the boot blocks");
1205 if (!msb->boot_page) {
1206 page = kmalloc_array(2, sizeof(struct ms_boot_page),
1211 msb->boot_page = page;
1213 page = msb->boot_page;
1215 msb->block_count = MS_BLOCK_MAX_BOOT_ADDR;
1217 for (pba = 0; pba < MS_BLOCK_MAX_BOOT_ADDR; pba++) {
1219 sg_init_one(&sg, page, sizeof(*page));
1220 if (msb_read_page(msb, pba, 0, &extra, &sg, 0)) {
1221 dbg("boot scan: can't read pba %d", pba);
1225 if (extra.management_flag & MEMSTICK_MANAGEMENT_SYSFLG) {
1226 dbg("management flag doesn't indicate boot block %d",
1231 if (be16_to_cpu(page->header.block_id) != MS_BLOCK_BOOT_ID) {
1232 dbg("the pba at %d doesn't contain boot block ID", pba);
1236 msb_fix_boot_page_endianness(page);
1237 msb->boot_block_locations[msb->boot_block_count] = pba;
1240 msb->boot_block_count++;
1242 if (msb->boot_block_count == 2)
1246 if (!msb->boot_block_count) {
1247 pr_err("media doesn't contain master page, aborting");
1251 dbg_verbose("End of scan for boot blocks");
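/*
 * Parse the factory bad block table referenced by one of the two boot blocks
 * and mark each listed block as used, so that the FTL never allocates it
 */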
1255 static int msb_read_bad_block_table(struct msb_data *msb, int block_nr)
1257 struct ms_boot_page *boot_block;
1258 struct scatterlist sg;
1262 int data_size, data_offset, page, page_offset, size_to_read;
1265 BUG_ON(block_nr > 1);
1266 boot_block = &msb->boot_page[block_nr];
1267 pba = msb->boot_block_locations[block_nr];
1269 if (msb->boot_block_locations[block_nr] == MS_BLOCK_INVALID)
1272 data_size = boot_block->entry.disabled_block.data_size;
1273 data_offset = sizeof(struct ms_boot_page) +
1274 boot_block->entry.disabled_block.start_addr;
1278 page = data_offset / msb->page_size;
1279 page_offset = data_offset % msb->page_size;
1281 DIV_ROUND_UP(data_size + page_offset, msb->page_size) *
	dbg("reading bad block table of boot block at pba %d, offset %d len %d",
1285 pba, data_offset, data_size);
1287 buffer = kzalloc(size_to_read, GFP_KERNEL);
1291 /* Read the buffer */
1292 sg_init_one(&sg, buffer, size_to_read);
1294 while (offset < size_to_read) {
1295 error = msb_read_page(msb, pba, page, NULL, &sg, offset);
1300 offset += msb->page_size;
1302 if (page == msb->pages_in_block) {
1304 "bad block table extends beyond the boot block");
1309 /* Process the bad block table */
1310 for (i = page_offset; i < data_size / sizeof(u16); i++) {
1312 u16 bad_block = be16_to_cpu(buffer[i]);
1314 if (bad_block >= msb->block_count) {
1315 dbg("bad block table contains invalid block %d",
1320 if (test_bit(bad_block, msb->used_blocks_bitmap)) {
1321 dbg("duplicate bad block %d in the table",
1326 dbg("block %d is marked as factory bad", bad_block);
1327 msb_mark_block_used(msb, bad_block);
1334 static int msb_ftl_initialize(struct msb_data *msb)
1338 if (msb->ftl_initialized)
1341 msb->zone_count = msb->block_count / MS_BLOCKS_IN_ZONE;
1342 msb->logical_block_count = msb->zone_count * 496 - 2;
1344 msb->used_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
1345 msb->erased_blocks_bitmap = bitmap_zalloc(msb->block_count, GFP_KERNEL);
1346 msb->lba_to_pba_table =
1347 kmalloc_array(msb->logical_block_count, sizeof(u16),
1350 if (!msb->used_blocks_bitmap || !msb->lba_to_pba_table ||
1351 !msb->erased_blocks_bitmap) {
1352 bitmap_free(msb->used_blocks_bitmap);
1353 bitmap_free(msb->erased_blocks_bitmap);
1354 kfree(msb->lba_to_pba_table);
1358 for (i = 0; i < msb->zone_count; i++)
1359 msb->free_block_count[i] = MS_BLOCKS_IN_ZONE;
1361 memset(msb->lba_to_pba_table, MS_BLOCK_INVALID,
1362 msb->logical_block_count * sizeof(u16));
1364 dbg("initial FTL tables created. Zone count = %d, Logical block count = %d",
1365 msb->zone_count, msb->logical_block_count);
1367 msb->ftl_initialized = true;
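/*
 * Scan the whole media, building the lba -> pba mapping from each block's
 * OOB data and resolving cases where two blocks claim the same lba (which
 * can be left behind by an interrupted block update)
 */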
1371 static int msb_ftl_scan(struct msb_data *msb)
1373 u16 pba, lba, other_block;
1374 u8 overwrite_flag, management_flag, other_overwrite_flag;
1376 struct ms_extra_data_register extra;
1377 u8 *overwrite_flags = kzalloc(msb->block_count, GFP_KERNEL);
1379 if (!overwrite_flags)
1382 dbg("Start of media scanning");
1383 for (pba = 0; pba < msb->block_count; pba++) {
1385 if (pba == msb->boot_block_locations[0] ||
1386 pba == msb->boot_block_locations[1]) {
1387 dbg_verbose("pba %05d -> [boot block]", pba);
1388 msb_mark_block_used(msb, pba);
1392 if (test_bit(pba, msb->used_blocks_bitmap)) {
1393 dbg_verbose("pba %05d -> [factory bad]", pba);
1397 memset(&extra, 0, sizeof(extra));
1398 error = msb_read_oob(msb, pba, 0, &extra);
1400 /* can't trust the page if we can't read the oob */
1401 if (error == -EBADMSG) {
1403 "oob of pba %d damaged, will try to erase it", pba);
1404 msb_mark_block_used(msb, pba);
1405 msb_erase_block(msb, pba);
1408 pr_err("unknown error %d on read of oob of pba %d - aborting",
1411 kfree(overwrite_flags);
1415 lba = be16_to_cpu(extra.logical_address);
1416 management_flag = extra.management_flag;
1417 overwrite_flag = extra.overwrite_flag;
1418 overwrite_flags[pba] = overwrite_flag;
1420 /* Skip bad blocks */
1421 if (!(overwrite_flag & MEMSTICK_OVERWRITE_BKST)) {
1422 dbg("pba %05d -> [BAD]", pba);
1423 msb_mark_block_used(msb, pba);
1427 /* Skip system/drm blocks */
1428 if ((management_flag & MEMSTICK_MANAGEMENT_FLAG_NORMAL) !=
1429 MEMSTICK_MANAGEMENT_FLAG_NORMAL) {
1430 dbg("pba %05d -> [reserved management flag %02x]",
1431 pba, management_flag);
1432 msb_mark_block_used(msb, pba);
1436 /* Erase temporary tables */
1437 if (!(management_flag & MEMSTICK_MANAGEMENT_ATFLG)) {
1438 dbg("pba %05d -> [temp table] - will erase", pba);
1440 msb_mark_block_used(msb, pba);
1441 msb_erase_block(msb, pba);
1445 if (lba == MS_BLOCK_INVALID) {
1446 dbg_verbose("pba %05d -> [free]", pba);
1450 msb_mark_block_used(msb, pba);
		/* Block has an LBA that doesn't match its zone */
1453 if (msb_get_zone_from_lba(lba) != msb_get_zone_from_pba(pba)) {
1454 pr_notice("pba %05d -> [bad lba %05d] - will erase",
1456 msb_erase_block(msb, pba);
1460 /* No collisions - great */
1461 if (msb->lba_to_pba_table[lba] == MS_BLOCK_INVALID) {
1462 dbg_verbose("pba %05d -> [lba %05d]", pba, lba);
1463 msb->lba_to_pba_table[lba] = pba;
1467 other_block = msb->lba_to_pba_table[lba];
1468 other_overwrite_flag = overwrite_flags[other_block];
1470 pr_notice("Collision between pba %d and pba %d",
1473 if (!(overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
1474 pr_notice("pba %d is marked as stable, use it", pba);
1475 msb_erase_block(msb, other_block);
1476 msb->lba_to_pba_table[lba] = pba;
1480 if (!(other_overwrite_flag & MEMSTICK_OVERWRITE_UDST)) {
1481 pr_notice("pba %d is marked as stable, use it",
1483 msb_erase_block(msb, pba);
		pr_notice("collision between pba %d and pba %d, neither marked stable, erasing pba %d",
			pba, other_block, other_block);
1490 msb_erase_block(msb, other_block);
1491 msb->lba_to_pba_table[lba] = pba;
1494 dbg("End of media scanning");
1495 kfree(overwrite_flags);
1499 static void msb_cache_flush_timer(struct timer_list *t)
1501 struct msb_data *msb = from_timer(msb, t, cache_flush_timer);
1503 msb->need_flush_cache = true;
1504 queue_work(msb->io_queue, &msb->io_work);
1508 static void msb_cache_discard(struct msb_data *msb)
1510 if (msb->cache_block_lba == MS_BLOCK_INVALID)
1513 del_timer_sync(&msb->cache_flush_timer);
1515 dbg_verbose("Discarding the write cache");
1516 msb->cache_block_lba = MS_BLOCK_INVALID;
1517 bitmap_zero(&msb->valid_cache_bitmap, msb->pages_in_block);
1520 static int msb_cache_init(struct msb_data *msb)
1522 timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0);
1525 msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
1529 msb_cache_discard(msb);
1533 static int msb_cache_flush(struct msb_data *msb)
1535 struct scatterlist sg;
1536 struct ms_extra_data_register extra;
1537 int page, offset, error;
1543 if (msb->cache_block_lba == MS_BLOCK_INVALID)
1546 lba = msb->cache_block_lba;
1547 pba = msb->lba_to_pba_table[lba];
1549 dbg_verbose("Flushing the write cache of pba %d (LBA %d)",
1550 pba, msb->cache_block_lba);
	sg_init_one(&sg, msb->cache, msb->block_size);
1554 /* Read all missing pages in cache */
1555 for (page = 0; page < msb->pages_in_block; page++) {
1557 if (test_bit(page, &msb->valid_cache_bitmap))
1560 offset = page * msb->page_size;
1562 dbg_verbose("reading non-present sector %d of cache block %d",
1564 error = msb_read_page(msb, pba, page, &extra, &sg, offset);
1566 /* Bad pages are copied with 00 page status */
1567 if (error == -EBADMSG) {
1568 pr_err("read error on sector %d, contents probably damaged", page);
1575 if ((extra.overwrite_flag & MEMSTICK_OV_PG_NORMAL) !=
1576 MEMSTICK_OV_PG_NORMAL) {
1577 dbg("page %d is marked as bad", page);
1581 set_bit(page, &msb->valid_cache_bitmap);
1584 /* Write the cache now */
1585 error = msb_update_block(msb, msb->cache_block_lba, &sg, 0);
1586 pba = msb->lba_to_pba_table[msb->cache_block_lba];
1588 /* Mark invalid pages */
1590 for (page = 0; page < msb->pages_in_block; page++) {
1592 if (test_bit(page, &msb->valid_cache_bitmap))
1595 dbg("marking page %d as containing damaged data",
		msb_set_overwrite_flag(msb,
			pba, page, 0xFF & ~MEMSTICK_OV_PG_NORMAL);
1602 msb_cache_discard(msb);
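/*
 * Write one page into the block cache. With add_to_cache_only set, the page
 * is cached only if its block is already the cached one (this is how reads
 * populate the cache); otherwise the cache is flushed and retargeted to the
 * new block as needed
 */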
1606 static int msb_cache_write(struct msb_data *msb, int lba,
1607 int page, bool add_to_cache_only, struct scatterlist *sg, int offset)
1610 struct scatterlist sg_tmp[10];
1615 if (msb->cache_block_lba == MS_BLOCK_INVALID ||
1616 lba != msb->cache_block_lba)
1617 if (add_to_cache_only)
	/* If we need to write to a different block */
1621 if (msb->cache_block_lba != MS_BLOCK_INVALID &&
1622 lba != msb->cache_block_lba) {
1623 dbg_verbose("first flush the cache");
1624 error = msb_cache_flush(msb);
1629 if (msb->cache_block_lba == MS_BLOCK_INVALID) {
1630 msb->cache_block_lba = lba;
1631 mod_timer(&msb->cache_flush_timer,
1632 jiffies + msecs_to_jiffies(cache_flush_timeout));
	dbg_verbose("Write of LBA %d page %d to cache", lba, page);
1637 sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
1638 msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp), offset, msb->page_size);
1640 sg_copy_to_buffer(sg_tmp, sg_nents(sg_tmp),
1641 msb->cache + page * msb->page_size, msb->page_size);
1643 set_bit(page, &msb->valid_cache_bitmap);
1647 static int msb_cache_read(struct msb_data *msb, int lba,
1648 int page, struct scatterlist *sg, int offset)
1650 int pba = msb->lba_to_pba_table[lba];
1651 struct scatterlist sg_tmp[10];
1654 if (lba == msb->cache_block_lba &&
1655 test_bit(page, &msb->valid_cache_bitmap)) {
1657 dbg_verbose("Read of LBA %d (pba %d) sector %d from cache",
1660 sg_init_table(sg_tmp, ARRAY_SIZE(sg_tmp));
1661 msb_sg_copy(sg, sg_tmp, ARRAY_SIZE(sg_tmp),
1662 offset, msb->page_size);
1663 sg_copy_from_buffer(sg_tmp, sg_nents(sg_tmp),
1664 msb->cache + msb->page_size * page,
1667 dbg_verbose("Read of LBA %d (pba %d) sector %d from device",
1670 error = msb_read_page(msb, pba, page, NULL, sg, offset);
1674 msb_cache_write(msb, lba, page, true, sg, offset);
/* Emulated geometry table
 * This table's content isn't that important;
 * one could put different values here, provided that they still
 * cover the whole disk.
 * The 64 MB entry is what Windows reports for my 64M memstick
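 * (the table is matched against the raw media size in msb_init_card() below)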
1686 static const struct chs_entry chs_table[] = {
/*	size	sectors	cylinders	heads */
1693 {128, 16, 991, 16 },
1697 /* Load information about the card */
1698 static int msb_init_card(struct memstick_dev *card)
1700 struct msb_data *msb = memstick_get_drvdata(card);
1701 struct memstick_host *host = card->host;
1702 struct ms_boot_page *boot_block;
1703 int error = 0, i, raw_size_in_megs;
1707 if (card->id.class >= MEMSTICK_CLASS_ROM &&
1708 card->id.class <= MEMSTICK_CLASS_ROM)
1709 msb->read_only = true;
1712 error = msb_reset(msb, false);
	/* Due to a bug in the JMicron driver written by Alex Dubov,
	 * its serial mode barely works,
	 * so we switch to parallel mode right away
1720 if (host->caps & MEMSTICK_CAP_PAR4)
1721 msb_switch_to_parallel(msb);
1723 msb->page_size = sizeof(struct ms_boot_page);
1725 /* Read the boot page */
1726 error = msb_read_boot_blocks(msb);
1730 boot_block = &msb->boot_page[0];
	/* Save interesting attributes from the boot page */
1733 msb->block_count = boot_block->attr.number_of_blocks;
1734 msb->page_size = boot_block->attr.page_size;
1736 msb->pages_in_block = boot_block->attr.block_size * 2;
1737 msb->block_size = msb->page_size * msb->pages_in_block;
1739 if ((size_t)msb->page_size > PAGE_SIZE) {
		/* this isn't supported by Linux at all, anyway */
1741 dbg("device page %d size isn't supported", msb->page_size);
1745 msb->block_buffer = kzalloc(msb->block_size, GFP_KERNEL);
1746 if (!msb->block_buffer)
1749 raw_size_in_megs = (msb->block_size * msb->block_count) >> 20;
1751 for (i = 0; chs_table[i].size; i++) {
1753 if (chs_table[i].size != raw_size_in_megs)
1756 msb->geometry.cylinders = chs_table[i].cyl;
1757 msb->geometry.heads = chs_table[i].head;
1758 msb->geometry.sectors = chs_table[i].sec;
1762 if (boot_block->attr.transfer_supporting == 1)
1763 msb->caps |= MEMSTICK_CAP_PAR4;
1765 if (boot_block->attr.device_type & 0x03)
1766 msb->read_only = true;
1768 dbg("Total block count = %d", msb->block_count);
1769 dbg("Each block consists of %d pages", msb->pages_in_block);
1770 dbg("Page size = %d bytes", msb->page_size);
1771 dbg("Parallel mode supported: %d", !!(msb->caps & MEMSTICK_CAP_PAR4));
1772 dbg("Read only: %d", msb->read_only);
1775 /* Now we can switch the interface */
1776 if (host->caps & msb->caps & MEMSTICK_CAP_PAR4)
1777 msb_switch_to_parallel(msb);
1780 error = msb_cache_init(msb);
1784 error = msb_ftl_initialize(msb);
1789 /* Read the bad block table */
1790 error = msb_read_bad_block_table(msb, 0);
1792 if (error && error != -ENOMEM) {
1793 dbg("failed to read bad block table from primary boot block, trying from backup");
1794 error = msb_read_bad_block_table(msb, 1);
1800 /* *drum roll* Scan the media */
1801 error = msb_ftl_scan(msb);
1803 pr_err("Scan of media failed");
1811 static int msb_do_write_request(struct msb_data *msb, int lba,
1812 int page, struct scatterlist *sg, size_t len, int *sucessfuly_written)
1816 *sucessfuly_written = 0;
1818 while (offset < len) {
1819 if (page == 0 && len - offset >= msb->block_size) {
1821 if (msb->cache_block_lba == lba)
1822 msb_cache_discard(msb);
1824 dbg_verbose("Writing whole lba %d", lba);
1825 error = msb_update_block(msb, lba, sg, offset);
1829 offset += msb->block_size;
1830 *sucessfuly_written += msb->block_size;
1835 error = msb_cache_write(msb, lba, page, false, sg, offset);
1839 offset += msb->page_size;
1840 *sucessfuly_written += msb->page_size;
1843 if (page == msb->pages_in_block) {
1851 static int msb_do_read_request(struct msb_data *msb, int lba,
1852 int page, struct scatterlist *sg, int len, int *sucessfuly_read)
1856 *sucessfuly_read = 0;
1858 while (offset < len) {
1860 error = msb_cache_read(msb, lba, page, sg, offset);
1864 offset += msb->page_size;
1865 *sucessfuly_read += msb->page_size;
1868 if (page == msb->pages_in_block) {
1876 static void msb_io_work(struct work_struct *work)
1878 struct msb_data *msb = container_of(work, struct msb_data, io_work);
1879 int page, error, len;
1881 struct scatterlist *sg = msb->prealloc_sg;
1882 struct request *req;
1884 dbg_verbose("IO: work started");
1887 spin_lock_irq(&msb->q_lock);
1889 if (msb->need_flush_cache) {
1890 msb->need_flush_cache = false;
1891 spin_unlock_irq(&msb->q_lock);
1892 msb_cache_flush(msb);
			dbg_verbose("IO: no more requests, exiting");
1899 spin_unlock_irq(&msb->q_lock);
1903 spin_unlock_irq(&msb->q_lock);
1905 /* process the request */
1906 dbg_verbose("IO: processing new request");
1907 blk_rq_map_sg(msb->queue, req, sg);
1909 lba = blk_rq_pos(req);
1911 sector_div(lba, msb->page_size / 512);
1912 page = sector_div(lba, msb->pages_in_block);
1914 if (rq_data_dir(msb->req) == READ)
1915 error = msb_do_read_request(msb, lba, page, sg,
1916 blk_rq_bytes(req), &len);
1918 error = msb_do_write_request(msb, lba, page, sg,
1919 blk_rq_bytes(req), &len);
1921 if (len && !blk_update_request(req, BLK_STS_OK, len)) {
1922 __blk_mq_end_request(req, BLK_STS_OK);
1923 spin_lock_irq(&msb->q_lock);
1925 spin_unlock_irq(&msb->q_lock);
1928 if (error && msb->req) {
1929 blk_status_t ret = errno_to_blk_status(error);
1931 dbg_verbose("IO: ending one sector of the request with error");
1932 blk_mq_end_request(req, ret);
1933 spin_lock_irq(&msb->q_lock);
1935 spin_unlock_irq(&msb->q_lock);
1939 dbg_verbose("IO: request still pending");
1943 static DEFINE_IDR(msb_disk_idr); /*set of used disk numbers */
1944 static DEFINE_MUTEX(msb_disk_lock); /* protects against races in open/release */
1946 static void msb_data_clear(struct msb_data *msb)
1948 kfree(msb->boot_page);
1949 bitmap_free(msb->used_blocks_bitmap);
1950 bitmap_free(msb->erased_blocks_bitmap);
1951 kfree(msb->lba_to_pba_table);
1956 static int msb_bd_getgeo(struct block_device *bdev,
1957 struct hd_geometry *geo)
1959 struct msb_data *msb = bdev->bd_disk->private_data;
1960 *geo = msb->geometry;
1964 static void msb_bd_free_disk(struct gendisk *disk)
1966 struct msb_data *msb = disk->private_data;
1968 mutex_lock(&msb_disk_lock);
1969 idr_remove(&msb_disk_idr, msb->disk_id);
1970 mutex_unlock(&msb_disk_lock);
1975 static blk_status_t msb_queue_rq(struct blk_mq_hw_ctx *hctx,
1976 const struct blk_mq_queue_data *bd)
1978 struct memstick_dev *card = hctx->queue->queuedata;
1979 struct msb_data *msb = memstick_get_drvdata(card);
1980 struct request *req = bd->rq;
1982 dbg_verbose("Submit request");
1984 spin_lock_irq(&msb->q_lock);
1986 if (msb->card_dead) {
1987 dbg("Refusing requests on removed card");
1989 WARN_ON(!msb->io_queue_stopped);
1991 spin_unlock_irq(&msb->q_lock);
1992 blk_mq_start_request(req);
1993 return BLK_STS_IOERR;
1997 spin_unlock_irq(&msb->q_lock);
1998 return BLK_STS_DEV_RESOURCE;
2001 blk_mq_start_request(req);
2004 if (!msb->io_queue_stopped)
2005 queue_work(msb->io_queue, &msb->io_work);
2007 spin_unlock_irq(&msb->q_lock);
2011 static int msb_check_card(struct memstick_dev *card)
2013 struct msb_data *msb = memstick_get_drvdata(card);
2015 return (msb->card_dead == 0);
2018 static void msb_stop(struct memstick_dev *card)
2020 struct msb_data *msb = memstick_get_drvdata(card);
2021 unsigned long flags;
2023 dbg("Stopping all msblock IO");
2025 blk_mq_stop_hw_queues(msb->queue);
2026 spin_lock_irqsave(&msb->q_lock, flags);
2027 msb->io_queue_stopped = true;
2028 spin_unlock_irqrestore(&msb->q_lock, flags);
2030 del_timer_sync(&msb->cache_flush_timer);
2031 flush_workqueue(msb->io_queue);
2033 spin_lock_irqsave(&msb->q_lock, flags);
2035 blk_mq_requeue_request(msb->req, false);
2038 spin_unlock_irqrestore(&msb->q_lock, flags);
2041 static void msb_start(struct memstick_dev *card)
2043 struct msb_data *msb = memstick_get_drvdata(card);
2044 unsigned long flags;
2046 dbg("Resuming IO from msblock");
2048 msb_invalidate_reg_window(msb);
2050 spin_lock_irqsave(&msb->q_lock, flags);
2051 if (!msb->io_queue_stopped || msb->card_dead) {
2052 spin_unlock_irqrestore(&msb->q_lock, flags);
2055 spin_unlock_irqrestore(&msb->q_lock, flags);
	/* Kick a cache flush anyway, it's harmless */
2058 msb->need_flush_cache = true;
2059 msb->io_queue_stopped = false;
2061 blk_mq_start_hw_queues(msb->queue);
2063 queue_work(msb->io_queue, &msb->io_work);
2067 static const struct block_device_operations msb_bdops = {
2068 .owner = THIS_MODULE,
2069 .getgeo = msb_bd_getgeo,
2070 .free_disk = msb_bd_free_disk,
2073 static const struct blk_mq_ops msb_mq_ops = {
2074 .queue_rq = msb_queue_rq,
2077 /* Registers the block device */
2078 static int msb_init_disk(struct memstick_dev *card)
2080 struct msb_data *msb = memstick_get_drvdata(card);
2081 struct queue_limits lim = {
2082 .logical_block_size = msb->page_size,
2083 .max_hw_sectors = MS_BLOCK_MAX_PAGES,
2084 .max_segments = MS_BLOCK_MAX_SEGS,
2085 .max_segment_size = MS_BLOCK_MAX_PAGES * msb->page_size,
2088 unsigned long capacity;
2090 mutex_lock(&msb_disk_lock);
2091 msb->disk_id = idr_alloc(&msb_disk_idr, card, 0, 256, GFP_KERNEL);
2092 mutex_unlock(&msb_disk_lock);
2094 if (msb->disk_id < 0)
2095 return msb->disk_id;
2097 rc = blk_mq_alloc_sq_tag_set(&msb->tag_set, &msb_mq_ops, 2,
2098 BLK_MQ_F_SHOULD_MERGE);
2100 goto out_release_id;
2102 msb->disk = blk_mq_alloc_disk(&msb->tag_set, &lim, card);
2103 if (IS_ERR(msb->disk)) {
2104 rc = PTR_ERR(msb->disk);
2105 goto out_free_tag_set;
2107 msb->queue = msb->disk->queue;
2109 sprintf(msb->disk->disk_name, "msblk%d", msb->disk_id);
2110 msb->disk->fops = &msb_bdops;
2111 msb->disk->private_data = msb;
2113 capacity = msb->pages_in_block * msb->logical_block_count;
2114 capacity *= (msb->page_size / 512);
2115 set_capacity(msb->disk, capacity);
2116 dbg("Set total disk size to %lu sectors", capacity);
2118 msb->io_queue = alloc_ordered_workqueue("ms_block", WQ_MEM_RECLAIM);
2119 if (!msb->io_queue) {
2121 goto out_cleanup_disk;
2124 INIT_WORK(&msb->io_work, msb_io_work);
2125 sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2128 set_disk_ro(msb->disk, 1);
2131 rc = device_add_disk(&card->dev, msb->disk, NULL);
2133 goto out_destroy_workqueue;
2137 out_destroy_workqueue:
2138 destroy_workqueue(msb->io_queue);
2140 put_disk(msb->disk);
2142 blk_mq_free_tag_set(&msb->tag_set);
2144 mutex_lock(&msb_disk_lock);
2145 idr_remove(&msb_disk_idr, msb->disk_id);
2146 mutex_unlock(&msb_disk_lock);
2150 static int msb_probe(struct memstick_dev *card)
2152 struct msb_data *msb;
2155 msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
2158 memstick_set_drvdata(card, msb);
2160 spin_lock_init(&msb->q_lock);
2162 rc = msb_init_card(card);
2166 rc = msb_init_disk(card);
2168 card->check = msb_check_card;
2169 card->stop = msb_stop;
2170 card->start = msb_start;
2174 memstick_set_drvdata(card, NULL);
2175 msb_data_clear(msb);
2180 static void msb_remove(struct memstick_dev *card)
2182 struct msb_data *msb = memstick_get_drvdata(card);
2183 unsigned long flags;
2185 if (!msb->io_queue_stopped)
2188 dbg("Removing the disk device");
2190 /* Take care of unhandled + new requests from now on */
2191 spin_lock_irqsave(&msb->q_lock, flags);
2192 msb->card_dead = true;
2193 spin_unlock_irqrestore(&msb->q_lock, flags);
2194 blk_mq_start_hw_queues(msb->queue);
2196 /* Remove the disk */
2197 del_gendisk(msb->disk);
2198 blk_mq_free_tag_set(&msb->tag_set);
2201 mutex_lock(&msb_disk_lock);
2202 msb_data_clear(msb);
2203 mutex_unlock(&msb_disk_lock);
2205 put_disk(msb->disk);
2206 memstick_set_drvdata(card, NULL);
2211 static int msb_suspend(struct memstick_dev *card, pm_message_t state)
2217 static int msb_resume(struct memstick_dev *card)
2219 struct msb_data *msb = memstick_get_drvdata(card);
2220 struct msb_data *new_msb = NULL;
2221 bool card_dead = true;
2223 #ifndef CONFIG_MEMSTICK_UNSAFE_RESUME
2224 msb->card_dead = true;
2227 mutex_lock(&card->host->lock);
2229 new_msb = kzalloc(sizeof(struct msb_data), GFP_KERNEL);
2233 new_msb->card = card;
2234 memstick_set_drvdata(card, new_msb);
2235 spin_lock_init(&new_msb->q_lock);
2236 sg_init_table(msb->prealloc_sg, MS_BLOCK_MAX_SEGS+1);
2238 if (msb_init_card(card))
2241 if (msb->block_size != new_msb->block_size)
2244 if (memcmp(msb->boot_page, new_msb->boot_page,
2245 sizeof(struct ms_boot_page)))
2248 if (msb->logical_block_count != new_msb->logical_block_count ||
2249 memcmp(msb->lba_to_pba_table, new_msb->lba_to_pba_table,
2250 msb->logical_block_count))
2253 if (msb->block_count != new_msb->block_count ||
2254 !bitmap_equal(msb->used_blocks_bitmap, new_msb->used_blocks_bitmap,
2261 dbg("Card was removed/replaced during suspend");
2263 msb->card_dead = card_dead;
2264 memstick_set_drvdata(card, msb);
2267 msb_data_clear(new_msb);
2272 mutex_unlock(&card->host->lock);
2277 #define msb_suspend NULL
2278 #define msb_resume NULL
2280 #endif /* CONFIG_PM */
2282 static struct memstick_device_id msb_id_tbl[] = {
2283 {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2284 MEMSTICK_CLASS_FLASH},
2286 {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2287 MEMSTICK_CLASS_ROM},
2289 {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2292 {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_LEGACY, MEMSTICK_CATEGORY_STORAGE,
2295 {MEMSTICK_MATCH_ALL, MEMSTICK_TYPE_DUO, MEMSTICK_CATEGORY_STORAGE_DUO,
2296 MEMSTICK_CLASS_DUO},
2299 MODULE_DEVICE_TABLE(memstick, msb_id_tbl);
2302 static struct memstick_driver msb_driver = {
2304 .name = DRIVER_NAME,
2305 .owner = THIS_MODULE
2307 .id_table = msb_id_tbl,
2309 .remove = msb_remove,
2310 .suspend = msb_suspend,
2311 .resume = msb_resume
2314 static int __init msb_init(void)
2316 int rc = memstick_register_driver(&msb_driver);
2319 pr_err("failed to register memstick driver (error %d)\n", rc);
2324 static void __exit msb_exit(void)
2326 memstick_unregister_driver(&msb_driver);
2327 idr_destroy(&msb_disk_idr);
2330 module_init(msb_init);
2331 module_exit(msb_exit);
2333 module_param(cache_flush_timeout, int, S_IRUGO);
2334 MODULE_PARM_DESC(cache_flush_timeout,
2335 "Cache flush timeout in msec (1000 default)");
2336 module_param(debug, int, S_IRUGO | S_IWUSR);
2337 MODULE_PARM_DESC(debug, "Debug level (0-2)");
2339 module_param(verify_writes, bool, S_IRUGO);
2340 MODULE_PARM_DESC(verify_writes, "Read back and check all data that is written");
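/* Example: modprobe ms_block cache_flush_timeout=2000 verify_writes=1 */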
2342 MODULE_LICENSE("GPL");
2343 MODULE_AUTHOR("Maxim Levitsky");
2344 MODULE_DESCRIPTION("Sony MemoryStick block device driver");