Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git


Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  io context: fix ref counting
  block: make the end_io functions be non-GPL exports
  block: fix improper kobject release in blk_integrity_unregister
  block: always assign default lock to queues
  mg_disk: Add missing ready status check on mg_write()
  mg_disk: fix issue with data integrity on error in mg_write()
  mg_disk: fix reading invalid status when use polling driver
  mg_disk: remove prohibited sleep operation

+72 -60
+6 -13
block/blk-core.c
···
                 return NULL;
         }
 
-        /*
-         * if caller didn't supply a lock, they get per-queue locking with
-         * our embedded lock
-         */
-        if (!lock)
-                lock = &q->__queue_lock;
-
         q->request_fn           = rfn;
         q->prep_rq_fn           = NULL;
         q->unplug_fn            = generic_unplug_device;
···
 {
         return blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
-EXPORT_SYMBOL_GPL(blk_end_request);
+EXPORT_SYMBOL(blk_end_request);
 
 /**
  * blk_end_request_all - Helper function for drives to finish the request.
···
         pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
         BUG_ON(pending);
 }
-EXPORT_SYMBOL_GPL(blk_end_request_all);
+EXPORT_SYMBOL(blk_end_request_all);
 
 /**
  * blk_end_request_cur - Helper function to finish the current request chunk.
···
 {
         return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
 }
-EXPORT_SYMBOL_GPL(blk_end_request_cur);
+EXPORT_SYMBOL(blk_end_request_cur);
 
 /**
  * __blk_end_request - Helper function for drivers to complete the request.
···
 {
         return __blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
-EXPORT_SYMBOL_GPL(__blk_end_request);
+EXPORT_SYMBOL(__blk_end_request);
 
 /**
  * __blk_end_request_all - Helper function for drives to finish the request.
···
         pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
         BUG_ON(pending);
 }
-EXPORT_SYMBOL_GPL(__blk_end_request_all);
+EXPORT_SYMBOL(__blk_end_request_all);
 
 /**
  * __blk_end_request_cur - Helper function to finish the current request chunk.
···
 {
         return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
 }
-EXPORT_SYMBOL_GPL(__blk_end_request_cur);
+EXPORT_SYMBOL(__blk_end_request_cur);
 
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                      struct bio *bio)
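For context on why the export change matters: a request-based block driver's request_fn typically fetches requests and completes them with these helpers, which a non-GPL module could not link against while they were EXPORT_SYMBOL_GPL. The sketch below is illustrative only; the sbd_* names and the skipped transfer step are hypothetical, while blk_fetch_request(), blk_fs_request() and __blk_end_request_all() are the real block-layer calls of this kernel era.

#include <linux/blkdev.h>

/* Hypothetical request function; after this change the completion helpers
 * it calls are plain (non-GPL) exports. Called with the queue lock held. */
static void sbd_request(struct request_queue *q)
{
        struct request *req;

        while ((req = blk_fetch_request(q)) != NULL) {
                int err = 0;

                if (!blk_fs_request(req))
                        err = -EIO;     /* only normal fs requests handled */

                /* ... transfer blk_rq_bytes(req) bytes of data here ... */

                __blk_end_request_all(req, err);
        }
}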
+1
block/blk-integrity.c
···
 
         kobject_uevent(&bi->kobj, KOBJ_REMOVE);
         kobject_del(&bi->kobj);
+        kobject_put(&bi->kobj);
         kmem_cache_free(integrity_cachep, bi);
         disk->integrity = NULL;
 }
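The added line matters because kobject_del() only removes the sysfs entry; the reference taken at registration is dropped only by kobject_put(), which is what allows the ktype's ->release() to run. A generic sketch of that lifetime pattern follows; the foo_* names are made up for illustration and do not come from blk-integrity.c.

#include <linux/kobject.h>
#include <linux/slab.h>

struct foo {
        struct kobject kobj;
        int data;
};

static void foo_release(struct kobject *kobj)
{
        /* runs once the last reference is gone */
        kfree(container_of(kobj, struct foo, kobj));
}

static struct kobj_type foo_ktype = {
        .release = foo_release,
};

static int foo_register(struct foo *f, struct kobject *parent)
{
        /* kobject_init_and_add() leaves the refcount at 1; that reference
         * is what the final kobject_put() below releases. */
        return kobject_init_and_add(&f->kobj, &foo_ktype, parent, "foo");
}

static void foo_unregister(struct foo *f)
{
        kobject_uevent(&f->kobj, KOBJ_REMOVE);
        kobject_del(&f->kobj);  /* unlink from sysfs, reference still held */
        kobject_put(&f->kobj);  /* drop the registration reference */
}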
+7
block/blk-settings.c
···
         blk_set_default_limits(&q->limits);
 
         /*
+         * If the caller didn't supply a lock, fall back to our embedded
+         * per-queue locks
+         */
+        if (!q->queue_lock)
+                q->queue_lock = &q->__queue_lock;
+
+        /*
          * by default assume old behaviour and bounce for any highmem page
          */
         blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
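With the fallback moved into blk_queue_make_request(), every queue ends up with a usable queue_lock even when the driver never supplies one, including bio-based drivers that never call blk_init_queue(). A hypothetical driver-side sketch of that path; the sbd_* names are invented for illustration, the block-layer calls are the real ones.

#include <linux/blkdev.h>

static int sbd_make_request(struct request_queue *q, struct bio *bio);  /* hypothetical */

static struct request_queue *sbd_alloc_queue(void)
{
        struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

        if (!q)
                return NULL;

        /* blk_queue_make_request() now installs the embedded __queue_lock
         * as the default, so code that takes q->queue_lock no longer
         * trips over a NULL lock for drivers that never provided one. */
        blk_queue_make_request(q, sbd_make_request);
        return q;
}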
+57 -46
drivers/block/mg_disk.c
···
 
 /* Register offsets */
 #define MG_BUFF_OFFSET                  0x8000
-#define MG_STORAGE_BUFFER_SIZE          0x200
 #define MG_REG_OFFSET                   0xC000
 #define MG_REG_FEATURE                  (MG_REG_OFFSET + 2)     /* write case */
 #define MG_REG_ERROR                    (MG_REG_OFFSET + 2)     /* read case */
···
         host->error = MG_ERR_NONE;
         expire = jiffies + msecs_to_jiffies(msec);
 
+        /* These 2 times dummy status read prevents reading invalid
+         * status. A very little time (3 times of mflash operating clk)
+         * is required for busy bit is set. Use dummy read instead of
+         * busy wait, because mflash's PLL is machine dependent.
+         */
+        if (prv_data->use_polling) {
+                status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
+                status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
+        }
+
         status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
 
         do {
···
                         mg_dump_status("not ready", status, host);
                         return MG_ERR_INV_STAT;
                 }
-                if (prv_data->use_polling)
-                        msleep(1);
 
                 status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
         } while (time_before(cur_jiffies, expire));
···
         return MG_ERR_NONE;
 }
 
+static void mg_read_one(struct mg_host *host, struct request *req)
+{
+        u16 *buff = (u16 *)req->buffer;
+        u32 i;
+
+        for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
+                *buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
+                              (i << 1));
+}
+
 static void mg_read(struct request *req)
 {
-        u32 j;
         struct mg_host *host = req->rq_disk->private_data;
 
         if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
···
                blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
 
         do {
-                u16 *buff = (u16 *)req->buffer;
-
                 if (mg_wait(host, ATA_DRQ,
                             MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
                         mg_bad_rw_intr(host);
                         return;
                 }
-                for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
-                        *buff++ = inw((unsigned long)host->dev_base +
-                                      MG_BUFF_OFFSET + (j << 1));
+
+                mg_read_one(host, req);
 
                 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
                                 MG_REG_COMMAND);
         } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
 }
 
+static void mg_write_one(struct mg_host *host, struct request *req)
+{
+        u16 *buff = (u16 *)req->buffer;
+        u32 i;
+
+        for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
+                outw(*buff++, (unsigned long)host->dev_base + MG_BUFF_OFFSET +
+                     (i << 1));
+}
+
 static void mg_write(struct request *req)
 {
-        u32 j;
         struct mg_host *host = req->rq_disk->private_data;
+        unsigned int rem = blk_rq_sectors(req);
 
-        if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
+        if (mg_out(host, blk_rq_pos(req), rem,
                         MG_CMD_WR, NULL) != MG_ERR_NONE) {
                 mg_bad_rw_intr(host);
                 return;
         }
 
         MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
-               blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
+               rem, blk_rq_pos(req), req->buffer);
+
+        if (mg_wait(host, ATA_DRQ,
+                    MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
+                mg_bad_rw_intr(host);
+                return;
+        }
 
         do {
-                u16 *buff = (u16 *)req->buffer;
-
-                if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
-                        mg_bad_rw_intr(host);
-                        return;
-                }
-                for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
-                        outw(*buff++, (unsigned long)host->dev_base +
-                             MG_BUFF_OFFSET + (j << 1));
+                mg_write_one(host, req);
 
                 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
                                 MG_REG_COMMAND);
+
+                rem--;
+                if (rem > 1 && mg_wait(host, ATA_DRQ,
+                                MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
+                        mg_bad_rw_intr(host);
+                        return;
+                } else if (mg_wait(host, MG_STAT_READY,
+                                MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
+                        mg_bad_rw_intr(host);
+                        return;
+                }
         } while (mg_end_request(host, 0, MG_SECTOR_SIZE));
 }
 
···
 {
         struct request *req = host->req;
         u32 i;
-        u16 *buff;
 
         /* check status */
         do {
···
                 return;
 
 ok_to_read:
-        /* get current segment of request */
-        buff = (u16 *)req->buffer;
-
-        /* read 1 sector */
-        for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
-                *buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
-                              (i << 1));
+        mg_read_one(host, req);
 
         MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
                blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);
···
 static void mg_write_intr(struct mg_host *host)
 {
         struct request *req = host->req;
-        u32 i, j;
-        u16 *buff;
+        u32 i;
         bool rem;
 
         /* check status */
···
 ok_to_write:
         if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) {
                 /* write 1 sector and set handler if remains */
-                buff = (u16 *)req->buffer;
-                for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
-                        outw(*buff, (unsigned long)host->dev_base +
-                                        MG_BUFF_OFFSET + (j << 1));
-                        buff++;
-                }
+                mg_write_one(host, req);
                 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
                        blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
                 host->mg_do_intr = mg_write_intr;
···
                                unsigned int sect_num,
                                unsigned int sect_cnt)
 {
-        u16 *buff;
-        u32 i;
-
         switch (rq_data_dir(req)) {
         case READ:
                 if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
···
                         mg_bad_rw_intr(host);
                         return host->error;
                 }
-                buff = (u16 *)req->buffer;
-                for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
-                        outw(*buff, (unsigned long)host->dev_base +
-                             MG_BUFF_OFFSET + (i << 1));
-                        buff++;
-                }
+                mg_write_one(host, req);
                 mod_timer(&host->timer, jiffies + 3 * HZ);
                 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
                                 MG_REG_COMMAND);
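The net effect of the mg_write() rework is a stricter PIO handshake: wait for DRQ before pushing the first sector, confirm each sector with WR_CONF, then poll DRQ while more sectors remain and READY as the transfer ends, instead of sleeping in mg_wait(). The sketch below is an illustrative rendering of that intended sequence only; it reuses the driver's own mg_out()/mg_wait()/mg_write_one() helpers, collapses error handling to a return code, and omits the per-sector buffer advance done by mg_end_request() in the real code.

/* Illustrative-only polled write flow, not the driver's actual function. */
static int mg_write_polled_sketch(struct mg_host *host, struct request *req)
{
        unsigned int rem = blk_rq_sectors(req);

        if (mg_out(host, blk_rq_pos(req), rem, MG_CMD_WR, NULL) != MG_ERR_NONE)
                return -EIO;

        /* the device must raise DRQ before the first sector is pushed */
        if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE)
                return -EIO;

        while (rem--) {
                mg_write_one(host, req);        /* copy one 512-byte sector */
                outb(MG_CMD_WR_CONF,
                     (unsigned long)host->dev_base + MG_REG_COMMAND);

                /* more sectors to come: poll DRQ again; otherwise wait for
                 * the drive to report READY before completing the request
                 * (buffer advance via mg_end_request() omitted here) */
                if (mg_wait(host, rem ? ATA_DRQ : MG_STAT_READY,
                            MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE)
                        return -EIO;
        }
        return 0;
}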
+1 -1
include/linux/iocontext.h
···
          * a race).
          */
         if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
-                atomic_long_inc(&ioc->refcount);
+                atomic_inc(&ioc->nr_tasks);
                 return ioc;
         }
 
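The helper patched here (ioc_task_link(), used when a task is cloned with CLONE_IO) is supposed to take one reference on the shared io_context and account one more task in nr_tasks. The old code bumped refcount a second time and never touched nr_tasks, so each sharing fork leaked a reference. A hedged sketch of the corrected get/put pairing; the *_sketch names are invented, and the put side is assumed to mirror what the real call sites in kernel/fork.c and block/blk-ioc.c do.

#include <linux/iocontext.h>

static struct io_context *ioc_share_sketch(struct io_context *ioc)
{
        /* one refcount reference plus one task accounted, per sharing task */
        if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
                atomic_inc(&ioc->nr_tasks);
                return ioc;
        }
        return NULL;
}

static void ioc_unshare_sketch(struct io_context *ioc)
{
        /* the exiting task drops exactly what it took above */
        atomic_dec(&ioc->nr_tasks);
        put_io_context(ioc);    /* drops one refcount; frees on zero */
}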