Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'nfs-for-3.17-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

Pull NFS client fixes from Trond Myklebust:
"Highlights:

- more fixes for read/write codepath regressions
* sleeping while holding the inode lock
* stricter enforcement of page contiguity when coalescing requests
* fix up error handling in the page coalescing code

- don't busy wait on SIGKILL in the file locking code"

* tag 'nfs-for-3.17-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs:
nfs: Don't busy-wait on SIGKILL in __nfs_iocounter_wait
nfs: can_coalesce_requests must enforce contiguity
nfs: disallow duplicate pages in pgio page vectors
nfs: don't sleep with inode lock in lock_and_join_requests
nfs: fix error handling in lock_and_join_requests
nfs: use blocking page_group_lock in add_request
nfs: fix nonblocking calls to nfs_page_group_lock
nfs: change nfs_page_group_lock argument

+77 -29
+59 -25
fs/nfs/pagelist.c
··· 116 116 if (atomic_read(&c->io_count) == 0) 117 117 break; 118 118 ret = nfs_wait_bit_killable(&q.key); 119 - } while (atomic_read(&c->io_count) != 0); 119 + } while (atomic_read(&c->io_count) != 0 && !ret); 120 120 finish_wait(wq, &q.wait); 121 121 return ret; 122 122 } ··· 139 139 /* 140 140 * nfs_page_group_lock - lock the head of the page group 141 141 * @req - request in group that is to be locked 142 + * @nonblock - if true don't block waiting for lock 142 143 * 143 144 * this lock must be held if modifying the page group list 144 145 * 145 - * returns result from wait_on_bit_lock: 0 on success, < 0 on error 146 + * return 0 on success, < 0 on error: -EAGAIN if nonblocking or the 147 + * result from wait_on_bit_lock 148 + * 149 + * NOTE: calling with nonblock=false should always have set the 150 + * lock bit (see fs/buffer.c and other uses of wait_on_bit_lock 151 + * with TASK_UNINTERRUPTIBLE), so there is no need to check the result. 146 152 */ 147 153 int 148 - nfs_page_group_lock(struct nfs_page *req, bool wait) 154 + nfs_page_group_lock(struct nfs_page *req, bool nonblock) 149 155 { 150 156 struct nfs_page *head = req->wb_head; 151 - int ret; 152 157 153 158 WARN_ON_ONCE(head != head->wb_head); 154 159 155 - do { 156 - ret = wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK, 157 - TASK_UNINTERRUPTIBLE); 158 - } while (wait && ret != 0); 160 + if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags)) 161 + return 0; 159 162 160 - WARN_ON_ONCE(ret > 0); 161 - return ret; 163 + if (!nonblock) 164 + return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK, 165 + TASK_UNINTERRUPTIBLE); 166 + 167 + return -EAGAIN; 168 + } 169 + 170 + /* 171 + * nfs_page_group_lock_wait - wait for the lock to clear, but don't grab it 172 + * @req - a request in the group 173 + * 174 + * This is a blocking call to wait for the group lock to be cleared. 
175 + */ 176 + void 177 + nfs_page_group_lock_wait(struct nfs_page *req) 178 + { 179 + struct nfs_page *head = req->wb_head; 180 + 181 + WARN_ON_ONCE(head != head->wb_head); 182 + 183 + wait_on_bit(&head->wb_flags, PG_HEADLOCK, 184 + TASK_UNINTERRUPTIBLE); 162 185 } 163 186 164 187 /* ··· 242 219 { 243 220 bool ret; 244 221 245 - nfs_page_group_lock(req, true); 222 + nfs_page_group_lock(req, false); 246 223 ret = nfs_page_group_sync_on_bit_locked(req, bit); 247 224 nfs_page_group_unlock(req); 248 225 ··· 724 701 struct nfs_pgio_header *hdr) 725 702 { 726 703 struct nfs_page *req; 727 - struct page **pages; 704 + struct page **pages, 705 + *last_page; 728 706 struct list_head *head = &desc->pg_list; 729 707 struct nfs_commit_info cinfo; 730 - unsigned int pagecount; 708 + unsigned int pagecount, pageused; 731 709 732 710 pagecount = nfs_page_array_len(desc->pg_base, desc->pg_count); 733 711 if (!nfs_pgarray_set(&hdr->page_array, pagecount)) ··· 736 712 737 713 nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq); 738 714 pages = hdr->page_array.pagevec; 715 + last_page = NULL; 716 + pageused = 0; 739 717 while (!list_empty(head)) { 740 718 req = nfs_list_entry(head->next); 741 719 nfs_list_remove_request(req); 742 720 nfs_list_add_request(req, &hdr->pages); 743 - *pages++ = req->wb_page; 721 + 722 + if (WARN_ON_ONCE(pageused >= pagecount)) 723 + return nfs_pgio_error(desc, hdr); 724 + 725 + if (!last_page || last_page != req->wb_page) { 726 + *pages++ = last_page = req->wb_page; 727 + pageused++; 728 + } 744 729 } 730 + if (WARN_ON_ONCE(pageused != pagecount)) 731 + return nfs_pgio_error(desc, hdr); 745 732 746 733 if ((desc->pg_ioflags & FLUSH_COND_STABLE) && 747 734 (desc->pg_moreio || nfs_reqs_to_commit(&cinfo))) ··· 823 788 return false; 824 789 if (req_offset(req) != req_offset(prev) + prev->wb_bytes) 825 790 return false; 791 + if (req->wb_page == prev->wb_page) { 792 + if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes) 793 + return false; 794 + } else { 
795 + if (req->wb_pgbase != 0 || 796 + prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE) 797 + return false; 798 + } 826 799 } 827 800 size = pgio->pg_ops->pg_test(pgio, prev, req); 828 801 WARN_ON_ONCE(size > req->wb_bytes); ··· 901 858 struct nfs_page *subreq; 902 859 unsigned int bytes_left = 0; 903 860 unsigned int offset, pgbase; 904 - int ret; 905 861 906 - ret = nfs_page_group_lock(req, false); 907 - if (ret < 0) { 908 - desc->pg_error = ret; 909 - return 0; 910 - } 862 + nfs_page_group_lock(req, false); 911 863 912 864 subreq = req; 913 865 bytes_left = subreq->wb_bytes; ··· 924 886 if (desc->pg_recoalesce) 925 887 return 0; 926 888 /* retry add_request for this subreq */ 927 - ret = nfs_page_group_lock(req, false); 928 - if (ret < 0) { 929 - desc->pg_error = ret; 930 - return 0; 931 - } 889 + nfs_page_group_lock(req, false); 932 890 continue; 933 891 } 934 892
+17 -4
fs/nfs/write.c
··· 241 241 unsigned int pos = 0; 242 242 unsigned int len = nfs_page_length(req->wb_page); 243 243 244 - nfs_page_group_lock(req, true); 244 + nfs_page_group_lock(req, false); 245 245 246 246 do { 247 247 tmp = nfs_page_group_search_locked(req->wb_head, pos); ··· 478 478 return NULL; 479 479 } 480 480 481 - /* lock each request in the page group */ 482 - ret = nfs_page_group_lock(head, false); 483 - if (ret < 0) 481 + /* holding inode lock, so always make a non-blocking call to try the 482 + * page group lock */ 483 + ret = nfs_page_group_lock(head, true); 484 + if (ret < 0) { 485 + spin_unlock(&inode->i_lock); 486 + 487 + if (!nonblock && ret == -EAGAIN) { 488 + nfs_page_group_lock_wait(head); 489 + nfs_release_request(head); 490 + goto try_again; 491 + } 492 + 493 + nfs_release_request(head); 484 494 return ERR_PTR(ret); 495 + } 496 + 497 + /* lock each request in the page group */ 485 498 subreq = head; 486 499 do { 487 500 /*
+1
include/linux/nfs_page.h
··· 123 123 extern void nfs_unlock_request(struct nfs_page *req); 124 124 extern void nfs_unlock_and_release_request(struct nfs_page *); 125 125 extern int nfs_page_group_lock(struct nfs_page *, bool); 126 + extern void nfs_page_group_lock_wait(struct nfs_page *); 126 127 extern void nfs_page_group_unlock(struct nfs_page *); 127 128 extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int); 128 129