Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'gfs2-for-7.0' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 updates from Andreas Gruenbacher:

- Prevent rename() from failing with -ESTALE when there are locking
conflicts and retry the operation instead (see the sketch after this list)

- Don't fail when fiemap triggers a page fault (xfstest generic/742)

- Fix another locking request cancellation bug

- Minor other fixes and cleanups
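
The -ESTALE retry threads a retry count through the asynchronous lock
acquisition, so a timeout in gfs2_glock_async_wait() now means "all glocks
were released, back off and requeue" instead of an error returned to the
caller. A rough, condensed view of the caller-side pattern, taken from the
fs/gfs2/inode.c hunks further down:

    unsigned int retries = 0, x;
    int error;

again:
    /* Queue all holders asynchronously (GL_ASYNC). */
    for (x = 0; x < num_gh; x++) {
        error = gfs2_glock_nq(ghs + x);
        if (error)
            goto out_gunlock;
    }

    /* On -ESTALE every glock was dropped; requeue with a larger,
     * jittered timeout (see gfs2_glock_async_wait() below). */
    error = gfs2_glock_async_wait(num_gh, ghs, retries);
    if (error == -ESTALE) {
        retries++;
        goto again;
    }
    if (error)
        goto out_gunlock;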

* tag 'gfs2-for-7.0' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
gfs2: fiemap page fault fix
gfs2: fix memory leaks in gfs2_fill_super error path
gfs2: Fix use-after-free in iomap inline data write path
gfs2: Fix slab-use-after-free in qd_put
gfs2: Introduce glock_{type,number,sbd} helpers
gfs2: gfs2_glock_hold cleanup
gfs2: Use fixed GL_GLOCK_MIN_HOLD time
gfs2: Fix gfs2_log_get_bio argument type
gfs2: gfs2_chain_bio start sector fix
gfs2: Initialize bio->bi_opf early
gfs2: Rename gfs2_log_submit_{bio -> write}
gfs2: Do not cancel internal demote requests
gfs2: run_queue cleanup
gfs2: Retries missing in gfs2_{rename,exchange}
gfs2: glock cancelation flag fix

+277 -186
+12 -1
fs/gfs2/bmap.c
···
         goto out_unlock;
         break;
     default:
-        goto out_unlock;
+        goto out;
     }

     ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);
+    if (ret)
+        goto out_unlock;
+
+out:
+    if (iomap->type == IOMAP_INLINE) {
+        iomap->private = metapath_dibh(&mp);
+        get_bh(iomap->private);
+    }

 out_unlock:
     release_metapath(&mp);
···
 {
     struct gfs2_inode *ip = GFS2_I(inode);
     struct gfs2_sbd *sdp = GFS2_SB(inode);
+
+    if (iomap->private)
+        brelse(iomap->private);

     switch (flags & (IOMAP_WRITE | IOMAP_ZERO)) {
     case IOMAP_WRITE:
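
In short, the inline-data use-after-free fix pins the inode's buffer head for
the life of the write: previously the iomap begin path let release_metapath()
drop the only reference while iomap still pointed at the buffer. Condensed
from the hunks above:

    /* iomap begin, before release_metapath() runs: */
    if (iomap->type == IOMAP_INLINE) {
        iomap->private = metapath_dibh(&mp);  /* the inode's buffer head */
        get_bh(iomap->private);               /* extra ref keeps it alive */
    }

    /* iomap end, after the write has used the buffer: */
    if (iomap->private)
        brelse(iomap->private);               /* balances the get_bh() */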
+112 -78
fs/gfs2/glock.c
···
 
 typedef void (*glock_examiner) (struct gfs2_glock * gl);
 
-static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
+static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh,
+                     unsigned int target, bool may_cancel);
 static void request_demote(struct gfs2_glock *gl, unsigned int state,
                            unsigned long delay, bool remote);
 
···
 }
 
 void gfs2_glock_free(struct gfs2_glock *gl) {
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
 
     __gfs2_glock_free(gl);
     if (atomic_dec_and_test(&sdp->sd_glock_disposal))
···
 }
 
 void gfs2_glock_free_later(struct gfs2_glock *gl) {
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
 
     spin_lock(&lru_lock);
     list_add(&gl->gl_lru, &sdp->sd_dead_glocks);
···
 
 struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl)
 {
-    GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
-    lockref_get(&gl->gl_lockref);
+    if (!lockref_get_not_dead(&gl->gl_lockref))
+        GLOCK_BUG_ON(gl, 1);
     return gl;
 }
···
  * work queue.
  */
 static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
 
     if (!queue_delayed_work(sdp->sd_glock_wq, &gl->gl_work, delay)) {
         /*
···
 
 static void __gfs2_glock_put(struct gfs2_glock *gl)
 {
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     struct address_space *mapping = gfs2_glock2aspace(gl);
 
     lockref_mark_dead(&gl->gl_lockref);
···
     smp_mb__after_atomic();
     wake_up_bit(&gh->gh_iflags, HIF_WAIT);
     if (gh->gh_flags & GL_ASYNC) {
-        struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd;
+        struct gfs2_sbd *sdp = glock_sbd(gh->gh_gl);
 
         wake_up(&sdp->sd_async_glock_wait);
     }
···
 
 static void do_promote(struct gfs2_glock *gl)
 {
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     struct gfs2_holder *gh, *current_gh;
 
     if (gfs2_withdrawn(sdp)) {
···
 
 static void gfs2_set_demote(int nr, struct gfs2_glock *gl)
 {
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
 
     set_bit(nr, &gl->gl_flags);
     smp_mb();
···
     switch(gl->gl_state) {
     /* Unlocked due to conversion deadlock, try again */
     case LM_ST_UNLOCKED:
-        do_xmote(gl, gh, gl->gl_target);
+        do_xmote(gl, gh, gl->gl_target,
+                 !test_bit(GLF_DEMOTE_IN_PROGRESS,
+                           &gl->gl_flags));
         break;
     /* Conversion fails, unlock and try again */
     case LM_ST_SHARED:
     case LM_ST_DEFERRED:
-        do_xmote(gl, gh, LM_ST_UNLOCKED);
+        do_xmote(gl, gh, LM_ST_UNLOCKED, false);
         break;
     default: /* Everything else */
-        fs_err(gl->gl_name.ln_sbd,
+        fs_err(glock_sbd(gl),
                "glock %u:%llu requested=%u ret=%u\n",
-               gl->gl_name.ln_type, gl->gl_name.ln_number,
+               glock_type(gl), glock_number(gl),
                gl->gl_req, ret);
         GLOCK_BUG_ON(gl, 1);
     }
···
     }
 out:
     if (!test_bit(GLF_CANCELING, &gl->gl_flags))
-        clear_bit(GLF_LOCK, &gl->gl_flags);
+        clear_and_wake_up_bit(GLF_LOCK, &gl->gl_flags);
 }
 
 /**
···
  * @gl: The lock state
  * @gh: The holder (only for promotes)
  * @target: The target lock state
+ * @may_cancel: Operation may be canceled
  *
  */
 
 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh,
-                     unsigned int target)
+                     unsigned int target, bool may_cancel)
 __releases(&gl->gl_lockref.lock)
 __acquires(&gl->gl_lockref.lock)
 {
     const struct gfs2_glock_operations *glops = gl->gl_ops;
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     struct lm_lockstruct *ls = &sdp->sd_lockstruct;
     int ret;
···
     }
 
     if (ls->ls_ops->lm_lock) {
-        set_bit(GLF_PENDING_REPLY, &gl->gl_flags);
         spin_unlock(&gl->gl_lockref.lock);
         ret = ls->ls_ops->lm_lock(gl, target, gh ? gh->gh_flags : 0);
         spin_lock(&gl->gl_lockref.lock);
 
         if (!ret) {
+            if (may_cancel) {
+                set_bit(GLF_MAY_CANCEL, &gl->gl_flags);
+                smp_mb__after_atomic();
+                wake_up_bit(&gl->gl_flags, GLF_LOCK);
+            }
             /* The operation will be completed asynchronously. */
             gl->gl_lockref.count++;
             return;
         }
-        clear_bit(GLF_PENDING_REPLY, &gl->gl_flags);
 
         if (ret == -ENODEV) {
             /*
···
 
     if (test_bit(GLF_LOCK, &gl->gl_flags))
         return;
-    set_bit(GLF_LOCK, &gl->gl_flags);
 
     /*
-     * The GLF_DEMOTE_IN_PROGRESS flag is only set intermittently during
-     * locking operations. We have just started a locking operation by
-     * setting the GLF_LOCK flag, so the GLF_DEMOTE_IN_PROGRESS flag must
-     * be cleared.
+     * The GLF_DEMOTE_IN_PROGRESS flag must only be set when the GLF_LOCK
+     * flag is set as well.
      */
     GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
···
     }
 
     if (find_first_holder(gl))
-        goto out_unlock;
+        return;
     if (nonblock)
         goto out_sched;
     set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
     GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
     gl->gl_target = gl->gl_demote_state;
-    do_xmote(gl, NULL, gl->gl_target);
+    set_bit(GLF_LOCK, &gl->gl_flags);
+    do_xmote(gl, NULL, gl->gl_target, false);
     return;
 }
 
 promote:
     do_promote(gl);
     if (find_first_holder(gl))
-        goto out_unlock;
+        return;
     gh = find_first_waiter(gl);
     if (!gh)
-        goto out_unlock;
+        return;
     if (nonblock)
         goto out_sched;
     gl->gl_target = gh->gh_state;
     if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
         do_error(gl, 0); /* Fail queued try locks */
-    do_xmote(gl, gh, gl->gl_target);
+    set_bit(GLF_LOCK, &gl->gl_flags);
+    do_xmote(gl, gh, gl->gl_target, true);
     return;
 
 out_sched:
-    clear_bit(GLF_LOCK, &gl->gl_flags);
     gl->gl_lockref.count++;
     gfs2_glock_queue_work(gl, 0);
-    return;
-
-out_unlock:
-    clear_bit(GLF_LOCK, &gl->gl_flags);
 }
 
 /**
···
     prev_object = gl->gl_object;
     gl->gl_object = object;
     spin_unlock(&gl->gl_lockref.lock);
-    if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == NULL))
+    if (gfs2_assert_warn(glock_sbd(gl), prev_object == NULL))
         gfs2_dump_glock(NULL, gl, true);
 }
···
     prev_object = gl->gl_object;
     gl->gl_object = NULL;
     spin_unlock(&gl->gl_lockref.lock);
-    if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == object))
+    if (gfs2_assert_warn(glock_sbd(gl), prev_object == object))
         gfs2_dump_glock(NULL, gl, true);
 }
···
 
 bool gfs2_queue_try_to_evict(struct gfs2_glock *gl)
 {
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
 
     if (test_and_set_bit(GLF_TRY_TO_EVICT, &gl->gl_flags))
         return false;
···
 
 bool gfs2_queue_verify_delete(struct gfs2_glock *gl, bool later)
 {
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     unsigned long delay;
 
     if (test_and_set_bit(GLF_VERIFY_DELETE, &gl->gl_flags))
···
 {
     struct delayed_work *dwork = to_delayed_work(work);
     struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete);
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     bool verify_delete = test_and_clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags);
 
     /*
···
         gfs2_try_to_evict(gl);
 
     if (verify_delete) {
-        u64 no_addr = gl->gl_name.ln_number;
+        u64 no_addr = glock_number(gl);
         struct inode *inode;
 
         inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
···
     if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
         gl->gl_state != LM_ST_UNLOCKED &&
         gl->gl_demote_state != LM_ST_EXCLUSIVE) {
-        if (gl->gl_name.ln_type == LM_TYPE_INODE) {
+        if (glock_type(gl) == LM_TYPE_INODE) {
             unsigned long holdtime, now = jiffies;
 
             holdtime = gl->gl_tchange + gl->gl_hold_time;
···
     gl->gl_object = NULL;
     gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
     INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
-    if (gl->gl_name.ln_type == LM_TYPE_IOPEN)
+    if (glock_type(gl) == LM_TYPE_IOPEN)
         INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);
 
     mapping = gfs2_glock2aspace(gl);
···
  * gfs2_glock_async_wait - wait on multiple asynchronous glock acquisitions
  * @num_gh: the number of holders in the array
  * @ghs: the glock holder array
+ * @retries: number of retries attempted so far
  *
  * Returns: 0 on success, meaning all glocks have been granted and are held.
  *          -ESTALE if the request timed out, meaning all glocks were released,
  *          and the caller should retry the operation.
  */
 
-int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
+int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs,
+                          unsigned int retries)
 {
-    struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd;
-    int i, ret = 0, timeout = 0;
+    struct gfs2_sbd *sdp = glock_sbd(ghs[0].gh_gl);
     unsigned long start_time = jiffies;
+    int i, ret = 0;
+    long timeout;
 
     might_sleep();
-    /*
-     * Total up the (minimum hold time * 2) of all glocks and use that to
-     * determine the max amount of time we should wait.
-     */
-    for (i = 0; i < num_gh; i++)
-        timeout += ghs[i].gh_gl->gl_hold_time << 1;
 
-    if (!wait_event_timeout(sdp->sd_async_glock_wait,
+    timeout = GL_GLOCK_MIN_HOLD;
+    if (retries) {
+        unsigned int max_shift;
+        long incr;
+
+        /* Add a random delay and increase the timeout exponentially. */
+        max_shift = BITS_PER_LONG - 2 - __fls(GL_GLOCK_HOLD_INCR);
+        incr = min(GL_GLOCK_HOLD_INCR << min(retries - 1, max_shift),
+                   10 * HZ - GL_GLOCK_MIN_HOLD);
+        schedule_timeout_interruptible(get_random_long() % (incr / 3));
+        if (signal_pending(current))
+            goto interrupted;
+        timeout += (incr / 3) + get_random_long() % (incr / 3);
+    }
+
+    if (!wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
                             !glocks_pending(num_gh, ghs), timeout)) {
         ret = -ESTALE; /* request timed out. */
         goto out;
     }
+    if (signal_pending(current))
+        goto interrupted;
 
     for (i = 0; i < num_gh; i++) {
         struct gfs2_holder *gh = &ghs[i];
···
         }
     }
     return ret;
+
+interrupted:
+    ret = -EINTR;
+    goto out;
 }
 
 /**
···
 static inline void add_to_queue(struct gfs2_holder *gh)
 {
     struct gfs2_glock *gl = gh->gh_gl;
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     struct gfs2_holder *gh2;
 
     GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
···
     fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip);
     fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid));
     fs_err(sdp, "lock type: %d req lock state : %d\n",
-           gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
+           glock_type(gh2->gh_gl), gh2->gh_state);
     fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
     fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
     fs_err(sdp, "lock type: %d req lock state : %d\n",
-           gh->gh_gl->gl_name.ln_type, gh->gh_state);
+           glock_type(gh->gh_gl), gh->gh_state);
     gfs2_dump_glock(NULL, gl, true);
     BUG();
 }
···
 int gfs2_glock_nq(struct gfs2_holder *gh)
 {
     struct gfs2_glock *gl = gh->gh_gl;
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     int error;
 
     if (gfs2_withdrawn(sdp))
···
     list_del_init(&gh->gh_list);
     clear_bit(HIF_HOLDER, &gh->gh_iflags);
     trace_gfs2_glock_queue(gh, 0);
+    if (test_bit(HIF_WAIT, &gh->gh_iflags))
+        gfs2_holder_wake(gh);
 
     /*
      * If there hasn't been a demote request we are done.
···
     gl->gl_lockref.count++;
     if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
         !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
-        gl->gl_name.ln_type == LM_TYPE_INODE)
+        glock_type(gl) == LM_TYPE_INODE)
         delay = gl->gl_hold_time;
     gfs2_glock_queue_work(gl, delay);
 }
···
 {
     struct gfs2_glock *gl = gh->gh_gl;
 
+again:
     spin_lock(&gl->gl_lockref.lock);
     if (!gfs2_holder_queued(gh)) {
         /*
···
         test_bit(GLF_LOCK, &gl->gl_flags) &&
         !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
         !test_bit(GLF_CANCELING, &gl->gl_flags)) {
+        if (!test_bit(GLF_MAY_CANCEL, &gl->gl_flags)) {
+            struct wait_queue_head *wq;
+            DEFINE_WAIT(wait);
+
+            wq = bit_waitqueue(&gl->gl_flags, GLF_LOCK);
+            prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
+            spin_unlock(&gl->gl_lockref.lock);
+            schedule();
+            finish_wait(wq, &wait);
+            goto again;
+        }
+
         set_bit(GLF_CANCELING, &gl->gl_flags);
         spin_unlock(&gl->gl_lockref.lock);
-        gl->gl_name.ln_sbd->sd_lockstruct.ls_ops->lm_cancel(gl);
+        glock_sbd(gl)->sd_lockstruct.ls_ops->lm_cancel(gl);
         wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
         spin_lock(&gl->gl_lockref.lock);
         clear_bit(GLF_CANCELING, &gl->gl_flags);
-        clear_bit(GLF_LOCK, &gl->gl_flags);
+        clear_and_wake_up_bit(GLF_LOCK, &gl->gl_flags);
         if (!gfs2_holder_queued(gh))
             goto out;
     }
···
     gfs2_glock_hold(gl);
     spin_lock(&gl->gl_lockref.lock);
     if (!list_empty(&gl->gl_holders) &&
-        gl->gl_name.ln_type == LM_TYPE_INODE) {
+        glock_type(gl) == LM_TYPE_INODE) {
         unsigned long now = jiffies;
         unsigned long holdtime;
···
 
 void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 {
-    struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
+    struct lm_lockstruct *ls = &glock_sbd(gl)->sd_lockstruct;
 
     spin_lock(&gl->gl_lockref.lock);
-    clear_bit(GLF_PENDING_REPLY, &gl->gl_flags);
+    clear_bit(GLF_MAY_CANCEL, &gl->gl_flags);
     gl->gl_reply = ret;
 
     if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
···
     gla = list_entry(a, struct gfs2_glock, gl_lru);
     glb = list_entry(b, struct gfs2_glock, gl_lru);
 
-    if (gla->gl_name.ln_number > glb->gl_name.ln_number)
+    if (glock_number(gla) > glock_number(glb))
         return 1;
-    if (gla->gl_name.ln_number < glb->gl_name.ln_number)
+    if (glock_number(gla) < glock_number(glb))
         return -1;
 
     return 0;
···
 
 static bool can_free_glock(struct gfs2_glock *gl)
 {
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
 
     return !test_bit(GLF_LOCK, &gl->gl_flags) &&
            !gl->gl_lockref.count &&
···
     rhashtable_walk_start(&iter);
 
     while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) {
-        if (gl->gl_name.ln_sbd == sdp)
+        if (glock_sbd(gl) == sdp)
             examiner(gl);
     }
···
 
 static void flush_delete_work(struct gfs2_glock *gl)
 {
-    if (gl->gl_name.ln_type == LM_TYPE_IOPEN) {
-        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    if (glock_type(gl) == LM_TYPE_IOPEN) {
+        struct gfs2_sbd *sdp = glock_sbd(gl);
 
         if (cancel_delayed_work(&gl->gl_delete)) {
             queue_delayed_work(sdp->sd_delete_wq,
···
         *p++ = 'y';
     if (test_bit(GLF_LFLUSH, gflags))
         *p++ = 'f';
-    if (test_bit(GLF_PENDING_REPLY, gflags))
-        *p++ = 'R';
+    if (test_bit(GLF_MAY_CANCEL, gflags))
+        *p++ = 'c';
     if (test_bit(GLF_HAVE_REPLY, gflags))
         *p++ = 'r';
     if (test_bit(GLF_INITIAL, gflags))
···
     unsigned long long dtime;
     const struct gfs2_holder *gh;
     char gflags_buf[32];
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
     unsigned long nrpages = 0;
···
     gfs2_print_dbg(seq, "%sG: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d "
                    "v:%d r:%d m:%ld p:%lu\n",
                    fs_id_buf, state2str(gl->gl_state),
-                   gl->gl_name.ln_type,
-                   (unsigned long long)gl->gl_name.ln_number,
+                   glock_type(gl),
+                   (unsigned long long) glock_number(gl),
                    gflags2str(gflags_buf, gl),
                    state2str(gl->gl_target),
                    state2str(gl->gl_demote_state), dtime,
···
     struct gfs2_glock *gl = iter_ptr;
 
     seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
-               gl->gl_name.ln_type,
-               (unsigned long long)gl->gl_name.ln_number,
+               glock_type(gl),
+               (unsigned long long) glock_number(gl),
                (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
                (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
                (unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
···
             gl = NULL;
             break;
         }
-        if (gl->gl_name.ln_sbd != gi->sdp)
+        if (glock_sbd(gl) != gi->sdp)
             continue;
         if (n <= 1) {
             if (!lockref_get_not_dead(&gl->gl_lockref))
···
     gl = GFS2_I(inode)->i_iopen_gh.gh_gl;
     if (gl) {
         seq_printf(seq, "%d %u %u/%llx\n",
-                   i->tgid, i->fd, gl->gl_name.ln_type,
-                   (unsigned long long)gl->gl_name.ln_number);
+                   i->tgid, i->fd, glock_type(gl),
+                   (unsigned long long) glock_number(gl));
     }
     gfs2_glockfd_seq_show_flock(seq, i);
     inode_unlock_shared(inode);
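
The reworked gfs2_glock_async_wait() above computes its timeout as
GL_GLOCK_MIN_HOLD + incr/3 + random() % (incr/3), after first sleeping a
random 0..incr/3 jiffies, where incr doubles with each retry and is capped at
10 * HZ - GL_GLOCK_MIN_HOLD. Working the numbers through under the assumption
HZ = 1000 (so GL_GLOCK_MIN_HOLD = 10 jiffies and GL_GLOCK_HOLD_INCR = 50
jiffies):

    retries = 1:  incr = 50,   pre-sleep < 16 jiffies,  timeout 26..41   (~26-41 ms)
    retries = 2:  incr = 100,  pre-sleep < 33 jiffies,  timeout 43..75   (~43-75 ms)
    retries = 5:  incr = 800,  pre-sleep < 266 jiffies, timeout 276..541 (~0.3-0.5 s)
    retries >= 9: incr capped at 9990, timeout roughly 3.3..6.7 s

so nodes contending for the same glocks back off exponentially and, thanks to
the jitter, stop retrying in lockstep.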
+5 -4
fs/gfs2/glock.h
···
 
 #define GL_GLOCK_MAX_HOLD    (long)(HZ / 5)
 #define GL_GLOCK_DFT_HOLD    (long)(HZ / 5)
-#define GL_GLOCK_MIN_HOLD    (long)(10)
+#define GL_GLOCK_MIN_HOLD    (long)(HZ / 100)
 #define GL_GLOCK_HOLD_INCR   (long)(HZ / 20)
 #define GL_GLOCK_HOLD_DECR   (long)(HZ / 40)
 
···
 int gfs2_instantiate(struct gfs2_holder *gh);
 int gfs2_glock_holder_ready(struct gfs2_holder *gh);
 int gfs2_glock_wait(struct gfs2_holder *gh);
-int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
+int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs,
+                          unsigned int retries);
 void gfs2_glock_dq(struct gfs2_holder *gh);
 void gfs2_glock_dq_wait(struct gfs2_holder *gh);
 void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
···
             BUG(); } } while(0)
 #define gfs2_glock_assert_warn(gl, x) do { if (unlikely(!(x))) { \
             gfs2_dump_glock(NULL, gl, true); \
-            gfs2_assert_warn((gl)->gl_name.ln_sbd, (x)); } } \
+            gfs2_assert_warn(glock_sbd(gl), (x)); } } \
     while (0)
 #define gfs2_glock_assert_withdraw(gl, x) do { if (unlikely(!(x))) { \
             gfs2_dump_glock(NULL, gl, true); \
-            gfs2_assert_withdraw((gl)->gl_name.ln_sbd, (x)); } } \
+            gfs2_assert_withdraw(glock_sbd(gl), (x)); } } \
     while (0)
 
 __printf(2, 3)
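
The GL_GLOCK_MIN_HOLD change above turns the minimum hold time from a fixed
jiffy count into a fixed wall-clock time; the old constant only matched a
10 ms hold on HZ=1000 kernels:

    old: (long)(10)       -> 10 jiffies: 100 ms at HZ=100, 40 ms at HZ=250, 10 ms at HZ=1000
    new: (long)(HZ / 100) -> ~10 ms everywhere: 1 jiffy at HZ=100,
                             2 jiffies (8 ms) at HZ=250, 10 jiffies at HZ=1000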
+17 -17
fs/gfs2/glops.c
···
 
 static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
 {
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
 
     fs_err(sdp,
            "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
···
            bh, (unsigned long long)bh->b_blocknr, bh->b_state,
            bh->b_folio->mapping, bh->b_folio->flags.f);
     fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
-           gl->gl_name.ln_type, gl->gl_name.ln_number,
+           glock_type(gl), glock_number(gl),
            gfs2_glock2aspace(gl));
     gfs2_lm(sdp, "AIL error\n");
     gfs2_withdraw(sdp);
···
 static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
                              unsigned int nr_revokes)
 {
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     struct list_head *head = &gl->gl_ail_list;
     struct gfs2_bufdata *bd, *tmp;
     struct buffer_head *bh;
···
 
 static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
 {
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     struct gfs2_trans tr;
     unsigned int revokes;
     int ret = 0;
···
 
 void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
 {
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     unsigned int revokes = atomic_read(&gl->gl_ail_count);
     int ret;
···
 
 static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
 {
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     struct address_space *metamapping = gfs2_aspace(sdp);
     struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
     const unsigned bsize = sdp->sd_sb.sb_bsize;
···
 
 static int rgrp_go_sync(struct gfs2_glock *gl)
 {
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
     int error;
···
 
 static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
 {
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     struct address_space *mapping = gfs2_aspace(sdp);
     struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
     const unsigned bsize = sdp->sd_sb.sb_bsize;
···
     filemap_fdatawrite(metamapping);
     error = filemap_fdatawait(metamapping);
     if (error)
-        gfs2_io_error(gl->gl_name.ln_sbd);
+        gfs2_io_error(glock_sbd(gl));
     return error;
 }
···
 
     GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
 
-    gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
+    gfs2_log_flush(glock_sbd(gl), gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
                    GFS2_LFC_INODE_GO_SYNC);
     filemap_fdatawrite(metamapping);
     if (isreg) {
···
 {
     struct gfs2_inode *ip = gfs2_glock2inode(gl);
 
-    gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));
+    gfs2_assert_withdraw(glock_sbd(gl), !atomic_read(&gl->gl_ail_count));
 
     if (flags & DIO_METADATA) {
         struct address_space *mapping = gfs2_glock2aspace(gl);
···
         }
     }
 
-    if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
-        gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
+    if (ip == GFS2_I(glock_sbd(gl)->sd_rindex)) {
+        gfs2_log_flush(glock_sbd(gl), NULL,
                        GFS2_LOG_HEAD_FLUSH_NORMAL |
                        GFS2_LFC_INODE_GO_INVAL);
-        gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
+        glock_sbd(gl)->sd_rindex_uptodate = 0;
     }
     if (ip && S_ISREG(ip->i_inode.i_mode))
         truncate_inode_pages(ip->i_inode.i_mapping, 0);
···
 
 static void freeze_go_callback(struct gfs2_glock *gl, bool remote)
 {
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     struct super_block *sb = sdp->sd_vfs;
 
     if (!remote ||
···
  */
 static int freeze_go_xmote_bh(struct gfs2_glock *gl)
 {
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
     struct gfs2_glock *j_gl = ip->i_gl;
     struct gfs2_log_header_host head;
···
 static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 {
     struct gfs2_inode *ip = gl->gl_object;
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
 
     if (!remote || test_bit(SDF_KILL, &sdp->sd_flags))
         return;
+15 -3
fs/gfs2/incore.h
···
     GLF_BLOCKING = 15,
     GLF_TRY_TO_EVICT = 17,   /* iopen glocks only */
     GLF_VERIFY_DELETE = 18,  /* iopen glocks only */
-    GLF_PENDING_REPLY = 19,
+    GLF_MAY_CANCEL = 19,
     GLF_DEFER_DELETE = 20,   /* iopen glocks only */
     GLF_CANCELING = 21,
 };
···
     struct rcu_head gl_rcu;
     struct rhash_head gl_node;
 };
+
+static inline unsigned int glock_type(const struct gfs2_glock *gl)
+{
+    return gl->gl_name.ln_type;
+}
+
+static inline u64 glock_number(const struct gfs2_glock *gl)
+{
+    return gl->gl_name.ln_number;
+}
 
 enum {
     GIF_QD_LOCKED = 1,
···
     struct dentry *debugfs_dir;    /* debugfs directory */
 };
 
+#define glock_sbd(gl) ((gl)->gl_name.ln_sbd)
+
 #define GFS2_BAD_INO 1
 
 static inline struct address_space *gfs2_aspace(struct gfs2_sbd *sdp)
···
 
 static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
 {
-    const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    const struct gfs2_sbd *sdp = glock_sbd(gl);
     preempt_disable();
-    this_cpu_ptr(sdp->sd_lkstats)->lkstats[gl->gl_name.ln_type].stats[which]++;
+    this_cpu_ptr(sdp->sd_lkstats)->lkstats[glock_type(gl)].stats[which]++;
     preempt_enable();
 }
+30 -4
fs/gfs2/inode.c
···
     unsigned int num_gh;
     int dir_rename = 0;
     struct gfs2_diradd da = { .nr_blocks = 0, .save_loc = 0, };
-    unsigned int x;
+    unsigned int retries = 0, x;
     int error;
 
     gfs2_holder_mark_uninitialized(&r_gh);
···
         num_gh++;
     }
 
+again:
     for (x = 0; x < num_gh; x++) {
         error = gfs2_glock_nq(ghs + x);
         if (error)
             goto out_gunlock;
     }
-    error = gfs2_glock_async_wait(num_gh, ghs);
+    error = gfs2_glock_async_wait(num_gh, ghs, retries);
+    if (error == -ESTALE) {
+        retries++;
+        goto again;
+    }
     if (error)
         goto out_gunlock;
 
···
     struct gfs2_sbd *sdp = GFS2_SB(odir);
     struct gfs2_holder ghs[4], r_gh;
     unsigned int num_gh;
-    unsigned int x;
+    unsigned int retries = 0, x;
     umode_t old_mode = oip->i_inode.i_mode;
     umode_t new_mode = nip->i_inode.i_mode;
     int error;
···
     gfs2_holder_init(nip->i_gl, LM_ST_EXCLUSIVE, GL_ASYNC, ghs + num_gh);
     num_gh++;
 
+again:
     for (x = 0; x < num_gh; x++) {
         error = gfs2_glock_nq(ghs + x);
         if (error)
             goto out_gunlock;
     }
 
-    error = gfs2_glock_async_wait(num_gh, ghs);
+    error = gfs2_glock_async_wait(num_gh, ghs, retries);
+    if (error == -ESTALE) {
+        retries++;
+        goto again;
+    }
     if (error)
         goto out_gunlock;
 
···
     return 0;
 }
 
+static bool fault_in_fiemap(struct fiemap_extent_info *fi)
+{
+    struct fiemap_extent __user *dest = fi->fi_extents_start;
+    size_t size = sizeof(*dest) * fi->fi_extents_max;
+
+    return fault_in_safe_writeable((char __user *)dest, size) == 0;
+}
+
 static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                        u64 start, u64 len)
 {
···
 
     inode_lock_shared(inode);
 
+retry:
     ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
     if (ret)
         goto out;
 
+    pagefault_disable();
     ret = iomap_fiemap(inode, fieinfo, start, len, &gfs2_iomap_ops);
+    pagefault_enable();
 
     gfs2_glock_dq_uninit(&gh);
+
+    if (ret == -EFAULT && fault_in_fiemap(fieinfo)) {
+        fieinfo->fi_extents_mapped = 0;
+        goto retry;
+    }
 
 out:
     inode_unlock_shared(inode);
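
The fiemap change above is the usual fault-in-and-retry pattern for code that
must not take a page fault while holding a lock the fault path itself might
need. Annotated (the comments are interpretation, not source text):

retry:
    ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
    if (ret)
        goto out;

    pagefault_disable();    /* copying extents to userspace must not
                             * fault here while the glock is held */
    ret = iomap_fiemap(inode, fieinfo, start, len, &gfs2_iomap_ops);
    pagefault_enable();

    gfs2_glock_dq_uninit(&gh);  /* glock dropped before touching user memory */

    if (ret == -EFAULT && fault_in_fiemap(fieinfo)) {
        fieinfo->fi_extents_mapped = 0; /* restart with an empty extent array */
        goto retry;
    }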
+14 -14
fs/gfs2/lock_dlm.c
···
                              bool blocking)
 {
     struct gfs2_pcpu_lkstats *lks;
-    const unsigned gltype = gl->gl_name.ln_type;
+    const unsigned gltype = glock_type(gl);
     unsigned index = blocking ? GFS2_LKS_SRTTB : GFS2_LKS_SRTT;
     s64 rtt;
 
     preempt_disable();
     rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp));
-    lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
+    lks = this_cpu_ptr(glock_sbd(gl)->sd_lkstats);
     gfs2_update_stats(&gl->gl_stats, index, rtt);         /* Local */
     gfs2_update_stats(&lks->lkstats[gltype], index, rtt); /* Global */
     preempt_enable();
···
 static inline void gfs2_update_request_times(struct gfs2_glock *gl)
 {
     struct gfs2_pcpu_lkstats *lks;
-    const unsigned gltype = gl->gl_name.ln_type;
+    const unsigned gltype = glock_type(gl);
     ktime_t dstamp;
     s64 irt;
 
···
     dstamp = gl->gl_dstamp;
     gl->gl_dstamp = ktime_get_real();
     irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp));
-    lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
+    lks = this_cpu_ptr(glock_sbd(gl)->sd_lkstats);
     gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt);         /* Local */
     gfs2_update_stats(&lks->lkstats[gltype], GFS2_LKS_SIRT, irt); /* Global */
     preempt_enable();
···
         gfs2_glock_cb(gl, LM_ST_SHARED);
         break;
     default:
-        fs_err(gl->gl_name.ln_sbd, "unknown bast mode %d\n", mode);
+        fs_err(glock_sbd(gl), "unknown bast mode %d\n", mode);
         BUG();
     }
···
 static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
                      unsigned int flags)
 {
-    struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
+    struct lm_lockstruct *ls = &glock_sbd(gl)->sd_lockstruct;
     bool blocking;
     int cur, req;
     u32 lkf;
···
     int error;
 
     gl->gl_req = req_state;
-    cur = make_mode(gl->gl_name.ln_sbd, gl->gl_state);
-    req = make_mode(gl->gl_name.ln_sbd, req_state);
+    cur = make_mode(glock_sbd(gl), gl->gl_state);
+    req = make_mode(glock_sbd(gl), req_state);
     blocking = !down_conversion(cur, req) &&
                !(flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB));
     lkf = make_flags(gl, flags, req, blocking);
···
     if (test_bit(GLF_INITIAL, &gl->gl_flags)) {
         memset(strname, ' ', GDLM_STRNAME_BYTES - 1);
         strname[GDLM_STRNAME_BYTES - 1] = '\0';
-        gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type);
-        gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number);
+        gfs2_reverse_hex(strname + 7, glock_type(gl));
+        gfs2_reverse_hex(strname + 23, glock_number(gl));
         gl->gl_dstamp = ktime_get_real();
     } else {
         gfs2_update_request_times(gl);
···
 
 static void gdlm_put_lock(struct gfs2_glock *gl)
 {
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     struct lm_lockstruct *ls = &sdp->sd_lockstruct;
     uint32_t flags = 0;
     int error;
···
 
     if (error) {
         fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n",
-               gl->gl_name.ln_type,
-               (unsigned long long)gl->gl_name.ln_number, error);
+               glock_type(gl),
+               (unsigned long long) glock_number(gl), error);
     }
 }
 
 static void gdlm_cancel(struct gfs2_glock *gl)
 {
-    struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
+    struct lm_lockstruct *ls = &glock_sbd(gl)->sd_lockstruct;
 
     down_read(&ls->ls_sem);
     if (likely(ls->ls_dlm != NULL)) {
+4 -3
fs/gfs2/log.c
···
                       sb->s_blocksize - LH_V1_SIZE - 4);
     lh->lh_crc = cpu_to_be32(crc);
 
-    gfs2_log_write(sdp, jd, page, sb->s_blocksize, 0, dblock);
-    gfs2_log_submit_bio(&jd->jd_log_bio, REQ_OP_WRITE | op_flags);
+    gfs2_log_write(sdp, jd, page, sb->s_blocksize, 0, dblock,
+                   REQ_OP_WRITE | op_flags);
+    gfs2_log_submit_write(&jd->jd_log_bio);
 }
 
 /**
···
     if (gfs2_withdrawn(sdp))
         goto out_withdraw;
     if (sdp->sd_jdesc)
-        gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
+        gfs2_log_submit_write(&sdp->sd_jdesc->jd_log_bio);
     if (gfs2_withdrawn(sdp))
         goto out_withdraw;
+27 -24
fs/gfs2/lops.c
···
 
 static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
 {
-    return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
+    return glock_type(bd->bd_gl) == LM_TYPE_RGRP;
 }
 
 static void maybe_release_space(struct gfs2_bufdata *bd)
 {
     struct gfs2_glock *gl = bd->bd_gl;
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
-    unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
+    unsigned int index = bd->bd_bh->b_blocknr - glock_number(gl);
     struct gfs2_bitmap *bi = rgd->rd_bits + index;
 
     rgrp_lock_local(rgd);
···
 }
 
 /**
- * gfs2_log_submit_bio - Submit any pending log bio
+ * gfs2_log_submit_write - Submit a pending log write bio
  * @biop: Address of the bio pointer
- * @opf: REQ_OP | op_flags
  *
  * Submit any pending part-built or full bio to the block device.  If
  * there is no pending bio, then this is a no-op.
  */
 
-void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf)
+void gfs2_log_submit_write(struct bio **biop)
 {
     struct bio *bio = *biop;
     if (bio) {
         struct gfs2_sbd *sdp = bio->bi_private;
         atomic_inc(&sdp->sd_log_in_flight);
-        bio->bi_opf = opf;
         submit_bio(bio);
         *biop = NULL;
     }
···
  * @sdp: The super block
  * @blkno: The device block number we want to write to
  * @end_io: The bi_end_io callback
+ * @opf: REQ_OP | op_flags
  *
  * Allocate a new bio, initialize it with the given parameters and return it.
  *
···
  */
 
 static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
-                                      bio_end_io_t *end_io)
+                                      bio_end_io_t *end_io, blk_opf_t opf)
 {
     struct super_block *sb = sdp->sd_vfs;
-    struct bio *bio = bio_alloc(sb->s_bdev, BIO_MAX_VECS, 0, GFP_NOIO);
+    struct bio *bio = bio_alloc(sb->s_bdev, BIO_MAX_VECS, opf, GFP_NOIO);
 
     bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
     bio->bi_end_io = end_io;
···
  * @sdp: The super block
  * @blkno: The device block number we want to write to
  * @biop: The bio to get or allocate
- * @op: REQ_OP
+ * @opf: REQ_OP | op_flags
  * @end_io: The bi_end_io callback
  * @flush: Always flush the current bio and allocate a new one?
  *
···
  */
 
 static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
-                                    struct bio **biop, enum req_op op,
+                                    struct bio **biop, blk_opf_t opf,
                                     bio_end_io_t *end_io, bool flush)
 {
     struct bio *bio = *biop;
···
         nblk >>= sdp->sd_fsb2bb_shift;
         if (blkno == nblk && !flush)
             return bio;
-        gfs2_log_submit_bio(biop, op);
+        gfs2_log_submit_write(biop);
     }
 
-    *biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
+    *biop = gfs2_log_alloc_bio(sdp, blkno, end_io, opf);
     return *biop;
 }
···
  * @size: the size of the data to write
  * @offset: the offset within the page
  * @blkno: block number of the log entry
+ * @opf: REQ_OP | op_flags
  *
  * Try and add the page segment to the current bio.  If that fails,
  * submit the current bio to the device and create a new one, and
···
 
 void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
                     struct page *page, unsigned size, unsigned offset,
-                    u64 blkno)
+                    u64 blkno, blk_opf_t opf)
 {
     struct bio *bio;
     int ret;
 
-    bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, REQ_OP_WRITE,
+    bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, opf,
                            gfs2_end_log_write, false);
     ret = bio_add_page(bio, page, size, offset);
     if (ret == 0) {
         bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio,
-                               REQ_OP_WRITE, gfs2_end_log_write, true);
+                               opf, gfs2_end_log_write, true);
         ret = bio_add_page(bio, page, size, offset);
         WARN_ON(ret == 0);
     }
···
     dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
     gfs2_log_incr_head(sdp);
     gfs2_log_write(sdp, sdp->sd_jdesc, folio_page(bh->b_folio, 0),
-                   bh->b_size, bh_offset(bh), dblock);
+                   bh->b_size, bh_offset(bh), dblock, REQ_OP_WRITE);
 }
 
 /**
···
 
     dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
     gfs2_log_incr_head(sdp);
-    gfs2_log_write(sdp, sdp->sd_jdesc, page, sb->s_blocksize, 0, dblock);
+    gfs2_log_write(sdp, sdp->sd_jdesc, page, sb->s_blocksize, 0, dblock,
+                   REQ_OP_WRITE);
 }
 
 /**
···
     folio_put_refs(folio, 2);
 }
 
-static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
+static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs,
+                                  sector_t sector, blk_opf_t opf)
 {
     struct bio *new;
 
-    new = bio_alloc(prev->bi_bdev, nr_iovecs, prev->bi_opf, GFP_NOIO);
+    new = bio_alloc(prev->bi_bdev, nr_iovecs, opf, GFP_NOIO);
     bio_clone_blkg_association(new, prev);
-    new->bi_iter.bi_sector = bio_end_sector(prev);
+    new->bi_iter.bi_sector = sector;
     bio_chain(new, prev);
     submit_bio(prev);
     return new;
···
             unsigned int blocks =
                 (PAGE_SIZE - off) >> bsize_shift;
 
-            bio = gfs2_chain_bio(bio, blocks);
+            bio = gfs2_chain_bio(bio, blocks, sector,
+                                 REQ_OP_READ);
             goto add_block_to_new_bio;
         }
     }
···
             submit_bio(bio);
     }
 
-    bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
-    bio->bi_opf = REQ_OP_READ;
+    bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read,
+                             REQ_OP_READ);
 add_block_to_new_bio:
     bio_add_folio_nofail(bio, folio, bsize, off);
 block_added:
+2 -2
fs/gfs2/lops.h
···
 u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lbn);
 void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
                     struct page *page, unsigned size, unsigned offset,
-                    u64 blkno);
-void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf);
+                    u64 blkno, blk_opf_t opf);
+void gfs2_log_submit_write(struct bio **biop);
 void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
 int gfs2_find_jhead(struct gfs2_jdesc *jd,
                     struct gfs2_log_header_host *head);
+3 -3
fs/gfs2/meta_io.c
···
 struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
 {
     struct address_space *mapping = gfs2_glock2aspace(gl);
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     struct folio *folio;
     struct buffer_head *bh;
     unsigned int shift;
···
 int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
                    int rahead, struct buffer_head **bhp)
 {
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     struct buffer_head *bh, *bhs[2];
     int num = 0;
···
 
 struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
 {
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     struct buffer_head *first_bh, *bh;
     u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
                  sdp->sd_sb.sb_bsize_shift;
+1 -1
fs/gfs2/meta_io.h
···
     if (mapping->a_ops == &gfs2_meta_aops) {
         struct gfs2_glock_aspace *gla =
             container_of(mapping, struct gfs2_glock_aspace, mapping);
-        return gla->glock.gl_name.ln_sbd;
+        return glock_sbd(&gla->glock);
     } else
         return inode->i_sb->s_fs_info;
 }
+1 -1
fs/gfs2/ops_fstype.c
···
 
     if (error) {
         gfs2_freeze_unlock(sdp);
-        gfs2_destroy_threads(sdp);
         fs_err(sdp, "can't make FS RW: %d\n", error);
         goto fail_per_node;
     }
···
 
 fail_per_node:
     init_per_node(sdp, UNDO);
+    gfs2_destroy_threads(sdp);
 fail_inodes:
     init_inodes(sdp, UNDO);
 fail_sb:
+3 -2
fs/gfs2/quota.c
···
     lockref_mark_dead(&qd->qd_lockref);
     spin_unlock(&qd->qd_lockref.lock);
 
+    list_lru_del_obj(&gfs2_qd_lru, &qd->qd_lru);
     gfs2_qd_dispose(qd);
     return;
 }
···
     gfs2_glock_dq_uninit(&ghs[qx]);
     inode_unlock(&ip->i_inode);
     kfree(ghs);
-    gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
+    gfs2_log_flush(glock_sbd(ip->i_gl), ip->i_gl,
                    GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
     if (!error) {
         for (x = 0; x < num_qd; x++) {
···
     struct gfs2_holder i_gh;
     int error;
 
-    gfs2_assert_warn(sdp, sdp == qd->qd_gl->gl_name.ln_sbd);
+    gfs2_assert_warn(sdp, sdp == glock_sbd(qd->qd_gl));
 restart:
     error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
     if (error)
+1 -1
fs/gfs2/rgrp.c
···
 static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
 {
     const struct gfs2_glock *gl = rgd->rd_gl;
-    const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    const struct gfs2_sbd *sdp = glock_sbd(gl);
     struct gfs2_lkstats *st;
     u64 r_dcount, l_dcount;
     u64 l_srttb, a_srttb = 0;
+3 -1
fs/gfs2/super.c
···
     }
 
     error = gfs2_quota_init(sdp);
-    if (!error && gfs2_withdrawn(sdp))
+    if (!error && gfs2_withdrawn(sdp)) {
+        gfs2_quota_cleanup(sdp);
         error = -EIO;
+    }
     if (!error)
         set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
     return error;
+25 -25
fs/gfs2/trace_gfs2.h
···
         {(1UL << GLF_DEMOTE_IN_PROGRESS), "p" }, \
         {(1UL << GLF_DIRTY),              "y" }, \
         {(1UL << GLF_LFLUSH),             "f" }, \
-        {(1UL << GLF_PENDING_REPLY),      "R" }, \
+        {(1UL << GLF_MAY_CANCEL),         "c" }, \
         {(1UL << GLF_HAVE_REPLY),         "r" }, \
         {(1UL << GLF_INITIAL),            "a" }, \
         {(1UL << GLF_HAVE_FROZEN_REPLY),  "F" }, \
···
     ),
 
     TP_fast_assign(
-        __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
-        __entry->glnum = gl->gl_name.ln_number;
-        __entry->gltype = gl->gl_name.ln_type;
+        __entry->dev = glock_sbd(gl)->sd_vfs->s_dev;
+        __entry->glnum = glock_number(gl);
+        __entry->gltype = glock_type(gl);
         __entry->cur_state = glock_trace_state(gl->gl_state);
         __entry->new_state = glock_trace_state(new_state);
         __entry->tgt_state = glock_trace_state(gl->gl_target);
···
     ),
 
     TP_fast_assign(
-        __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
-        __entry->gltype = gl->gl_name.ln_type;
-        __entry->glnum = gl->gl_name.ln_number;
+        __entry->dev = glock_sbd(gl)->sd_vfs->s_dev;
+        __entry->gltype = glock_type(gl);
+        __entry->glnum = glock_number(gl);
         __entry->cur_state = glock_trace_state(gl->gl_state);
         __entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
     ),
···
     ),
 
     TP_fast_assign(
-        __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
-        __entry->gltype = gl->gl_name.ln_type;
-        __entry->glnum = gl->gl_name.ln_number;
+        __entry->dev = glock_sbd(gl)->sd_vfs->s_dev;
+        __entry->gltype = glock_type(gl);
+        __entry->glnum = glock_number(gl);
         __entry->cur_state = glock_trace_state(gl->gl_state);
         __entry->dmt_state = glock_trace_state(gl->gl_demote_state);
         __entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
···
     ),
 
     TP_fast_assign(
-        __entry->dev = gh->gh_gl->gl_name.ln_sbd->sd_vfs->s_dev;
-        __entry->glnum = gh->gh_gl->gl_name.ln_number;
-        __entry->gltype = gh->gh_gl->gl_name.ln_type;
+        __entry->dev = glock_sbd(gh->gh_gl)->sd_vfs->s_dev;
+        __entry->glnum = glock_number(gh->gh_gl);
+        __entry->gltype = glock_type(gh->gh_gl);
         __entry->state = glock_trace_state(gh->gh_state);
     ),
···
     ),
 
     TP_fast_assign(
-        __entry->dev = gh->gh_gl->gl_name.ln_sbd->sd_vfs->s_dev;
-        __entry->glnum = gh->gh_gl->gl_name.ln_number;
-        __entry->gltype = gh->gh_gl->gl_name.ln_type;
+        __entry->dev = glock_sbd(gh->gh_gl)->sd_vfs->s_dev;
+        __entry->glnum = glock_number(gh->gh_gl);
+        __entry->gltype = glock_type(gh->gh_gl);
         __entry->queue = queue;
         __entry->state = glock_trace_state(gh->gh_state);
     ),
···
     ),
 
     TP_fast_assign(
-        __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
-        __entry->glnum = gl->gl_name.ln_number;
-        __entry->gltype = gl->gl_name.ln_type;
+        __entry->dev = glock_sbd(gl)->sd_vfs->s_dev;
+        __entry->glnum = glock_number(gl);
+        __entry->gltype = glock_type(gl);
         __entry->status = gl->gl_lksb.sb_status;
         __entry->flags = gl->gl_lksb.sb_flags;
         __entry->tdiff = tdiff;
···
     ),
 
     TP_fast_assign(
-        __entry->dev = bd->bd_gl->gl_name.ln_sbd->sd_vfs->s_dev;
+        __entry->dev = glock_sbd(bd->bd_gl)->sd_vfs->s_dev;
         __entry->pin = pin;
         __entry->len = bd->bd_bh->b_size;
         __entry->block = bd->bd_bh->b_blocknr;
-        __entry->ino = bd->bd_gl->gl_name.ln_number;
+        __entry->ino = glock_number(bd->bd_gl);
     ),
 
     TP_printk("%u,%u log %s %llu/%lu inode %llu",
···
     ),
 
     TP_fast_assign(
-        __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
+        __entry->dev = glock_sbd(ip->i_gl)->sd_vfs->s_dev;
         __entry->lblock = lblock;
         __entry->pblock = buffer_mapped(bh) ? bh->b_blocknr : 0;
         __entry->inum = ip->i_no_addr;
···
     ),
 
     TP_fast_assign(
-        __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
+        __entry->dev = glock_sbd(ip->i_gl)->sd_vfs->s_dev;
         __entry->inum = ip->i_no_addr;
         __entry->pos = pos;
         __entry->length = length;
···
     ),
 
     TP_fast_assign(
-        __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
+        __entry->dev = glock_sbd(ip->i_gl)->sd_vfs->s_dev;
         __entry->inum = ip->i_no_addr;
         __entry->offset = iomap->offset;
         __entry->length = iomap->length;
···
     ),
 
     TP_fast_assign(
-        __entry->dev = rgd->rd_gl->gl_name.ln_sbd->sd_vfs->s_dev;
+        __entry->dev = glock_sbd(rgd->rd_gl)->sd_vfs->s_dev;
         __entry->start = block;
         __entry->inum = ip->i_no_addr;
         __entry->len = len;
+2 -2
fs/gfs2/trans.c
···
 void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh)
 {
     struct gfs2_trans *tr = current->journal_info;
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     struct gfs2_bufdata *bd;
 
     lock_buffer(bh);
···
 void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
 {
 
-    struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+    struct gfs2_sbd *sdp = glock_sbd(gl);
     struct super_block *sb = sdp->sd_vfs;
     struct gfs2_bufdata *bd;
     struct gfs2_meta_header *mh;