Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

gfs2: Get rid of gfs2_log_[un]lock helpers

These two helpers only hide the locking operation; they do not make
the code more readable.

Created with:

sed -i -e 's:gfs2_log_unlock(sdp):spin_unlock(\&sdp->sd_log_lock):' \
-e 's:gfs2_log_lock(sdp):spin_lock(\&sdp->sd_log_lock):'

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>

+37 -61
+5 -5
fs/gfs2/aops.c
··· 583 583 struct gfs2_bufdata *bd; 584 584 585 585 lock_buffer(bh); 586 - gfs2_log_lock(sdp); 586 + spin_lock(&sdp->sd_log_lock); 587 587 clear_buffer_dirty(bh); 588 588 bd = bh->b_private; 589 589 if (bd) { ··· 599 599 clear_buffer_mapped(bh); 600 600 clear_buffer_req(bh); 601 601 clear_buffer_new(bh); 602 - gfs2_log_unlock(sdp); 602 + spin_unlock(&sdp->sd_log_lock); 603 603 unlock_buffer(bh); 604 604 } 605 605 ··· 667 667 * again. 668 668 */ 669 669 670 - gfs2_log_lock(sdp); 670 + spin_lock(&sdp->sd_log_lock); 671 671 bh = head; 672 672 do { 673 673 if (atomic_read(&bh->b_count)) ··· 699 699 700 700 bh = bh->b_this_page; 701 701 } while (bh != head); 702 - gfs2_log_unlock(sdp); 702 + spin_unlock(&sdp->sd_log_lock); 703 703 704 704 return try_to_free_buffers(folio); 705 705 706 706 cannot_release: 707 - gfs2_log_unlock(sdp); 707 + spin_unlock(&sdp->sd_log_lock); 708 708 return false; 709 709 } 710 710
+4 -4
fs/gfs2/glops.c
··· 64 64 struct buffer_head *bh; 65 65 const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock); 66 66 67 - gfs2_log_lock(sdp); 67 + spin_lock(&sdp->sd_log_lock); 68 68 spin_lock(&sdp->sd_ail_lock); 69 69 list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) { 70 70 if (nr_revokes == 0) ··· 80 80 } 81 81 GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count)); 82 82 spin_unlock(&sdp->sd_ail_lock); 83 - gfs2_log_unlock(sdp); 83 + spin_unlock(&sdp->sd_log_lock); 84 84 } 85 85 86 86 ··· 109 109 * If none of these conditions are true, our revokes are all 110 110 * flushed and we can return. 111 111 */ 112 - gfs2_log_lock(sdp); 112 + spin_lock(&sdp->sd_log_lock); 113 113 have_revokes = !list_empty(&sdp->sd_log_revokes); 114 114 log_in_flight = atomic_read(&sdp->sd_log_in_flight); 115 - gfs2_log_unlock(sdp); 115 + spin_unlock(&sdp->sd_log_lock); 116 116 if (have_revokes) 117 117 goto flush; 118 118 if (log_in_flight)
+6 -6
fs/gfs2/log.c
··· 800 800 /* number of revokes we still have room for */ 801 801 unsigned int max_revokes = atomic_read(&sdp->sd_log_revokes_available); 802 802 803 - gfs2_log_lock(sdp); 803 + spin_lock(&sdp->sd_log_lock); 804 804 gfs2_ail1_empty(sdp, max_revokes); 805 - gfs2_log_unlock(sdp); 805 + spin_unlock(&sdp->sd_log_lock); 806 806 } 807 807 808 808 /** ··· 1110 1110 goto out_withdraw; 1111 1111 lops_after_commit(sdp, tr); 1112 1112 1113 - gfs2_log_lock(sdp); 1113 + spin_lock(&sdp->sd_log_lock); 1114 1114 sdp->sd_log_blks_reserved = 0; 1115 1115 1116 1116 spin_lock(&sdp->sd_ail_lock); ··· 1119 1119 tr = NULL; 1120 1120 } 1121 1121 spin_unlock(&sdp->sd_ail_lock); 1122 - gfs2_log_unlock(sdp); 1122 + spin_unlock(&sdp->sd_log_lock); 1123 1123 1124 1124 if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) { 1125 1125 if (!sdp->sd_log_idle) { ··· 1200 1200 unsigned int unused; 1201 1201 unsigned int maxres; 1202 1202 1203 - gfs2_log_lock(sdp); 1203 + spin_lock(&sdp->sd_log_lock); 1204 1204 1205 1205 if (sdp->sd_log_tr) { 1206 1206 gfs2_merge_trans(sdp, tr); ··· 1218 1218 gfs2_log_release(sdp, unused); 1219 1219 sdp->sd_log_blks_reserved = reserved; 1220 1220 1221 - gfs2_log_unlock(sdp); 1221 + spin_unlock(&sdp->sd_log_lock); 1222 1222 } 1223 1223 1224 1224 static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
-24
fs/gfs2/log.h
··· 20 20 */ 21 21 #define GFS2_LOG_FLUSH_MIN_BLOCKS 4 22 22 23 - /** 24 - * gfs2_log_lock - acquire the right to mess with the log manager 25 - * @sdp: the filesystem 26 - * 27 - */ 28 - 29 - static inline void gfs2_log_lock(struct gfs2_sbd *sdp) 30 - __acquires(&sdp->sd_log_lock) 31 - { 32 - spin_lock(&sdp->sd_log_lock); 33 - } 34 - 35 - /** 36 - * gfs2_log_unlock - release the right to mess with the log manager 37 - * @sdp: the filesystem 38 - * 39 - */ 40 - 41 - static inline void gfs2_log_unlock(struct gfs2_sbd *sdp) 42 - __releases(&sdp->sd_log_lock) 43 - { 44 - spin_unlock(&sdp->sd_log_lock); 45 - } 46 - 47 23 static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip) 48 24 { 49 25 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+8 -8
fs/gfs2/lops.c
··· 648 648 unsigned n; 649 649 __be64 *ptr; 650 650 651 - gfs2_log_lock(sdp); 651 + spin_lock(&sdp->sd_log_lock); 652 652 list_sort(NULL, blist, blocknr_cmp); 653 653 bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list); 654 654 while(total) { 655 655 num = total; 656 656 if (total > limit) 657 657 num = limit; 658 - gfs2_log_unlock(sdp); 658 + spin_unlock(&sdp->sd_log_lock); 659 659 page = gfs2_get_log_desc(sdp, 660 660 is_databuf ? GFS2_LOG_DESC_JDATA : 661 661 GFS2_LOG_DESC_METADATA, num + 1, num); 662 662 ld = page_address(page); 663 - gfs2_log_lock(sdp); 663 + spin_lock(&sdp->sd_log_lock); 664 664 ptr = (__be64 *)(ld + 1); 665 665 666 666 n = 0; ··· 674 674 break; 675 675 } 676 676 677 - gfs2_log_unlock(sdp); 677 + spin_unlock(&sdp->sd_log_lock); 678 678 gfs2_log_write_page(sdp, page); 679 - gfs2_log_lock(sdp); 679 + spin_lock(&sdp->sd_log_lock); 680 680 681 681 n = 0; 682 682 list_for_each_entry_continue(bd2, blist, bd_list) { 683 683 get_bh(bd2->bd_bh); 684 - gfs2_log_unlock(sdp); 684 + spin_unlock(&sdp->sd_log_lock); 685 685 lock_buffer(bd2->bd_bh); 686 686 687 687 if (buffer_escaped(bd2->bd_bh)) { ··· 698 698 } else { 699 699 gfs2_log_write_bh(sdp, bd2->bd_bh); 700 700 } 701 - gfs2_log_lock(sdp); 701 + spin_lock(&sdp->sd_log_lock); 702 702 if (++n >= num) 703 703 break; 704 704 } ··· 706 706 BUG_ON(total < num); 707 707 total -= num; 708 708 } 709 - gfs2_log_unlock(sdp); 709 + spin_unlock(&sdp->sd_log_lock); 710 710 } 711 711 712 712 static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
+4 -4
fs/gfs2/meta_io.c
··· 391 391 struct buffer_head *bh; 392 392 u64 end = bstart + blen; 393 393 394 - gfs2_log_lock(sdp); 394 + spin_lock(&sdp->sd_log_lock); 395 395 spin_lock(&sdp->sd_ail_lock); 396 396 list_for_each_entry_safe(tr, s, &sdp->sd_ail1_list, tr_list) { 397 397 list_for_each_entry_safe(bd, bs, &tr->tr_ail1_list, ··· 404 404 } 405 405 } 406 406 spin_unlock(&sdp->sd_ail_lock); 407 - gfs2_log_unlock(sdp); 407 + spin_unlock(&sdp->sd_log_lock); 408 408 } 409 409 410 410 static struct buffer_head *gfs2_getjdatabuf(struct gfs2_inode *ip, u64 blkno) ··· 456 456 } 457 457 if (bh) { 458 458 lock_buffer(bh); 459 - gfs2_log_lock(sdp); 459 + spin_lock(&sdp->sd_log_lock); 460 460 spin_lock(&sdp->sd_ail_lock); 461 461 gfs2_remove_from_journal(bh, ty); 462 462 spin_unlock(&sdp->sd_ail_lock); 463 - gfs2_log_unlock(sdp); 463 + spin_unlock(&sdp->sd_log_lock); 464 464 unlock_buffer(bh); 465 465 brelse(bh); 466 466 }
+10 -10
fs/gfs2/trans.c
··· 205 205 set_bit(TR_TOUCHED, &tr->tr_flags); 206 206 goto out; 207 207 } 208 - gfs2_log_lock(sdp); 208 + spin_lock(&sdp->sd_log_lock); 209 209 bd = bh->b_private; 210 210 if (bd == NULL) { 211 - gfs2_log_unlock(sdp); 211 + spin_unlock(&sdp->sd_log_lock); 212 212 unlock_buffer(bh); 213 213 if (bh->b_private == NULL) 214 214 bd = gfs2_alloc_bufdata(gl, bh); 215 215 else 216 216 bd = bh->b_private; 217 217 lock_buffer(bh); 218 - gfs2_log_lock(sdp); 218 + spin_lock(&sdp->sd_log_lock); 219 219 } 220 220 gfs2_assert(sdp, bd->bd_gl == gl); 221 221 set_bit(TR_TOUCHED, &tr->tr_flags); ··· 226 226 tr->tr_num_databuf_new++; 227 227 list_add_tail(&bd->bd_list, &tr->tr_databuf); 228 228 } 229 - gfs2_log_unlock(sdp); 229 + spin_unlock(&sdp->sd_log_lock); 230 230 out: 231 231 unlock_buffer(bh); 232 232 } ··· 266 266 set_bit(TR_TOUCHED, &tr->tr_flags); 267 267 goto out; 268 268 } 269 - gfs2_log_lock(sdp); 269 + spin_lock(&sdp->sd_log_lock); 270 270 bd = bh->b_private; 271 271 if (bd == NULL) { 272 - gfs2_log_unlock(sdp); 272 + spin_unlock(&sdp->sd_log_lock); 273 273 unlock_buffer(bh); 274 274 folio_lock(bh->b_folio); 275 275 if (bh->b_private == NULL) ··· 278 278 bd = bh->b_private; 279 279 folio_unlock(bh->b_folio); 280 280 lock_buffer(bh); 281 - gfs2_log_lock(sdp); 281 + spin_lock(&sdp->sd_log_lock); 282 282 } 283 283 gfs2_assert(sdp, bd->bd_gl == gl); 284 284 set_bit(TR_TOUCHED, &tr->tr_flags); ··· 309 309 list_add(&bd->bd_list, &tr->tr_buf); 310 310 tr->tr_num_buf_new++; 311 311 out_unlock: 312 - gfs2_log_unlock(sdp); 312 + spin_unlock(&sdp->sd_log_lock); 313 313 out: 314 314 unlock_buffer(bh); 315 315 } ··· 329 329 struct gfs2_bufdata *bd, *tmp; 330 330 unsigned int n = len; 331 331 332 - gfs2_log_lock(sdp); 332 + spin_lock(&sdp->sd_log_lock); 333 333 list_for_each_entry_safe(bd, tmp, &sdp->sd_log_revokes, bd_list) { 334 334 if ((bd->bd_blkno >= blkno) && (bd->bd_blkno < (blkno + len))) { 335 335 list_del_init(&bd->bd_list); ··· 343 343 break; 344 344 } 345 345 } 346 - gfs2_log_unlock(sdp); 346 + spin_unlock(&sdp->sd_log_lock); 347 347 }