Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1 fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'fsnotify_for_v7.1-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull fsnotify fixes from Jan Kara:
"Three fixes for fsnotify / fanotify"

* tag 'fsnotify_for_v7.1-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
fsnotify: fix inode reference leak in fsnotify_recalc_mask()
fanotify: Fix spelling mistake "enforecement" -> "enforcement"
fanotify: fix false positive on permission events

+50 -12
+1 -1
fs/notify/fanotify/fanotify.c
··· 457 457 /* 458 458 * Unlike file_handle, type and len of struct fanotify_fh are u8. 459 459 * Traditionally, filesystem return handle_type < 0xff, but there 460 - * is no enforecement for that in vfs. 460 + * is no enforcement for that in vfs. 461 461 */ 462 462 BUILD_BUG_ON(MAX_HANDLE_SZ > 0xff || FILEID_INVALID > 0xff); 463 463 if (type <= 0 || type >= FILEID_INVALID || fh_len != dwords << 2)
+1 -1
fs/notify/fsnotify.c
··· 388 388 return hlist_entry_safe(node, struct fsnotify_mark, obj_list); 389 389 } 390 390 391 - static struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark) 391 + struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark) 392 392 { 393 393 struct hlist_node *node = NULL; 394 394
+47 -10
fs/notify/mark.c
··· 238 238 return inode; 239 239 } 240 240 241 - static void *__fsnotify_recalc_mask(struct fsnotify_mark_connector *conn) 241 + /* 242 + * Calculate mask of events for a list of marks. 243 + * 244 + * Return true if any of the attached marks want to hold an inode reference. 245 + */ 246 + static bool __fsnotify_recalc_mask(struct fsnotify_mark_connector *conn) 242 247 { 243 248 u32 new_mask = 0; 244 249 bool want_iref = false; ··· 266 261 * confusing readers not holding conn->lock with partial updates. 267 262 */ 268 263 WRITE_ONCE(*fsnotify_conn_mask_p(conn), new_mask); 264 + 265 + return want_iref; 266 + } 267 + 268 + /* 269 + * Calculate mask of events for a list of marks after attach/modify mark 270 + * and get an inode reference for the connector if needed. 271 + * 272 + * A concurrent add of evictable mark and detach of non-evictable mark can 273 + * lead to __fsnotify_recalc_mask() returning false want_iref, but in this 274 + * case we defer clearing iref to fsnotify_recalc_mask_clear_iref() called 275 + * from fsnotify_put_mark(). 276 + */ 277 + static void fsnotify_recalc_mask_set_iref(struct fsnotify_mark_connector *conn) 278 + { 279 + bool has_iref = conn->flags & FSNOTIFY_CONN_FLAG_HAS_IREF; 280 + bool want_iref = __fsnotify_recalc_mask(conn) || has_iref; 281 + 282 + (void) fsnotify_update_iref(conn, want_iref); 283 + } 284 + 285 + /* 286 + * Calculate mask of events for a list of marks after detach mark 287 + * and return the inode object if its reference is no longer needed. 
288 + */ 289 + static void *fsnotify_recalc_mask_clear_iref(struct fsnotify_mark_connector *conn) 290 + { 291 + bool want_iref = __fsnotify_recalc_mask(conn); 269 292 270 293 return fsnotify_update_iref(conn, want_iref); 271 294 } ··· 331 298 332 299 spin_lock(&conn->lock); 333 300 update_children = !fsnotify_conn_watches_children(conn); 334 - __fsnotify_recalc_mask(conn); 301 + fsnotify_recalc_mask_set_iref(conn); 335 302 update_children &= fsnotify_conn_watches_children(conn); 336 303 spin_unlock(&conn->lock); 337 304 /* ··· 452 419 /* Update watched objects after detaching mark */ 453 420 if (sb) 454 421 fsnotify_update_sb_watchers(sb, conn); 455 - objp = __fsnotify_recalc_mask(conn); 422 + objp = fsnotify_recalc_mask_clear_iref(conn); 456 423 type = conn->type; 457 424 } 458 425 WRITE_ONCE(mark->connector, NULL); ··· 490 457 */ 491 458 static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark) 492 459 { 493 - if (!mark) 494 - return true; 495 - 496 460 if (refcount_inc_not_zero(&mark->refcnt)) { 497 461 spin_lock(&mark->lock); 498 462 if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) { ··· 530 500 int type; 531 501 532 502 fsnotify_foreach_iter_type(type) { 503 + struct fsnotify_mark *mark = iter_info->marks[type]; 504 + 533 505 /* This can fail if mark is being removed */ 534 - if (!fsnotify_get_mark_safe(iter_info->marks[type])) { 535 - __release(&fsnotify_mark_srcu); 536 - goto fail; 506 + while (mark && !fsnotify_get_mark_safe(mark)) { 507 + if (mark->group == iter_info->current_group) { 508 + __release(&fsnotify_mark_srcu); 509 + goto fail; 510 + } 511 + /* This is a mark in an unrelated group, skip */ 512 + mark = fsnotify_next_mark(mark); 513 + iter_info->marks[type] = mark; 537 514 } 538 515 } 539 516 540 517 /* 541 - * Now that both marks are pinned by refcount in the inode / vfsmount 518 + * Now that all marks are pinned by refcount in the inode / vfsmount / etc 542 519 * lists, we can drop SRCU lock, and safely resume the list iteration 543 520 * 
once userspace returns. 544 521 */
+1
include/linux/fsnotify_backend.h
··· 915 915 unsigned int obj_type); 916 916 extern void fsnotify_get_mark(struct fsnotify_mark *mark); 917 917 extern void fsnotify_put_mark(struct fsnotify_mark *mark); 918 + struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark); 918 919 extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info); 919 920 extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info); 920 921