Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge git://git.kvack.org/~bcrl/aio-next

Pull AIO leak fixes from Ben LaHaise:
"I've put these two patches plus Linus's change through a round of
tests, and it passes millions of iterations of the aio numa
migratepage test, as well as a number of repetitions of a few simple
read and write tests.

The first patch fixes the memory leak Kent introduced, while the
second patch makes aio_migratepage() much more paranoid and robust"

* git://git.kvack.org/~bcrl/aio-next:
aio/migratepages: make aio migrate pages sane
aio: fix kioctx leak introduced by "aio: Fix a trinity splat"

+55 -16
+46 -9
fs/aio.c
···
 244  244 		int i;
 245  245 
 246  246 		for (i = 0; i < ctx->nr_pages; i++) {
      247 +			struct page *page;
 247  248 			pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
 248  249 				 page_count(ctx->ring_pages[i]));
 249      -		put_page(ctx->ring_pages[i]);
      250 +			page = ctx->ring_pages[i];
      251 +			if (!page)
      252 +				continue;
      253 +			ctx->ring_pages[i] = NULL;
      254 +			put_page(page);
 250  255 		}
 251  256 
 252  257 		put_aio_ring_file(ctx);
···
 285  280 		unsigned long flags;
 286  281 		int rc;
 287  282 
      283 +		rc = 0;
      284 +
      285 +		/* Make sure the old page hasn't already been changed */
      286 +		spin_lock(&mapping->private_lock);
      287 +		ctx = mapping->private_data;
      288 +		if (ctx) {
      289 +			pgoff_t idx;
      290 +			spin_lock_irqsave(&ctx->completion_lock, flags);
      291 +			idx = old->index;
      292 +			if (idx < (pgoff_t)ctx->nr_pages) {
      293 +				if (ctx->ring_pages[idx] != old)
      294 +					rc = -EAGAIN;
      295 +			} else
      296 +				rc = -EINVAL;
      297 +			spin_unlock_irqrestore(&ctx->completion_lock, flags);
      298 +		} else
      299 +			rc = -EINVAL;
      300 +		spin_unlock(&mapping->private_lock);
      301 +
      302 +		if (rc != 0)
      303 +			return rc;
      304 +
 288  305 		/* Writeback must be complete */
 289  306 		BUG_ON(PageWriteback(old));
 290      -	put_page(old);
      307 +		get_page(new);
 291  308 
 292      -	rc = migrate_page_move_mapping(mapping, new, old, NULL, mode);
      309 +		rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
 293  310 		if (rc != MIGRATEPAGE_SUCCESS) {
 294      -		get_page(old);
      311 +			put_page(new);
 295  312 			return rc;
 296  313 		}
 297      -
 298      -	get_page(new);
 299  314 
 300  315 		/* We can potentially race against kioctx teardown here.  Use the
 301  316 		 * address_space's private data lock to protect the mapping's
···
 328  303 		spin_lock_irqsave(&ctx->completion_lock, flags);
 329  304 		migrate_page_copy(new, old);
 330  305 		idx = old->index;
 331      -		if (idx < (pgoff_t)ctx->nr_pages)
 332      -			ctx->ring_pages[idx] = new;
      306 +		if (idx < (pgoff_t)ctx->nr_pages) {
      307 +			/* And only do the move if things haven't changed */
      308 +			if (ctx->ring_pages[idx] == old)
      309 +				ctx->ring_pages[idx] = new;
      310 +			else
      311 +				rc = -EAGAIN;
      312 +		} else
      313 +			rc = -EINVAL;
 333  314 		spin_unlock_irqrestore(&ctx->completion_lock, flags);
 334  315 	} else
 335  316 		rc = -EBUSY;
 336  317 	spin_unlock(&mapping->private_lock);
      318 +
      319 +	if (rc == MIGRATEPAGE_SUCCESS)
      320 +		put_page(old);
      321 +	else
      322 +		put_page(new);
 337  323 
 338  324 	return rc;
 339  325 }
···
 676  640 	aio_nr += ctx->max_reqs;
 677  641 	spin_unlock(&aio_nr_lock);
 678  642 
 679      -	percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
      643 +	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
      644 +	percpu_ref_get(&ctx->reqs);	/* free_ioctx_users() will drop this */
 680  645 
 681  646 	err = ioctx_add_table(ctx, mm);
 682  647 	if (err)
+2 -1
include/linux/migrate.h
···
 55  55 		struct page *newpage, struct page *page);
 56  56 	extern int migrate_page_move_mapping(struct address_space *mapping,
 57  57 		struct page *newpage, struct page *page,
 58     -	struct buffer_head *head, enum migrate_mode mode);
     58 +	struct buffer_head *head, enum migrate_mode mode,
     59 +		int extra_count);
 59  60 	#else
 60  61 
 61  62 	static inline void putback_lru_pages(struct list_head *l) {}
+7 -6
mm/migrate.c
···
 317  317 	 */
 318  318 	int migrate_page_move_mapping(struct address_space *mapping,
 319  319 		struct page *newpage, struct page *page,
 320      -	struct buffer_head *head, enum migrate_mode mode)
     320 +	struct buffer_head *head, enum migrate_mode mode,
     321 +		int extra_count)
 321  322 	{
 322      -	int expected_count = 0;
     323 +	int expected_count = 1 + extra_count;
 323  324 		void **pslot;
 324  325 
 325  326 		if (!mapping) {
 326  327 			/* Anonymous page without mapping */
 327      -		if (page_count(page) != 1)
     328 +		if (page_count(page) != expected_count)
 328  329 				return -EAGAIN;
 329  330 			return MIGRATEPAGE_SUCCESS;
 330  331 		}
···
 335  334 		pslot = radix_tree_lookup_slot(&mapping->page_tree,
 336  335 						page_index(page));
 337  336 
 338      -	expected_count = 2 + page_has_private(page);
     337 +	expected_count += 1 + page_has_private(page);
 339  338 		if (page_count(page) != expected_count ||
 340  339 			radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
 341  340 			spin_unlock_irq(&mapping->tree_lock);
···
 585  584 
 586  585 		BUG_ON(PageWriteback(page));	/* Writeback must be complete */
 587  586 
 588      -	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
     587 +	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
 589  588 
 590  589 		if (rc != MIGRATEPAGE_SUCCESS)
 591  590 			return rc;
···
 612  611 
 613  612 		head = page_buffers(page);
 614  613 
 615      -	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
     614 +	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
 616  615 
 617  616 		if (rc != MIGRATEPAGE_SUCCESS)
 618  617 			return rc;