@@ -160,15 +160,28 @@ static int __must_check submit_one_bio(s
return blk_status_to_errno(ret);
}
-static void flush_write_bio(struct extent_page_data *epd)
+/*
+ * Submit bio from extent page data via submit_one_bio
+ *
+ * Return 0 if everything is OK.
+ * Return <0 for error.
+ */
+static int __must_check flush_write_bio(struct extent_page_data *epd)
{
- if (epd->bio) {
- int ret;
+ int ret = 0;

+ if (epd->bio) {
ret = submit_one_bio(epd->bio, 0, 0);
- BUG_ON(ret < 0); /* -ENOMEM */
+ /*
+ * Clean up of epd->bio is handled by its endio function.
+ * Endio is triggered either by successful bio execution or by the
+ * error handler of the submit bio hook.
+ * So at this point, no matter what happened, we don't need to
+ * clean up epd->bio.
+ */
epd->bio = NULL;
}
+ return ret;
}
int __init extent_io_init(void)
@@ -3538,7 +3551,8 @@ lock_extent_buffer_for_io(struct extent_
if (!btrfs_try_tree_write_lock(eb)) {
flush = 1;
- flush_write_bio(epd);
+ ret = flush_write_bio(epd);
+ BUG_ON(ret < 0);
btrfs_tree_lock(eb);
}
@@ -3547,7 +3561,8 @@ lock_extent_buffer_for_io(struct extent_
if (!epd->sync_io)
return 0;
if (!flush) {
- flush_write_bio(epd);
+ ret = flush_write_bio(epd);
+ BUG_ON(ret < 0);
flush = 1;
}
while (1) {
@@ -3588,7 +3603,8 @@ lock_extent_buffer_for_io(struct extent_
if (!trylock_page(p)) {
if (!flush) {
- flush_write_bio(epd);
+ ret = flush_write_bio(epd);
+ BUG_ON(ret < 0);
flush = 1;
}
lock_page(p);
@@ -3779,6 +3795,7 @@ int btree_write_cache_pages(struct addre
.sync_io = wbc->sync_mode == WB_SYNC_ALL,
};
int ret = 0;
+ int flush_ret;
int done = 0;
int nr_to_write_done = 0;
struct pagevec pvec;
@@ -3878,7 +3895,8 @@ retry:
index = 0;
goto retry;
}
- flush_write_bio(&epd);
+ flush_ret = flush_write_bio(&epd);
+ BUG_ON(flush_ret < 0);
return ret;
}
@@ -3975,7 +3993,8 @@ retry:
* tmpfs file mapping
*/
if (!trylock_page(page)) {
- flush_write_bio(epd);
+ ret = flush_write_bio(epd);
+ BUG_ON(ret < 0);
lock_page(page);
}
@@ -3985,8 +4004,10 @@ retry:
}
if (wbc->sync_mode != WB_SYNC_NONE) {
- if (PageWriteback(page))
- flush_write_bio(epd);
+ if (PageWriteback(page)) {
+ ret = flush_write_bio(epd);
+ BUG_ON(ret < 0);
+ }
wait_on_page_writeback(page);
}
@@ -4045,6 +4066,7 @@ retry:
int extent_write_full_page(struct page *page, struct writeback_control *wbc)
{
int ret;
+ int flush_ret;
struct extent_page_data epd = {
.bio = NULL,
.tree = &BTRFS_I(page->mapping->host)->io_tree,
@@ -4054,7 +4076,8 @@ int extent_write_full_page(struct page *
ret = __extent_writepage(page, wbc, &epd);
- flush_write_bio(&epd);
+ flush_ret = flush_write_bio(&epd);
+ BUG_ON(flush_ret < 0);
return ret;
}
@@ -4062,6 +4085,7 @@ int extent_write_locked_range(struct ino
int mode)
{
int ret = 0;
+ int flush_ret;
struct address_space *mapping = inode->i_mapping;
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
struct page *page;
@@ -4096,7 +4120,8 @@ int extent_write_locked_range(struct ino
start += PAGE_SIZE;
}
- flush_write_bio(&epd);
+ flush_ret = flush_write_bio(&epd);
+ BUG_ON(flush_ret < 0);
return ret;
}
@@ -4104,6 +4129,7 @@ int extent_writepages(struct address_spa
struct writeback_control *wbc)
{
int ret = 0;
+ int flush_ret;
struct extent_page_data epd = {
.bio = NULL,
.tree = &BTRFS_I(mapping->host)->io_tree,
@@ -4112,7 +4138,8 @@ int extent_writepages(struct address_spa
};
ret = extent_write_cache_pages(mapping, wbc, &epd);
- flush_write_bio(&epd);
+ flush_ret = flush_write_bio(&epd);
+ BUG_ON(flush_ret < 0);
return ret;
}