btrfs: locking: remove all the blocking helpers

Now that we're using a rw_semaphore we no longer need to indicate if a lock is blocking or not, nor do we need to flip the entire path from blocking to spinning. Remove these helpers and all the places they are called.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit ac5887c8e0, parent 2ae0c2d80d
15 changed files with 30 additions and 219 deletions
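For illustration, the typical call-site change in the hunks below can be sketched as follows. This is a sketch only: cow_child_before()/cow_child_after() are invented names for this note, while the lock and COW calls themselves are taken verbatim from the balance_level() hunk further down.

/* Before: take the lock, then flip it to a blocking lock so that other
 * waiters sleep instead of spinning while we do long work (the COW). */
static int cow_child_before(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct extent_buffer *child,
			    struct extent_buffer *mid,
			    struct extent_buffer **cow_ret)
{
	btrfs_tree_lock(child);
	btrfs_set_lock_blocking_write(child);	/* helper removed by this commit */
	return btrfs_cow_block(trans, root, child, mid, 0, cow_ret,
			       BTRFS_NESTING_COW);
}

/* After: the lock is backed by a rw_semaphore, so waiters already sleep;
 * there is no separate blocking state left to set. */
static int cow_child_after(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct extent_buffer *child,
			   struct extent_buffer *mid,
			   struct extent_buffer **cow_ret)
{
	btrfs_tree_lock(child);
	return btrfs_cow_block(trans, root, child, mid, 0, cow_ret,
			       BTRFS_NESTING_COW);
}

The same reasoning drives the path-level cleanup: with no spinning/blocking distinction, btrfs_set_path_blocking() and the BTRFS_*_LOCK_BLOCKING path states have nothing left to do, so they are deleted wholesale in the hunks below.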
@@ -1341,14 +1341,12 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 					goto out;
 				}
 
-				if (!path->skip_locking) {
+				if (!path->skip_locking)
 					btrfs_tree_read_lock(eb);
-					btrfs_set_lock_blocking_read(eb);
-				}
 				ret = find_extent_in_eb(eb, bytenr,
 							*extent_item_pos, &eie, ignore_offset);
 				if (!path->skip_locking)
-					btrfs_tree_read_unlock_blocking(eb);
+					btrfs_tree_read_unlock(eb);
 				free_extent_buffer(eb);
 				if (ret < 0)
 					goto out;
@@ -1685,7 +1683,7 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
 				   name_off, name_len);
 		if (eb != eb_in) {
 			if (!path->skip_locking)
-				btrfs_tree_read_unlock_blocking(eb);
+				btrfs_tree_read_unlock(eb);
 			free_extent_buffer(eb);
 		}
 		ret = btrfs_find_item(fs_root, path, parent, 0,
@@ -1705,8 +1703,6 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
 		eb = path->nodes[0];
 		/* make sure we can use eb after releasing the path */
 		if (eb != eb_in) {
-			if (!path->skip_locking)
-				btrfs_set_lock_blocking_read(eb);
 			path->nodes[0] = NULL;
 			path->locks[0] = 0;
 		}
@@ -1278,14 +1278,11 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 	if (!tm)
 		return eb;
 
-	btrfs_set_path_blocking(path);
-	btrfs_set_lock_blocking_read(eb);
-
 	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
 		BUG_ON(tm->slot != 0);
 		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
 		if (!eb_rewin) {
-			btrfs_tree_read_unlock_blocking(eb);
+			btrfs_tree_read_unlock(eb);
 			free_extent_buffer(eb);
 			return NULL;
 		}
@@ -1297,13 +1294,13 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 	} else {
 		eb_rewin = btrfs_clone_extent_buffer(eb);
 		if (!eb_rewin) {
-			btrfs_tree_read_unlock_blocking(eb);
+			btrfs_tree_read_unlock(eb);
 			free_extent_buffer(eb);
 			return NULL;
 		}
 	}
 
-	btrfs_tree_read_unlock_blocking(eb);
+	btrfs_tree_read_unlock(eb);
 	free_extent_buffer(eb);
 
 	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin),
@@ -1373,9 +1370,8 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
 		free_extent_buffer(eb_root);
 		eb = alloc_dummy_extent_buffer(fs_info, logical);
 	} else {
-		btrfs_set_lock_blocking_read(eb_root);
 		eb = btrfs_clone_extent_buffer(eb_root);
-		btrfs_tree_read_unlock_blocking(eb_root);
+		btrfs_tree_read_unlock(eb_root);
 		free_extent_buffer(eb_root);
 	}
 
@@ -1483,10 +1479,6 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
 
 	search_start = buf->start & ~((u64)SZ_1G - 1);
 
-	if (parent)
-		btrfs_set_lock_blocking_write(parent);
-	btrfs_set_lock_blocking_write(buf);
-
 	/*
 	 * Before CoWing this block for later modification, check if it's
 	 * the subtree root and do the delayed subtree trace if needed.
@@ -1604,8 +1596,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 	if (parent_nritems <= 1)
 		return 0;
 
-	btrfs_set_lock_blocking_write(parent);
-
 	for (i = start_slot; i <= end_slot; i++) {
 		struct btrfs_key first_key;
 		int close = 1;
@@ -1663,7 +1653,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 			search_start = last_block;
 
 		btrfs_tree_lock(cur);
-		btrfs_set_lock_blocking_write(cur);
 		err = __btrfs_cow_block(trans, root, cur, parent, i,
 					&cur, search_start,
 					min(16 * blocksize,
@@ -1835,8 +1824,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 
 	mid = path->nodes[level];
 
-	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
-		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
+	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
 
 	orig_ptr = btrfs_node_blockptr(mid, orig_slot);
@@ -1865,7 +1853,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 		}
 
 		btrfs_tree_lock(child);
-		btrfs_set_lock_blocking_write(child);
 		ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
 				      BTRFS_NESTING_COW);
 		if (ret) {
@@ -1904,7 +1891,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 
 	if (left) {
 		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
-		btrfs_set_lock_blocking_write(left);
 		wret = btrfs_cow_block(trans, root, left,
 				       parent, pslot - 1, &left,
 				       BTRFS_NESTING_LEFT_COW);
@@ -1920,7 +1906,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 
 	if (right) {
 		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
-		btrfs_set_lock_blocking_write(right);
 		wret = btrfs_cow_block(trans, root, right,
 				       parent, pslot + 1, &right,
 				       BTRFS_NESTING_RIGHT_COW);
@@ -2084,7 +2069,6 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
 		u32 left_nr;
 
 		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
-		btrfs_set_lock_blocking_write(left);
 
 		left_nr = btrfs_header_nritems(left);
 		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
@@ -2139,7 +2123,6 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
 		u32 right_nr;
 
 		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
-		btrfs_set_lock_blocking_write(right);
 
 		right_nr = btrfs_header_nritems(right);
 		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
@@ -2399,14 +2382,6 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
 			return 0;
 		}
 
-		/* the pages were up to date, but we failed
-		 * the generation number check.  Do a full
-		 * read for the generation number that is correct.
-		 * We must do this without dropping locks so
-		 * we can trust our generation number
-		 */
-		btrfs_set_path_blocking(p);
-
 		/* now we're allowed to do a blocking uptodate check */
 		ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key);
 		if (!ret) {
@@ -2426,7 +2401,6 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
 	 * out which blocks to read.
 	 */
 	btrfs_unlock_up_safe(p, level + 1);
-	btrfs_set_path_blocking(p);
 
 	if (p->reada != READA_NONE)
 		reada_for_search(fs_info, p, level, slot, key->objectid);
@@ -2480,7 +2454,6 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
 			goto again;
 		}
 
-		btrfs_set_path_blocking(p);
 		reada_for_balance(fs_info, p, level);
 		sret = split_node(trans, root, p, level);
 
@@ -2500,7 +2473,6 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
 			goto again;
 		}
 
-		btrfs_set_path_blocking(p);
 		reada_for_balance(fs_info, p, level);
 		sret = balance_level(trans, root, p, level);
 
@@ -2752,7 +2724,6 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 				goto again;
 			}
 
-			btrfs_set_path_blocking(p);
 			if (last_level)
 				err = btrfs_cow_block(trans, root, b, NULL, 0,
 						      &b,
@@ -2822,7 +2793,6 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 					goto again;
 				}
 
-				btrfs_set_path_blocking(p);
 				err = split_leaf(trans, root, key,
 						 p, ins_len, ret == 0);
 
@@ -2884,17 +2854,11 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		if (!p->skip_locking) {
 			level = btrfs_header_level(b);
 			if (level <= write_lock_level) {
-				if (!btrfs_try_tree_write_lock(b)) {
-					btrfs_set_path_blocking(p);
-					btrfs_tree_lock(b);
-				}
+				btrfs_tree_lock(b);
 				p->locks[level] = BTRFS_WRITE_LOCK;
 			} else {
-				if (!btrfs_tree_read_lock_atomic(b)) {
-					btrfs_set_path_blocking(p);
-					__btrfs_tree_read_lock(b, BTRFS_NESTING_NORMAL,
-							       p->recurse);
-				}
+				__btrfs_tree_read_lock(b, BTRFS_NESTING_NORMAL,
						       p->recurse);
 				p->locks[level] = BTRFS_READ_LOCK;
 			}
 			p->nodes[level] = b;
@@ -2902,12 +2866,6 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	}
 	ret = 1;
 done:
-	/*
-	 * we don't really know what they plan on doing with the path
-	 * from here on, so for now just mark it as blocking
-	 */
-	if (!p->leave_spinning)
-		btrfs_set_path_blocking(p);
 	if (ret < 0 && !p->skip_release_on_error)
 		btrfs_release_path(p);
 	return ret;
@@ -2999,10 +2957,7 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
 		}
 
 		level = btrfs_header_level(b);
-		if (!btrfs_tree_read_lock_atomic(b)) {
-			btrfs_set_path_blocking(p);
-			btrfs_tree_read_lock(b);
-		}
+		btrfs_tree_read_lock(b);
 		b = tree_mod_log_rewind(fs_info, p, b, time_seq);
 		if (!b) {
 			ret = -ENOMEM;
@@ -3013,8 +2968,6 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
 	}
 	ret = 1;
 done:
-	if (!p->leave_spinning)
-		btrfs_set_path_blocking(p);
 	if (ret < 0)
 		btrfs_release_path(p);
 
@@ -3441,7 +3394,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 	add_root_to_dirty_list(root);
 	atomic_inc(&c->refs);
 	path->nodes[level] = c;
-	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+	path->locks[level] = BTRFS_WRITE_LOCK;
 	path->slots[level] = 0;
 	return 0;
 }
@@ -3814,7 +3767,6 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
 		return 1;
 
 	__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
-	btrfs_set_lock_blocking_write(right);
 
 	free_space = btrfs_leaf_free_space(right);
 	if (free_space < data_size)
@@ -4053,7 +4005,6 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
 		return 1;
 
 	__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
-	btrfs_set_lock_blocking_write(left);
 
 	free_space = btrfs_leaf_free_space(left);
 	if (free_space < data_size) {
@@ -4448,7 +4399,6 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
 			goto err;
 	}
 
-	btrfs_set_path_blocking(path);
 	ret = split_leaf(trans, root, &key, path, ins_len, 1);
 	if (ret)
 		goto err;
@@ -4478,8 +4428,6 @@ static noinline int split_item(struct btrfs_path *path,
 	leaf = path->nodes[0];
 	BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item));
 
-	btrfs_set_path_blocking(path);
-
 	item = btrfs_item_nr(path->slots[0]);
 	orig_offset = btrfs_item_offset(leaf, item);
 	item_size = btrfs_item_size(leaf, item);
@@ -5055,7 +5003,6 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		if (leaf == root->node) {
 			btrfs_set_header_level(leaf, 0);
 		} else {
-			btrfs_set_path_blocking(path);
 			btrfs_clean_tree_block(leaf);
 			btrfs_del_leaf(trans, root, path, leaf);
 		}
@@ -5077,7 +5024,6 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 			slot = path->slots[1];
 			atomic_inc(&leaf->refs);
 
-			btrfs_set_path_blocking(path);
 			wret = push_leaf_left(trans, root, path, 1, 1,
 					      1, (u32)-1);
 			if (wret < 0 && wret != -ENOSPC)
@@ -5248,7 +5194,6 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
 		 */
 		if (slot >= nritems) {
 			path->slots[level] = slot;
-			btrfs_set_path_blocking(path);
 			sret = btrfs_find_next_key(root, path, min_key, level,
 						  min_trans);
 			if (sret == 0) {
@@ -5265,7 +5210,6 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
 			ret = 0;
 			goto out;
 		}
-		btrfs_set_path_blocking(path);
 		cur = btrfs_read_node_slot(cur, slot);
 		if (IS_ERR(cur)) {
 			ret = PTR_ERR(cur);
@@ -5282,7 +5226,6 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
 	path->keep_locks = keep_locks;
 	if (ret == 0) {
 		btrfs_unlock_up_safe(path, path->lowest_level + 1);
-		btrfs_set_path_blocking(path);
 		memcpy(min_key, &found_key, sizeof(found_key));
 	}
 	return ret;
@@ -5492,7 +5435,6 @@ int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
 				goto again;
 			}
 			if (!ret) {
-				btrfs_set_path_blocking(path);
 				__btrfs_tree_read_lock(next,
 						       BTRFS_NESTING_RIGHT,
 						       path->recurse);
@@ -5527,13 +5469,8 @@ int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
 		}
 
 		if (!path->skip_locking) {
-			ret = btrfs_try_tree_read_lock(next);
-			if (!ret) {
-				btrfs_set_path_blocking(path);
-				__btrfs_tree_read_lock(next,
-						       BTRFS_NESTING_RIGHT,
-						       path->recurse);
-			}
+			__btrfs_tree_read_lock(next, BTRFS_NESTING_RIGHT,
					       path->recurse);
 			next_rw_lock = BTRFS_READ_LOCK;
 		}
 	}
@@ -5541,8 +5478,6 @@ int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
 done:
 	unlock_up(path, 0, 1, 0, NULL);
 	path->leave_spinning = old_spinning;
-	if (!old_spinning)
-		btrfs_set_path_blocking(path);
 
 	return ret;
 }
@@ -5564,7 +5499,6 @@ int btrfs_previous_item(struct btrfs_root *root,
 
 	while (1) {
 		if (path->slots[0] == 0) {
-			btrfs_set_path_blocking(path);
 			ret = btrfs_prev_leaf(root, path);
 			if (ret != 0)
 				return ret;
@@ -5606,7 +5540,6 @@ int btrfs_previous_extent_item(struct btrfs_root *root,
 
 	while (1) {
 		if (path->slots[0] == 0) {
-			btrfs_set_path_blocking(path);
 			ret = btrfs_prev_leaf(root, path);
 			if (ret != 0)
 				return ret;
@@ -740,13 +740,6 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
 		goto out;
 	}
 
-	/*
-	 * we need allocate some memory space, but it might cause the task
-	 * to sleep, so we set all locked nodes in the path to blocking locks
-	 * first.
-	 */
-	btrfs_set_path_blocking(path);
-
 	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
 	if (!keys) {
 		ret = -ENOMEM;
@@ -250,10 +250,8 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 	if (atomic)
 		return -EAGAIN;
 
-	if (need_lock) {
+	if (need_lock)
 		btrfs_tree_read_lock(eb);
-		btrfs_set_lock_blocking_read(eb);
-	}
 
 	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
 			 &cached_state);
@@ -282,7 +280,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
 			     &cached_state);
 	if (need_lock)
-		btrfs_tree_read_unlock_blocking(eb);
+		btrfs_tree_read_unlock(eb);
 	return ret;
 }
 
@@ -1013,8 +1011,6 @@ void btrfs_clean_tree_block(struct extent_buffer *buf)
 			percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
 						 -buf->len,
 						 fs_info->dirty_metadata_batch);
-			/* ugh, clear_extent_buffer_dirty needs to lock the page */
-			btrfs_set_lock_blocking_write(buf);
 			clear_extent_buffer_dirty(buf);
 		}
 	}
@@ -4665,7 +4665,6 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	btrfs_clean_tree_block(buf);
 	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
 
-	btrfs_set_lock_blocking_write(buf);
 	set_extent_buffer_uptodate(buf);
 
 	memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header));
@@ -5054,7 +5053,6 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 		reada = 1;
 	}
 	btrfs_tree_lock(next);
-	btrfs_set_lock_blocking_write(next);
 
 	ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
 				       &wc->refs[level - 1],
@@ -5114,7 +5112,6 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 			return -EIO;
 		}
 		btrfs_tree_lock(next);
-		btrfs_set_lock_blocking_write(next);
 	}
 
 	level--;
@@ -5126,7 +5123,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 	}
 	path->nodes[level] = next;
 	path->slots[level] = 0;
-	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+	path->locks[level] = BTRFS_WRITE_LOCK;
 	wc->level = level;
 	if (wc->level == 1)
 		wc->reada_slot = 0;
@@ -5254,8 +5251,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
 		if (!path->locks[level]) {
 			BUG_ON(level == 0);
 			btrfs_tree_lock(eb);
-			btrfs_set_lock_blocking_write(eb);
-			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+			path->locks[level] = BTRFS_WRITE_LOCK;
 
 			ret = btrfs_lookup_extent_info(trans, fs_info,
 						       eb->start, level, 1,
@@ -5298,8 +5294,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
 		if (!path->locks[level] &&
 		    btrfs_header_generation(eb) == trans->transid) {
 			btrfs_tree_lock(eb);
-			btrfs_set_lock_blocking_write(eb);
-			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+			path->locks[level] = BTRFS_WRITE_LOCK;
 		}
 		btrfs_clean_tree_block(eb);
 	}
@@ -5467,9 +5462,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
 	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
 		level = btrfs_header_level(root->node);
 		path->nodes[level] = btrfs_lock_root_node(root);
-		btrfs_set_lock_blocking_write(path->nodes[level]);
 		path->slots[level] = 0;
-		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+		path->locks[level] = BTRFS_WRITE_LOCK;
 		memset(&wc->update_progress, 0,
 		       sizeof(wc->update_progress));
 	} else {
@@ -5497,8 +5491,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
 		level = btrfs_header_level(root->node);
 		while (1) {
 			btrfs_tree_lock(path->nodes[level]);
-			btrfs_set_lock_blocking_write(path->nodes[level]);
-			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+			path->locks[level] = BTRFS_WRITE_LOCK;
 
 			ret = btrfs_lookup_extent_info(trans, fs_info,
 						path->nodes[level]->start,
@@ -5685,7 +5678,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
 	level = btrfs_header_level(node);
 	path->nodes[level] = node;
 	path->slots[level] = 0;
-	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+	path->locks[level] = BTRFS_WRITE_LOCK;
 
 	wc->refs[parent_level] = 1;
 	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
@@ -991,8 +991,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
 	 * write lock.
 	 */
 	if (!ret && replace_extent && leafs_visited == 1 &&
-	    (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
-	     path->locks[0] == BTRFS_WRITE_LOCK) &&
+	    path->locks[0] == BTRFS_WRITE_LOCK &&
 	    btrfs_leaf_free_space(leaf) >=
 	    sizeof(struct btrfs_item) + extent_item_size) {
 
@@ -6789,7 +6789,6 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
 		em->orig_start = em->start;
 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
 
-		btrfs_set_path_blocking(path);
 		if (!PageUptodate(page)) {
 			if (btrfs_file_extent_compression(leaf, item) !=
 			    BTRFS_COMPRESS_NONE) {
@@ -50,31 +50,6 @@
  *
  */
 
-/*
- * Mark already held read lock as blocking. Can be nested in write lock by the
- * same thread.
- *
- * Use when there are potentially long operations ahead so other thread waiting
- * on the lock will not actively spin but sleep instead.
- *
- * The rwlock is released and blocking reader counter is increased.
- */
-void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
-{
-}
-
-/*
- * Mark already held write lock as blocking.
- *
- * Use when there are potentially long operations ahead so other threads
- * waiting on the lock will not actively spin but sleep instead.
- *
- * The rwlock is released and blocking writers is set.
- */
-void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
-{
-}
-
 /*
  * __btrfs_tree_read_lock - lock extent buffer for read
  * @eb:		the eb to be locked
@@ -130,17 +105,6 @@ void btrfs_tree_read_lock(struct extent_buffer *eb)
 	__btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL, false);
 }
 
-/*
- * Lock extent buffer for read, optimistically expecting that there are no
- * contending blocking writers. If there are, don't wait.
- *
- * Return 1 if the rwlock has been taken, 0 otherwise
- */
-int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
-{
-	return btrfs_try_tree_read_lock(eb);
-}
-
 /*
  * Try-lock for read.
  *
@@ -192,18 +156,6 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
 	up_read(&eb->lock);
 }
 
-/*
- * Release read lock, previously set to blocking by a pairing call to
- * btrfs_set_lock_blocking_read(). Can be nested in write lock by the same
- * thread.
- *
- * State of rwlock is unchanged, last reader wakes waiting threads.
- */
-void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
-{
-	btrfs_tree_read_unlock(eb);
-}
-
 /*
  * __btrfs_tree_lock - lock eb for write
  * @eb:		the eb to lock
@@ -239,32 +191,6 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
 	up_write(&eb->lock);
 }
 
-/*
- * Set all locked nodes in the path to blocking locks.  This should be done
- * before scheduling
- */
-void btrfs_set_path_blocking(struct btrfs_path *p)
-{
-	int i;
-
-	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
-		if (!p->nodes[i] || !p->locks[i])
-			continue;
-		/*
-		 * If we currently have a spinning reader or writer lock this
-		 * will bump the count of blocking holders and drop the
-		 * spinlock.
-		 */
-		if (p->locks[i] == BTRFS_READ_LOCK) {
-			btrfs_set_lock_blocking_read(p->nodes[i]);
-			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
-		} else if (p->locks[i] == BTRFS_WRITE_LOCK) {
-			btrfs_set_lock_blocking_write(p->nodes[i]);
-			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
-		}
-	}
-}
-
 /*
  * This releases any locks held in the path starting at level and going all the
  * way up to the root.
@@ -13,8 +13,6 @@
 
 #define BTRFS_WRITE_LOCK 1
 #define BTRFS_READ_LOCK 2
-#define BTRFS_WRITE_LOCK_BLOCKING 3
-#define BTRFS_READ_LOCK_BLOCKING 4
 
 /*
  * We are limited in number of subclasses by MAX_LOCKDEP_SUBCLASSES, which at
@@ -93,12 +91,8 @@ void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting ne
 			    bool recurse);
 void btrfs_tree_read_lock(struct extent_buffer *eb);
 void btrfs_tree_read_unlock(struct extent_buffer *eb);
-void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb);
-void btrfs_set_lock_blocking_read(struct extent_buffer *eb);
-void btrfs_set_lock_blocking_write(struct extent_buffer *eb);
 int btrfs_try_tree_read_lock(struct extent_buffer *eb);
 int btrfs_try_tree_write_lock(struct extent_buffer *eb);
-int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
 struct extent_buffer *__btrfs_read_lock_root_node(struct btrfs_root *root,
 						  bool recurse);
@@ -116,15 +110,12 @@ static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) {
 static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
 #endif
 
-void btrfs_set_path_blocking(struct btrfs_path *p);
 void btrfs_unlock_up_safe(struct btrfs_path *path, int level);
 
 static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
 {
-	if (rw == BTRFS_WRITE_LOCK || rw == BTRFS_WRITE_LOCK_BLOCKING)
+	if (rw == BTRFS_WRITE_LOCK)
 		btrfs_tree_unlock(eb);
-	else if (rw == BTRFS_READ_LOCK_BLOCKING)
-		btrfs_tree_read_unlock_blocking(eb);
 	else if (rw == BTRFS_READ_LOCK)
 		btrfs_tree_read_unlock(eb);
 	else
@@ -1970,8 +1970,7 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
 			src_path->nodes[cur_level] = eb;
 
 			btrfs_tree_read_lock(eb);
-			btrfs_set_lock_blocking_read(eb);
-			src_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
+			src_path->locks[cur_level] = BTRFS_READ_LOCK;
 		}
 
 		src_path->slots[cur_level] = dst_path->slots[cur_level];
@@ -2111,8 +2110,7 @@ static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
 		dst_path->slots[cur_level] = 0;
 
 		btrfs_tree_read_lock(eb);
-		btrfs_set_lock_blocking_read(eb);
-		dst_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
+		dst_path->locks[cur_level] = BTRFS_READ_LOCK;
 		need_cleanup = true;
 	}
 
@@ -2286,8 +2284,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
 			path->slots[level] = 0;
 
 			btrfs_tree_read_lock(eb);
-			btrfs_set_lock_blocking_read(eb);
-			path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
+			path->locks[level] = BTRFS_READ_LOCK;
 
 			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
 							fs_info->nodesize,
@@ -575,10 +575,9 @@ static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
 				return -EIO;
 			}
 			btrfs_tree_read_lock(eb);
-			btrfs_set_lock_blocking_read(eb);
 			path->nodes[level-1] = eb;
 			path->slots[level-1] = 0;
-			path->locks[level-1] = BTRFS_READ_LOCK_BLOCKING;
+			path->locks[level-1] = BTRFS_READ_LOCK;
 		} else {
 			ret = process_leaf(root, path, bytenr, num_bytes);
 			if (ret)
@@ -1000,11 +999,10 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
 		return -ENOMEM;
 
 	eb = btrfs_read_lock_root_node(fs_info->extent_root);
-	btrfs_set_lock_blocking_read(eb);
 	level = btrfs_header_level(eb);
 	path->nodes[level] = eb;
 	path->slots[level] = 0;
-	path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
+	path->locks[level] = BTRFS_READ_LOCK;
 
 	while (1) {
 		/*
@@ -1196,7 +1196,6 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
 	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
 
 	eb = btrfs_lock_root_node(dest);
-	btrfs_set_lock_blocking_write(eb);
 	level = btrfs_header_level(eb);
 
 	if (level < lowest_level) {
@@ -1210,7 +1209,6 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
 				      BTRFS_NESTING_COW);
 		BUG_ON(ret);
 	}
-	btrfs_set_lock_blocking_write(eb);
 
 	if (next_key) {
 		next_key->objectid = (u64)-1;
@@ -1279,7 +1277,6 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
 						      BTRFS_NESTING_COW);
 				BUG_ON(ret);
 			}
-			btrfs_set_lock_blocking_write(eb);
 
 			btrfs_tree_unlock(parent);
 			free_extent_buffer(parent);
@@ -2309,7 +2306,6 @@ static int do_relocation(struct btrfs_trans_handle *trans,
 			goto next;
 		}
 		btrfs_tree_lock(eb);
-		btrfs_set_lock_blocking_write(eb);
 
 		if (!node->eb) {
 			ret = btrfs_cow_block(trans, root, eb, upper->eb,
@@ -1598,8 +1598,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 		goto fail;
 	}
 
-	btrfs_set_lock_blocking_write(old);
-
 	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
 	/* clean up in any case */
 	btrfs_tree_unlock(old);
@@ -52,7 +52,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
 		u32 nritems;
 
 		root_node = btrfs_lock_root_node(root);
-		btrfs_set_lock_blocking_write(root_node);
 		nritems = btrfs_header_nritems(root_node);
 		root->defrag_max.objectid = 0;
 		/* from above we know this is not a leaf */
@@ -2722,7 +2722,6 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
 
 				if (trans) {
 					btrfs_tree_lock(next);
-					btrfs_set_lock_blocking_write(next);
 					btrfs_clean_tree_block(next);
 					btrfs_wait_tree_block_writeback(next);
 					btrfs_tree_unlock(next);
@@ -2791,7 +2790,6 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
 
 				if (trans) {
 					btrfs_tree_lock(next);
-					btrfs_set_lock_blocking_write(next);
 					btrfs_clean_tree_block(next);
 					btrfs_wait_tree_block_writeback(next);
 					btrfs_tree_unlock(next);
@@ -2873,7 +2871,6 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
 
 			if (trans) {
 				btrfs_tree_lock(next);
-				btrfs_set_lock_blocking_write(next);
 				btrfs_clean_tree_block(next);
 				btrfs_wait_tree_block_writeback(next);
 				btrfs_tree_unlock(next);
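The surviving helpers in the locking hunks above are thin wrappers around the rw_semaphore embedded in the extent buffer: the unlock paths show up_read(&eb->lock) and up_write(&eb->lock). A minimal model of that end state, assuming the obvious down_read()/down_write() acquire side (the acquire bodies are outside the hunks shown here) and ignoring the lockdep nesting and recursion handling that BTRFS_NESTING_* and path->recurse refer to, is:

#include <linux/rwsem.h>

/* Minimal model only, not the btrfs implementation: one rw_semaphore per
 * buffer, read side for parallel searches, write side for modifications.
 * init_rwsem(&eb->lock) would be done when the buffer is allocated. */
struct toy_extent_buffer {
	struct rw_semaphore lock;
};

static inline void toy_tree_read_lock(struct toy_extent_buffer *eb)
{
	down_read(&eb->lock);	/* sleeps under contention; no spin state */
}

static inline void toy_tree_read_unlock(struct toy_extent_buffer *eb)
{
	up_read(&eb->lock);	/* mirrors btrfs_tree_read_unlock() above */
}

static inline void toy_tree_lock(struct toy_extent_buffer *eb)
{
	down_write(&eb->lock);
}

static inline void toy_tree_unlock(struct toy_extent_buffer *eb)
{
	up_write(&eb->lock);	/* mirrors btrfs_tree_unlock() above */
}

Because both sides of the rwsem already block, a holder that is about to do slow work (I/O, allocation, COW) simply keeps holding the lock, which is why every btrfs_set_lock_blocking_*() and btrfs_set_path_blocking() call site above reduces to nothing.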