Merge branch 'for-2.6.39/drivers' of git://git.kernel.dk/linux-2.6-block

* 'for-2.6.39/drivers' of git://git.kernel.dk/linux-2.6-block: (122 commits)
  cciss: fix lost command issue
  drbd: need include for bitops functions declarations
  Revert "cciss: Add missing allocation in scsi_cmd_stack_setup and corresponding deallocation"
  cciss: fix missed command status value CMD_UNABORTABLE
  cciss: remove unnecessary casts
  cciss: Mask off error bits of c->busaddr in cmd_special_free when calling pci_free_consistent
  cciss: Inform controller we are using 32-bit tags.
  cciss: hoist tag masking out of loop
  cciss: Add missing allocation in scsi_cmd_stack_setup and corresponding deallocation
  cciss: export resettable host attribute
  drbd: drop code present under #ifdef which is relevant to 2.6.28 and below
  drbd: Fixed handling of read errors on a 'VerifyS' node
  drbd: Fixed handling of read errors on a 'VerifyT' node
  drbd: Implemented real timeout checking for request processing time
  drbd: Remove unused function atodb_endio()
  drbd: improve log message if received sector offset exceeds local capacity
  drbd: kill dead code
  drbd: don't BUG_ON, if bio_add_page of a single page to an empty bio fails
  drbd: Removed left over, now wrong comments
  drbd: serialize admin requests for new verify run with pending bitmap io
  ...
This commit is contained in commit 8d49a77568 (21 changed files with 2273 additions and 1409 deletions).
Documentation/ABI/testing/sysfs-bus-pci-devices-cciss:

@@ -59,3 +59,15 @@ Kernel Version: 2.6.31
 Contact:	iss_storagedev@hp.com
 Description:	Displays the usage count (number of opens) of logical drive Y
 		of controller X.
+
+Where:		/sys/bus/pci/devices/<dev>/ccissX/resettable
+Date:		February 2011
+Kernel Version:	2.6.38
+Contact:	iss_storagedev@hp.com
+Description:	Value of 1 indicates the controller can honor the reset_devices
+		kernel parameter.  Value of 0 indicates reset_devices cannot be
+		honored.  This is to allow, for example, kexec tools to be able
+		to warn the user if they designate an unresettable device as
+		a dump device, as kdump requires resetting the device in order
+		to work reliably.
+
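A usage sketch for the new attribute (illustrative, not part of the patch): a kexec/kdump helper could check it before configuring a crash dump target. The helper name and example path below are hypothetical; the ccissX directory name depends on the controller number.

```c
/* Hypothetical helper: decide whether a cciss controller can honor
 * reset_devices by reading its sysfs "resettable" attribute. */
#include <stdio.h>

static int cciss_is_resettable(const char *cciss_dir)
{
	char path[512];
	int val = -1;
	FILE *f;

	/* e.g. cciss_dir = "/sys/bus/pci/devices/0000:06:00.0/cciss0" */
	snprintf(path, sizeof(path), "%s/resettable", cciss_dir);
	f = fopen(path, "r");
	if (!f)
		return -1;	/* attribute absent: kernel without this patch */
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;		/* 1 = resettable, 0 = not, -1 = unknown */
}

int main(void)
{
	int r = cciss_is_resettable("/sys/bus/pci/devices/0000:06:00.0/cciss0");
	printf("resettable: %d\n", r);
	return 0;
}
```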
drivers/block/cciss.c:

@@ -193,7 +193,7 @@ static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev,
 	u64 *cfg_offset);
 static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
 	unsigned long *memory_bar);
-
+static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag);
 
 /* performant mode helper functions */
 static void  calc_bucket_map(int *bucket, int num_buckets, int nsgs,

@@ -231,7 +231,7 @@ static const struct block_device_operations cciss_fops = {
  */
 static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c)
 {
-	if (likely(h->transMethod == CFGTBL_Trans_Performant))
+	if (likely(h->transMethod & CFGTBL_Trans_Performant))
 		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
 }
 
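Why the change from `==` to `&` matters, in a standalone sketch: transMethod becomes a flag word once CFGTBL_Trans_use_short_tags (added in the cciss_cmd.h hunk further down) can be set alongside the performant bit, so an equality test silently stops matching. The constant values are copied from that hunk; the demo itself is not from the patch.

```c
/* Illustrative only: testing a flags word with "==" breaks once a
 * second flag can be set at the same time. */
#include <assert.h>

#define CFGTBL_Trans_Performant     0x00000004l
#define CFGTBL_Trans_use_short_tags 0x20000000l

int main(void)
{
	long transMethod = CFGTBL_Trans_Performant | CFGTBL_Trans_use_short_tags;

	assert(!(transMethod == CFGTBL_Trans_Performant)); /* old test: false */
	assert(transMethod & CFGTBL_Trans_Performant);     /* new test: true */
	return 0;
}
```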
@@ -556,6 +556,44 @@ static void __devinit cciss_procinit(ctlr_info_t *h)
 #define to_hba(n) container_of(n, struct ctlr_info, dev)
 #define to_drv(n) container_of(n, drive_info_struct, dev)
 
+/* List of controllers which cannot be reset on kexec with reset_devices */
+static u32 unresettable_controller[] = {
+	0x324a103C, /* Smart Array P712m */
+	0x324b103C, /* SmartArray P711m */
+	0x3223103C, /* Smart Array P800 */
+	0x3234103C, /* Smart Array P400 */
+	0x3235103C, /* Smart Array P400i */
+	0x3211103C, /* Smart Array E200i */
+	0x3212103C, /* Smart Array E200 */
+	0x3213103C, /* Smart Array E200i */
+	0x3214103C, /* Smart Array E200i */
+	0x3215103C, /* Smart Array E200i */
+	0x3237103C, /* Smart Array E500 */
+	0x323D103C, /* Smart Array P700m */
+	0x409C0E11, /* Smart Array 6400 */
+	0x409D0E11, /* Smart Array 6400 EM */
+};
+
+static int ctlr_is_resettable(struct ctlr_info *h)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
+		if (unresettable_controller[i] == h->board_id)
+			return 0;
+	return 1;
+}
+
+static ssize_t host_show_resettable(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct ctlr_info *h = to_hba(dev);
+
+	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h));
+}
+static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL);
+
 static ssize_t host_store_rescan(struct device *dev,
 				 struct device_attribute *attr,
 				 const char *buf, size_t count)

@@ -741,6 +779,7 @@ static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
 
 static struct attribute *cciss_host_attrs[] = {
 	&dev_attr_rescan.attr,
+	&dev_attr_resettable.attr,
 	NULL
 };
 

@@ -973,8 +1012,8 @@ static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c)
 	temp64.val32.upper = c->ErrDesc.Addr.upper;
 	pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
 			    c->err_info, (dma_addr_t) temp64.val);
-	pci_free_consistent(h->pdev, sizeof(CommandList_struct),
-			    c, (dma_addr_t) c->busaddr);
+	pci_free_consistent(h->pdev, sizeof(CommandList_struct), c,
+		(dma_addr_t) cciss_tag_discard_error_bits(h, (u32) c->busaddr));
 }
 
 static inline ctlr_info_t *get_host(struct gendisk *disk)

@@ -1490,8 +1529,7 @@ static int cciss_bigpassthru(ctlr_info_t *h, void __user *argp)
 		return -EINVAL;
 	if (!capable(CAP_SYS_RAWIO))
 		return -EPERM;
-	ioc = (BIG_IOCTL_Command_struct *)
-	    kmalloc(sizeof(*ioc), GFP_KERNEL);
+	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
 	if (!ioc) {
 		status = -ENOMEM;
 		goto cleanup1;

@@ -2653,6 +2691,10 @@ static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c)
 			c->Request.CDB[0]);
 		return_status = IO_NEEDS_RETRY;
 		break;
+	case CMD_UNABORTABLE:
+		dev_warn(&h->pdev->dev, "cmd unabortable\n");
+		return_status = IO_ERROR;
+		break;
 	default:
 		dev_warn(&h->pdev->dev, "cmd 0x%02x returned "
 		       "unknown status %x\n", c->Request.CDB[0],

@@ -3103,6 +3145,13 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 			(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
 				DID_PASSTHROUGH : DID_ERROR);
 		break;
+	case CMD_UNABORTABLE:
+		dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd);
+		rq->errors = make_status_bytes(SAM_STAT_GOOD,
+			cmd->err_info->CommandStatus, DRIVER_OK,
+			cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC ?
+				DID_PASSTHROUGH : DID_ERROR);
+		break;
 	default:
 		dev_warn(&h->pdev->dev, "cmd %p returned "
 		       "unknown status %x\n", cmd,

@@ -3136,10 +3185,13 @@ static inline u32 cciss_tag_to_index(u32 tag)
 	return tag >> DIRECT_LOOKUP_SHIFT;
 }
 
-static inline u32 cciss_tag_discard_error_bits(u32 tag)
+static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag)
 {
-#define CCISS_ERROR_BITS 0x03
-	return tag & ~CCISS_ERROR_BITS;
+#define CCISS_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
+#define CCISS_SIMPLE_ERROR_BITS 0x03
+	if (likely(h->transMethod & CFGTBL_Trans_Performant))
+		return tag & ~CCISS_PERF_ERROR_BITS;
+	return tag & ~CCISS_SIMPLE_ERROR_BITS;
 }
 
 static inline void cciss_mark_tag_indexed(u32 *tag)
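A standalone sketch of the two masks above. DIRECT_LOOKUP_SHIFT is not defined anywhere in this diff; the value 4 below is an assumption for illustration only.

```c
/* Illustrative sketch of the tag masks; DIRECT_LOOKUP_SHIFT = 4 is an
 * assumption for this example, not taken from the diff. */
#include <assert.h>
#include <stdint.h>

#define DIRECT_LOOKUP_SHIFT 4
#define CCISS_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)	/* 0x0f */
#define CCISS_SIMPLE_ERROR_BITS 0x03

int main(void)
{
	uint32_t raw_tag = 0x1230 | 0x5;	/* tag with low status bits set */

	/* Performant mode: the low DIRECT_LOOKUP_SHIFT bits carry status. */
	assert((raw_tag & ~CCISS_PERF_ERROR_BITS) == 0x1230);
	/* Simple mode: only the low two bits are error/status bits. */
	assert((raw_tag & ~CCISS_SIMPLE_ERROR_BITS) == 0x1234);
	return 0;
}
```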
@@ -3359,7 +3411,7 @@ static inline u32 next_command(ctlr_info_t *h)
 {
 	u32 a;
 
-	if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
+	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
 		return h->access.command_completed(h);
 
 	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {

@@ -3394,14 +3446,12 @@ static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag)
 /* process completion of a non-indexed command */
 static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag)
 {
-	u32 tag;
 	CommandList_struct *c = NULL;
 	__u32 busaddr_masked, tag_masked;
 
-	tag = cciss_tag_discard_error_bits(raw_tag);
+	tag_masked = cciss_tag_discard_error_bits(h, raw_tag);
 	list_for_each_entry(c, &h->cmpQ, list) {
-		busaddr_masked = cciss_tag_discard_error_bits(c->busaddr);
-		tag_masked = cciss_tag_discard_error_bits(tag);
+		busaddr_masked = cciss_tag_discard_error_bits(h, c->busaddr);
 		if (busaddr_masked == tag_masked) {
 			finish_cmd(h, c, raw_tag);
 			return next_command(h);

@@ -3753,7 +3803,8 @@ static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h)
 	}
 }
 
-static __devinit void cciss_enter_performant_mode(ctlr_info_t *h)
+static __devinit void cciss_enter_performant_mode(ctlr_info_t *h,
+	u32 use_short_tags)
 {
 	/* This is a bit complicated.  There are 8 registers on
 	 * the controller which we write to to tell it 8 different

@@ -3808,7 +3859,7 @@ static __devinit void cciss_enter_performant_mode(ctlr_info_t *h)
 	writel(0, &h->transtable->RepQCtrAddrHigh32);
 	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
 	writel(0, &h->transtable->RepQAddr0High32);
-	writel(CFGTBL_Trans_Performant,
+	writel(CFGTBL_Trans_Performant | use_short_tags,
 			&(h->cfgtable->HostWrite.TransportRequest));
 
 	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);

@@ -3855,7 +3906,8 @@ static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h)
 	if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL))
 		goto clean_up;
 
-	cciss_enter_performant_mode(h);
+	cciss_enter_performant_mode(h,
+		trans_support & CFGTBL_Trans_use_short_tags);
 
 	/* Change the access methods to the performant access methods */
 	h->access = SA5_performant_access;

drivers/block/cciss.h:

@@ -222,6 +222,7 @@ static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c)
 			h->ctlr, c->busaddr);
 #endif /* CCISS_DEBUG */
          writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+	readl(h->vaddr + SA5_REQUEST_PORT_OFFSET);
 	 h->commands_outstanding++;
 	 if ( h->commands_outstanding > h->max_outstanding)
		h->max_outstanding = h->commands_outstanding;
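The added readl() back from the same register looks redundant but reads as a posted-write flush: PCI writes may be buffered in bridges until a read on the same path forces them out to the device. A generic sketch of the pattern, assuming <linux/io.h> and not specific to cciss:

```c
#include <linux/io.h>

/* Generic MMIO posted-write flush (sketch; not from the patch):
 * a read from the device forces earlier posted PCI writes on the
 * same path to complete before the CPU continues. */
static inline void writel_flushed(u32 val, void __iomem *reg)
{
	writel(val, reg);	/* may be posted in a PCI bridge */
	(void)readl(reg);	/* read-back flushes the posted write */
}
```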
drivers/block/cciss_cmd.h:

@@ -56,6 +56,7 @@
 
 #define CFGTBL_Trans_Simple     0x00000002l
 #define CFGTBL_Trans_Performant 0x00000004l
+#define CFGTBL_Trans_use_short_tags 0x20000000l
 
 #define CFGTBL_BusType_Ultra2   0x00000001l
 #define CFGTBL_BusType_Ultra3   0x00000002l

drivers/block/cciss_scsi.c:

@@ -824,13 +824,18 @@ static void complete_scsi_command(CommandList_struct *c, int timeout,
 			break;
 			case CMD_UNSOLICITED_ABORT:
 				cmd->result = DID_ABORT << 16;
-				dev_warn(&h->pdev->dev, "%p aborted do to an "
+				dev_warn(&h->pdev->dev, "%p aborted due to an "
 					"unsolicited abort\n", c);
 			break;
 			case CMD_TIMEOUT:
 				cmd->result = DID_TIME_OUT << 16;
 				dev_warn(&h->pdev->dev, "%p timedout\n", c);
 			break;
+			case CMD_UNABORTABLE:
+				cmd->result = DID_ERROR << 16;
+				dev_warn(&h->pdev->dev, "c %p command "
+					"unabortable\n", c);
+			break;
 			default:
 				cmd->result = DID_ERROR << 16;
 				dev_warn(&h->pdev->dev,

@@ -1007,11 +1012,15 @@ cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c)
 		break;
 		case CMD_UNSOLICITED_ABORT:
 			dev_warn(&h->pdev->dev,
-				"%p aborted do to an unsolicited abort\n", c);
+				"%p aborted due to an unsolicited abort\n", c);
 		break;
 		case CMD_TIMEOUT:
 			dev_warn(&h->pdev->dev, "%p timedout\n", c);
 		break;
+		case CMD_UNABORTABLE:
+			dev_warn(&h->pdev->dev,
+				"%p unabortable\n", c);
+		break;
 		default:
 			dev_warn(&h->pdev->dev,
 				"%p returned unknown status %x\n",

drivers/block/drbd/drbd_actlog.c:

@@ -92,7 +92,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 	bio->bi_end_io = drbd_md_io_complete;
 	bio->bi_rw = rw;
 
-	if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
+	if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
 		bio_endio(bio, -EIO);
 	else
 		submit_bio(rw, bio);

@@ -176,13 +176,17 @@ static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
 	struct lc_element *al_ext;
 	struct lc_element *tmp;
 	unsigned long     al_flags = 0;
+	int wake;
 
 	spin_lock_irq(&mdev->al_lock);
 	tmp = lc_find(mdev->resync, enr/AL_EXT_PER_BM_SECT);
 	if (unlikely(tmp != NULL)) {
 		struct bm_extent  *bm_ext = lc_entry(tmp, struct bm_extent, lce);
 		if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
+			wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
 			spin_unlock_irq(&mdev->al_lock);
+			if (wake)
+				wake_up(&mdev->al_wait);
 			return NULL;
 		}
 	}

@@ -258,6 +262,33 @@ void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector)
 	spin_unlock_irqrestore(&mdev->al_lock, flags);
 }
 
+#if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)
+/* Currently BM_BLOCK_SHIFT, BM_EXT_SHIFT and AL_EXTENT_SHIFT
+ * are still coupled, or assume too much about their relation.
+ * Code below will not work if this is violated.
+ * Will be cleaned up with some followup patch.
+ */
+# error FIXME
+#endif
+
+static unsigned int al_extent_to_bm_page(unsigned int al_enr)
+{
+	return al_enr >>
+		/* bit to page */
+		((PAGE_SHIFT + 3) -
+		/* al extent number to bit */
+		 (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT));
+}
+
+static unsigned int rs_extent_to_bm_page(unsigned int rs_enr)
+{
+	return rs_enr >>
+		/* bit to page */
+		((PAGE_SHIFT + 3) -
+		/* al extent number to bit */
+		 (BM_EXT_SHIFT - BM_BLOCK_SHIFT));
+}
+
 int
 w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 {
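A worked example of the shift arithmetic in al_extent_to_bm_page() and rs_extent_to_bm_page() above. The constants are typical DRBD values assumed here for illustration; they are defined elsewhere, not in this diff: PAGE_SHIFT = 12, BM_BLOCK_SHIFT = 12 (one bitmap bit per 4 KiB), AL_EXTENT_SHIFT = 22 (4 MiB activity-log extents), BM_EXT_SHIFT = 24 (16 MiB resync extents).

```c
#include <assert.h>

/* Assumed typical values (defined elsewhere in drbd, not in this diff): */
#define PAGE_SHIFT      12	/* 4 KiB pages */
#define BM_BLOCK_SHIFT  12	/* one bitmap bit covers 4 KiB of storage */
#define AL_EXTENT_SHIFT 22	/* one activity-log extent covers 4 MiB */
#define BM_EXT_SHIFT    24	/* one resync extent covers 16 MiB */

int main(void)
{
	/* A page holds 4096 * 8 = 2^(PAGE_SHIFT + 3) = 32768 bitmap bits.
	 * An AL extent maps to 2^(22 - 12) = 1024 bits, so one page covers
	 * 2^(15 - 10) = 32 AL extents; hence al_enr >> 5: */
	assert((64 >> ((PAGE_SHIFT + 3) - (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT))) == 64 / 32);
	/* A resync extent maps to 2^(24 - 12) = 4096 bits, so one page
	 * covers 2^(15 - 12) = 8 resync extents; hence rs_enr >> 3: */
	assert((64 >> ((PAGE_SHIFT + 3) - (BM_EXT_SHIFT - BM_BLOCK_SHIFT))) == 64 / 8);
	return 0;
}
```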
@@ -285,7 +316,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 	 * For now, we must not write the transaction,
 	 * if we cannot write out the bitmap of the evicted extent. */
 	if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
-		drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT);
+		drbd_bm_write_page(mdev, al_extent_to_bm_page(evicted));
 
 	/* The bitmap write may have failed, causing a state change. */
 	if (mdev->state.disk < D_INCONSISTENT) {

@@ -334,7 +365,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 		+ mdev->ldev->md.al_offset + mdev->al_tr_pos;
 
 	if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
-		drbd_chk_io_error(mdev, 1, TRUE);
+		drbd_chk_io_error(mdev, 1, true);
 
 	if (++mdev->al_tr_pos >
 	    div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))

@@ -511,225 +542,6 @@ int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
 	return 1;
 }
 
-static void atodb_endio(struct bio *bio, int error)
-{
-	struct drbd_atodb_wait *wc = bio->bi_private;
-	struct drbd_conf *mdev = wc->mdev;
-	struct page *page;
-	int uptodate = bio_flagged(bio, BIO_UPTODATE);
-
-	/* strange behavior of some lower level drivers...
-	 * fail the request by clearing the uptodate flag,
-	 * but do not return any error?! */
-	if (!error && !uptodate)
-		error = -EIO;
-
-	drbd_chk_io_error(mdev, error, TRUE);
-	if (error && wc->error == 0)
-		wc->error = error;
-
-	if (atomic_dec_and_test(&wc->count))
-		complete(&wc->io_done);
-
-	page = bio->bi_io_vec[0].bv_page;
-	put_page(page);
-	bio_put(bio);
-	mdev->bm_writ_cnt++;
-	put_ldev(mdev);
-}
-
-/* sector to word */
-#define S2W(s)	((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
-
-/* activity log to on disk bitmap -- prepare bio unless that sector
- * is already covered by previously prepared bios */
-static int atodb_prepare_unless_covered(struct drbd_conf *mdev,
-					struct bio **bios,
-					unsigned int enr,
-					struct drbd_atodb_wait *wc) __must_hold(local)
-{
-	struct bio *bio;
-	struct page *page;
-	sector_t on_disk_sector;
-	unsigned int page_offset = PAGE_SIZE;
-	int offset;
-	int i = 0;
-	int err = -ENOMEM;
-
-	/* We always write aligned, full 4k blocks,
-	 * so we can ignore the logical_block_size (for now) */
-	enr &= ~7U;
-	on_disk_sector = enr + mdev->ldev->md.md_offset
-			     + mdev->ldev->md.bm_offset;
-
-	D_ASSERT(!(on_disk_sector & 7U));
-
-	/* Check if that enr is already covered by an already created bio.
-	 * Caution, bios[] is not NULL terminated,
-	 * but only initialized to all NULL.
-	 * For completely scattered activity log,
-	 * the last invocation iterates over all bios,
-	 * and finds the last NULL entry.
-	 */
-	while ((bio = bios[i])) {
-		if (bio->bi_sector == on_disk_sector)
-			return 0;
-		i++;
-	}
-	/* bios[i] == NULL, the next not yet used slot */
-
-	/* GFP_KERNEL, we are not in the write-out path */
-	bio = bio_alloc(GFP_KERNEL, 1);
-	if (bio == NULL)
-		return -ENOMEM;
-
-	if (i > 0) {
-		const struct bio_vec *prev_bv = bios[i-1]->bi_io_vec;
-		page_offset = prev_bv->bv_offset + prev_bv->bv_len;
-		page = prev_bv->bv_page;
-	}
-	if (page_offset == PAGE_SIZE) {
-		page = alloc_page(__GFP_HIGHMEM);
-		if (page == NULL)
-			goto out_bio_put;
-		page_offset = 0;
-	} else {
-		get_page(page);
-	}
-
-	offset = S2W(enr);
-	drbd_bm_get_lel(mdev, offset,
-			min_t(size_t, S2W(8), drbd_bm_words(mdev) - offset),
-			kmap(page) + page_offset);
-	kunmap(page);
-
-	bio->bi_private = wc;
-	bio->bi_end_io = atodb_endio;
-	bio->bi_bdev = mdev->ldev->md_bdev;
-	bio->bi_sector = on_disk_sector;
-
-	if (bio_add_page(bio, page, 4096, page_offset) != 4096)
-		goto out_put_page;
-
-	atomic_inc(&wc->count);
-	/* we already know that we may do this...
-	 * get_ldev_if_state(mdev,D_ATTACHING);
-	 * just get the extra reference, so that the local_cnt reflects
-	 * the number of pending IO requests DRBD at its backing device.
-	 */
-	atomic_inc(&mdev->local_cnt);
-
-	bios[i] = bio;
-
-	return 0;
-
-out_put_page:
-	err = -EINVAL;
-	put_page(page);
-out_bio_put:
-	bio_put(bio);
-	return err;
-}
-
-/**
- * drbd_al_to_on_disk_bm() -  * Writes bitmap parts covered by active AL extents
- * @mdev:	DRBD device.
- *
- * Called when we detach (unconfigure) local storage,
- * or when we go from R_PRIMARY to R_SECONDARY role.
- */
-void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
-{
-	int i, nr_elements;
-	unsigned int enr;
-	struct bio **bios;
-	struct drbd_atodb_wait wc;
-
-	ERR_IF (!get_ldev_if_state(mdev, D_ATTACHING))
-		return; /* sorry, I don't have any act_log etc... */
-
-	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
-
-	nr_elements = mdev->act_log->nr_elements;
-
-	/* GFP_KERNEL, we are not in anyone's write-out path */
-	bios = kzalloc(sizeof(struct bio *) * nr_elements, GFP_KERNEL);
-	if (!bios)
-		goto submit_one_by_one;
-
-	atomic_set(&wc.count, 0);
-	init_completion(&wc.io_done);
-	wc.mdev = mdev;
-	wc.error = 0;
-
-	for (i = 0; i < nr_elements; i++) {
-		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
-		if (enr == LC_FREE)
-			continue;
-		/* next statement also does atomic_inc wc.count and local_cnt */
-		if (atodb_prepare_unless_covered(mdev, bios,
-						enr/AL_EXT_PER_BM_SECT,
-						&wc))
-			goto free_bios_submit_one_by_one;
-	}
-
-	/* unnecessary optimization? */
-	lc_unlock(mdev->act_log);
-	wake_up(&mdev->al_wait);
-
-	/* all prepared, submit them */
-	for (i = 0; i < nr_elements; i++) {
-		if (bios[i] == NULL)
-			break;
-		if (FAULT_ACTIVE(mdev, DRBD_FAULT_MD_WR)) {
-			bios[i]->bi_rw = WRITE;
-			bio_endio(bios[i], -EIO);
-		} else {
-			submit_bio(WRITE, bios[i]);
-		}
-	}
-
-	/* always (try to) flush bitmap to stable storage */
-	drbd_md_flush(mdev);
-
-	/* In case we did not submit a single IO do not wait for
-	 * them to complete. ( Because we would wait forever here. )
-	 *
-	 * In case we had IOs and they are already complete, there
-	 * is not point in waiting anyways.
-	 * Therefore this if () ... */
-	if (atomic_read(&wc.count))
-		wait_for_completion(&wc.io_done);
-
-	put_ldev(mdev);
-
-	kfree(bios);
-	return;
-
- free_bios_submit_one_by_one:
-	/* free everything by calling the endio callback directly. */
-	for (i = 0; i < nr_elements && bios[i]; i++)
-		bio_endio(bios[i], 0);
-
-	kfree(bios);
-
- submit_one_by_one:
-	dev_warn(DEV, "Using the slow drbd_al_to_on_disk_bm()\n");
-
-	for (i = 0; i < mdev->act_log->nr_elements; i++) {
-		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
-		if (enr == LC_FREE)
-			continue;
-		/* Really slow: if we have al-extents 16..19 active,
-		 * sector 4 will be written four times! Synchronous! */
-		drbd_bm_write_sect(mdev, enr/AL_EXT_PER_BM_SECT);
-	}
-
-	lc_unlock(mdev->act_log);
-	wake_up(&mdev->al_wait);
-	put_ldev(mdev);
-}
-
 /**
  * drbd_al_apply_to_bm() - Sets the bitmap to diry(1) where covered ba active AL extents
  * @mdev:	DRBD device.

@@ -809,7 +621,7 @@ static int w_update_odbm(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 		return 1;
 	}
 
-	drbd_bm_write_sect(mdev, udw->enr);
+	drbd_bm_write_page(mdev, rs_extent_to_bm_page(udw->enr));
 	put_ldev(mdev);
 
 	kfree(udw);

@@ -889,7 +701,6 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
 				dev_warn(DEV, "Kicking resync_lru element enr=%u "
 				     "out with rs_failed=%d\n",
 				     ext->lce.lc_number, ext->rs_failed);
-				set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
 			}
 			ext->rs_left = rs_left;
 			ext->rs_failed = success ? 0 : count;

@@ -908,7 +719,6 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
 				drbd_queue_work_front(&mdev->data.work, &udw->w);
 			} else {
 				dev_warn(DEV, "Could not kmalloc an udw\n");
-				set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
 			}
 		}
 	} else {

@@ -919,6 +729,22 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
 	}
 }
 
+void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go)
+{
+	unsigned long now = jiffies;
+	unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark];
+	int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS;
+	if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
+		if (mdev->rs_mark_left[mdev->rs_last_mark] != still_to_go &&
+		    mdev->state.conn != C_PAUSED_SYNC_T &&
+		    mdev->state.conn != C_PAUSED_SYNC_S) {
+			mdev->rs_mark_time[next] = now;
+			mdev->rs_mark_left[next] = still_to_go;
+			mdev->rs_last_mark = next;
+		}
+	}
+}
+
 /* clear the bit corresponding to the piece of storage in question:
  * size byte of data starting from sector.  Only clear a bits of the affected
  * one ore more _aligned_ BM_BLOCK_SIZE blocks.
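A minimal userspace sketch of the idea behind drbd_advance_rs_marks() above: keep a ring of timestamped snapshots of "blocks still to sync" so a sliding-window resync rate can be estimated. The ring size and 3-second step are assumptions standing in for DRBD_SYNC_MARKS and DRBD_SYNC_MARK_STEP, not values taken from the patch.

```c
#include <stdio.h>

#define SYNC_MARKS 8		/* ring size; stands in for DRBD_SYNC_MARKS */
#define MARK_STEP  3		/* seconds between marks; assumed */

struct rs_marks {
	unsigned long left[SYNC_MARKS];	/* blocks still to sync at each mark */
	unsigned long time[SYNC_MARKS];	/* timestamp of each mark (seconds) */
	int last;
};

/* Advance to the next slot once MARK_STEP elapsed and progress was made,
 * mirroring the structure of drbd_advance_rs_marks above. */
static void advance_marks(struct rs_marks *m, unsigned long now,
			  unsigned long still_to_go)
{
	int next = (m->last + 1) % SYNC_MARKS;

	if (now >= m->time[m->last] + MARK_STEP &&
	    m->left[m->last] != still_to_go) {
		m->time[next] = now;
		m->left[next] = still_to_go;
		m->last = next;
	}
}

/* Sliding-window estimate: blocks synced per second across the ring. */
static unsigned long rate(const struct rs_marks *m)
{
	int oldest = (m->last + 1) % SYNC_MARKS;
	unsigned long dt = m->time[m->last] - m->time[oldest];

	return dt ? (m->left[oldest] - m->left[m->last]) / dt : 0;
}

int main(void)
{
	struct rs_marks m = { .left = { 1000 }, .last = 0 };
	unsigned long t, togo = 1000;

	for (t = 0; t <= 30; t += 3, togo -= 60)
		advance_marks(&m, t, togo);
	printf("~%lu blocks/s\n", rate(&m));	/* prints ~20 blocks/s */
	return 0;
}
```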
@@ -936,7 +762,7 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
 	int wake_up = 0;
 	unsigned long flags;
 
-	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
+	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
 		dev_err(DEV, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n",
 				(unsigned long long)sector, size);
 		return;

@@ -969,21 +795,9 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
 	 */
 	count = drbd_bm_clear_bits(mdev, sbnr, ebnr);
 	if (count && get_ldev(mdev)) {
-		unsigned long now = jiffies;
-		unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark];
-		int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS;
-		if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
-			unsigned long tw = drbd_bm_total_weight(mdev);
-			if (mdev->rs_mark_left[mdev->rs_last_mark] != tw &&
-			    mdev->state.conn != C_PAUSED_SYNC_T &&
-			    mdev->state.conn != C_PAUSED_SYNC_S) {
-				mdev->rs_mark_time[next] = now;
-				mdev->rs_mark_left[next] = tw;
-				mdev->rs_last_mark = next;
-			}
-		}
+		drbd_advance_rs_marks(mdev, drbd_bm_total_weight(mdev));
 		spin_lock_irqsave(&mdev->al_lock, flags);
-		drbd_try_clear_on_disk_bm(mdev, sector, count, TRUE);
+		drbd_try_clear_on_disk_bm(mdev, sector, count, true);
 		spin_unlock_irqrestore(&mdev->al_lock, flags);
 
 		/* just wake_up unconditional now, various lc_chaged(),

@@ -998,27 +812,27 @@ void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector, int size,
 /*
  * this is intended to set one request worth of data out of sync.
  * affects at least 1 bit,
- * and at most 1+DRBD_MAX_SEGMENT_SIZE/BM_BLOCK_SIZE bits.
+ * and at most 1+DRBD_MAX_BIO_SIZE/BM_BLOCK_SIZE bits.
 *
 * called by tl_clear and drbd_send_dblock (==drbd_make_request).
 * so this can be _any_ process.
 */
-void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
+int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
 			    const char *file, const unsigned int line)
 {
 	unsigned long sbnr, ebnr, lbnr, flags;
 	sector_t esector, nr_sectors;
-	unsigned int enr, count;
+	unsigned int enr, count = 0;
 	struct lc_element *e;
 
-	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
+	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
 		dev_err(DEV, "sector: %llus, size: %d\n",
 			(unsigned long long)sector, size);
-		return;
+		return 0;
 	}
 
 	if (!get_ldev(mdev))
-		return; /* no disk, no metadata, no bitmap to set bits in */
+		return 0; /* no disk, no metadata, no bitmap to set bits in */
 
 	nr_sectors = drbd_get_capacity(mdev->this_bdev);
 	esector = sector + (size >> 9) - 1;

@@ -1048,6 +862,8 @@ void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
 
 out:
 	put_ldev(mdev);
+
+	return count;
 }
 
 static

@@ -1128,7 +944,10 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
 	unsigned int enr = BM_SECT_TO_EXT(sector);
 	struct bm_extent *bm_ext;
 	int i, sig;
+	int sa = 200; /* Step aside 200 times, then grab the extent and let app-IO wait.
+			 200 times -> 20 seconds. */
 
+retry:
 	sig = wait_event_interruptible(mdev->al_wait,
 			(bm_ext = _bme_get(mdev, enr)));
 	if (sig)

@@ -1139,16 +958,25 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
 
 	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
 		sig = wait_event_interruptible(mdev->al_wait,
-				!_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i));
-		if (sig) {
+					       !_is_in_al(mdev, enr * AL_EXT_PER_BM_SECT + i) ||
+					       test_bit(BME_PRIORITY, &bm_ext->flags));
+
+		if (sig || (test_bit(BME_PRIORITY, &bm_ext->flags) && sa)) {
 			spin_lock_irq(&mdev->al_lock);
 			if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
-				clear_bit(BME_NO_WRITES, &bm_ext->flags);
+				bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */
 				mdev->resync_locked--;
 				wake_up(&mdev->al_wait);
 			}
 			spin_unlock_irq(&mdev->al_lock);
-			return -EINTR;
+			if (sig)
+				return -EINTR;
+			if (schedule_timeout_interruptible(HZ/10))
+				return -EINTR;
+			if (sa && --sa == 0)
+				dev_warn(DEV,"drbd_rs_begin_io() stepped aside for 20sec."
+					 "Resync stalled?\n");
+			goto retry;
 		}
 	}
 	set_bit(BME_LOCKED, &bm_ext->flags);

@@ -1291,8 +1119,7 @@ void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector)
 	}
 
 	if (lc_put(mdev->resync, &bm_ext->lce) == 0) {
-		clear_bit(BME_LOCKED, &bm_ext->flags);
-		clear_bit(BME_NO_WRITES, &bm_ext->flags);
+		bm_ext->flags = 0; /* clear BME_LOCKED, BME_NO_WRITES and BME_PRIORITY */
 		mdev->resync_locked--;
 		wake_up(&mdev->al_wait);
 	}

@@ -1383,7 +1210,7 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
 	sector_t esector, nr_sectors;
 	int wake_up = 0;
 
-	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
+	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
 		dev_err(DEV, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n",
 				(unsigned long long)sector, size);
 		return;

@@ -1420,7 +1247,7 @@ void drbd_rs_failed_io(struct drbd_conf *mdev, sector_t sector, int size)
 		mdev->rs_failed += count;
 
 		if (get_ldev(mdev)) {
-			drbd_try_clear_on_disk_bm(mdev, sector, count, FALSE);
+			drbd_try_clear_on_disk_bm(mdev, sector, count, false);
 			put_ldev(mdev);
 		}
 

[File diff suppressed because it is too large]

drivers/block/drbd/drbd_int.h:

@@ -72,13 +72,6 @@ extern int fault_devs;
 extern char usermode_helper[];
 
 
-#ifndef TRUE
-#define TRUE 1
-#endif
-#ifndef FALSE
-#define FALSE 0
-#endif
-
 /* I don't remember why XCPU ...
  * This is used to wake the asender,
  * and to interrupt sending the sending task

@@ -104,6 +97,7 @@ extern char usermode_helper[];
 #define ID_SYNCER (-1ULL)
 #define ID_VACANT 0
 #define is_syncer_block_id(id) ((id) == ID_SYNCER)
+#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)
 
 struct drbd_conf;
 

@@ -137,20 +131,19 @@ enum {
 	DRBD_FAULT_MAX,
 };
 
-#ifdef CONFIG_DRBD_FAULT_INJECTION
 extern unsigned int
 _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type);
+
 static inline int
 drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) {
+#ifdef CONFIG_DRBD_FAULT_INJECTION
 	return fault_rate &&
 		(enable_faults & (1<<type)) &&
 		_drbd_insert_fault(mdev, type);
-}
-#define FAULT_ACTIVE(_m, _t) (drbd_insert_fault((_m), (_t)))
-
 #else
-#define FAULT_ACTIVE(_m, _t) (0)
+	return 0;
 #endif
+}
 
 /* integer division, round _UP_ to the next integer */
 #define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))

@@ -212,8 +205,10 @@ enum drbd_packets {
 	/* P_CKPT_FENCE_REQ      = 0x25, * currently reserved for protocol D */
 	/* P_CKPT_DISABLE_REQ    = 0x26, * currently reserved for protocol D */
 	P_DELAY_PROBE         = 0x27, /* is used on BOTH sockets */
+	P_OUT_OF_SYNC         = 0x28, /* Mark as out of sync (Outrunning), data socket */
+	P_RS_CANCEL           = 0x29, /* meta: Used to cancel RS_DATA_REQUEST packet by SyncSource */
 
-	P_MAX_CMD	      = 0x28,
+	P_MAX_CMD	      = 0x2A,
 	P_MAY_IGNORE	      = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
 	P_MAX_OPT_CMD	      = 0x101,
 

@@ -269,6 +264,7 @@ static inline const char *cmdname(enum drbd_packets cmd)
 		[P_RS_IS_IN_SYNC]	= "CsumRSIsInSync",
 		[P_COMPRESSED_BITMAP]   = "CBitmap",
 		[P_DELAY_PROBE]         = "DelayProbe",
+		[P_OUT_OF_SYNC]		= "OutOfSync",
 		[P_MAX_CMD]	        = NULL,
 	};
 

@@ -512,7 +508,7 @@ struct p_sizes {
 	u64	    d_size;  /* size of disk */
 	u64	    u_size;  /* user requested size */
 	u64	    c_size;  /* current exported size */
-	u32	    max_segment_size;  /* Maximal size of a BIO */
+	u32	    max_bio_size;  /* Maximal size of a BIO */
 	u16	    queue_order_type;  /* not yet implemented in DRBD*/
 	u16	    dds_flags; /* use enum dds_flags here. */
 } __packed;

@@ -550,6 +546,13 @@ struct p_discard {
 	u32	    pad;
 } __packed;
 
+struct p_block_desc {
+	struct p_header80 head;
+	u64 sector;
+	u32 blksize;
+	u32 pad;	/* to multiple of 8 Byte */
+} __packed;
+
 /* Valid values for the encoding field.
  * Bump proto version when changing this. */
 enum drbd_bitmap_code {
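A wire-layout sketch for the new p_block_desc. The 8-byte p_header80 layout used below (u32 magic, u16 command, u16 length) is an assumption based on other drbd on-wire headers; it is not shown in this diff.

```c
/* Sketch of the on-wire size of p_block_desc: the trailing pad keeps
 * the packet body a multiple of 8 bytes, as the source comment says. */
#include <assert.h>
#include <stdint.h>

#pragma pack(push, 1)
struct p_header80 {		/* assumed layout, not from this diff */
	uint32_t magic;
	uint16_t command;
	uint16_t length;
};
struct p_block_desc {
	struct p_header80 head;
	uint64_t sector;
	uint32_t blksize;
	uint32_t pad;		/* to multiple of 8 bytes */
};
#pragma pack(pop)

int main(void)
{
	assert(sizeof(struct p_block_desc) == 24);	/* 8 + 8 + 4 + 4 */
	return 0;
}
```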
| 
						 | 
				
			
			@ -647,6 +650,7 @@ union p_polymorph {
 | 
			
		|||
        struct p_block_req       block_req;
 | 
			
		||||
	struct p_delay_probe93   delay_probe93;
 | 
			
		||||
	struct p_rs_uuid         rs_uuid;
 | 
			
		||||
	struct p_block_desc      block_desc;
 | 
			
		||||
} __packed;
 | 
			
		||||
 | 
			
		||||
/**********************************************************************/
 | 
			
		||||
| 
						 | 
				
			
			@ -677,13 +681,6 @@ static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
 | 
			
		|||
	return thi->t_state;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
/*
 | 
			
		||||
 * Having this as the first member of a struct provides sort of "inheritance".
 | 
			
		||||
 * "derived" structs can be "drbd_queue_work()"ed.
 | 
			
		||||
 * The callback should know and cast back to the descendant struct.
 | 
			
		||||
 * drbd_request and drbd_epoch_entry are descendants of drbd_work.
 | 
			
		||||
 */
 | 
			
		||||
struct drbd_work;
 | 
			
		||||
typedef int (*drbd_work_cb)(struct drbd_conf *, struct drbd_work *, int cancel);
 | 
			
		||||
struct drbd_work {
 | 
			
		||||
| 
						 | 
				
			
			@ -712,9 +709,6 @@ struct drbd_request {
 | 
			
		|||
	 * starting a new epoch...
 | 
			
		||||
	 */
 | 
			
		||||
 | 
			
		||||
	/* up to here, the struct layout is identical to drbd_epoch_entry;
 | 
			
		||||
	 * we might be able to use that to our advantage...  */
 | 
			
		||||
 | 
			
		||||
	struct list_head tl_requests; /* ring list in the transfer log */
 | 
			
		||||
	struct bio *master_bio;       /* master bio pointer */
 | 
			
		||||
	unsigned long rq_state; /* see comments above _req_mod() */
 | 
			
		||||
| 
						 | 
				
			
			@ -831,7 +825,7 @@ enum {
 | 
			
		|||
	CRASHED_PRIMARY,	/* This node was a crashed primary.
 | 
			
		||||
				 * Gets cleared when the state.conn
 | 
			
		||||
				 * goes into C_CONNECTED state. */
 | 
			
		||||
	WRITE_BM_AFTER_RESYNC,	/* A kmalloc() during resync failed */
 | 
			
		||||
	NO_BARRIER_SUPP,	/* underlying block device doesn't implement barriers */
 | 
			
		||||
	CONSIDER_RESYNC,
 | 
			
		||||
 | 
			
		||||
	MD_NO_FUA,		/* Users wants us to not use FUA/FLUSH on meta data dev */
 | 
			
		||||
| 
						 | 
				
			
			@ -856,10 +850,37 @@ enum {
 | 
			
		|||
	GOT_PING_ACK,		/* set when we receive a ping_ack packet, misc wait gets woken */
 | 
			
		||||
	NEW_CUR_UUID,		/* Create new current UUID when thawing IO */
 | 
			
		||||
	AL_SUSPENDED,		/* Activity logging is currently suspended. */
 | 
			
		||||
	AHEAD_TO_SYNC_SOURCE,   /* Ahead -> SyncSource queued */
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
struct drbd_bitmap; /* opaque for drbd_conf */
 | 
			
		||||
 | 
			
		||||
/* definition of bits in bm_flags to be used in drbd_bm_lock
 | 
			
		||||
 * and drbd_bitmap_io and friends. */
 | 
			
		||||
enum bm_flag {
 | 
			
		||||
	/* do we need to kfree, or vfree bm_pages? */
 | 
			
		||||
	BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */
 | 
			
		||||
 | 
			
		||||
	/* currently locked for bulk operation */
 | 
			
		||||
	BM_LOCKED_MASK = 0x7,
 | 
			
		||||
 | 
			
		||||
	/* in detail, that is: */
 | 
			
		||||
	BM_DONT_CLEAR = 0x1,
 | 
			
		||||
	BM_DONT_SET   = 0x2,
 | 
			
		||||
	BM_DONT_TEST  = 0x4,
 | 
			
		||||
 | 
			
		||||
	/* (test bit, count bit) allowed (common case) */
 | 
			
		||||
	BM_LOCKED_TEST_ALLOWED = 0x3,
 | 
			
		||||
 | 
			
		||||
	/* testing bits, as well as setting new bits allowed, but clearing bits
 | 
			
		||||
	 * would be unexpected.  Used during bitmap receive.  Setting new bits
 | 
			
		||||
	 * requires sending of "out-of-sync" information, though. */
 | 
			
		||||
	BM_LOCKED_SET_ALLOWED = 0x1,
 | 
			
		||||
 | 
			
		||||
	/* clear is not expected while bitmap is locked for bulk operation */
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
/* TODO sort members for performance
 | 
			
		||||
 * MAYBE group them further */
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -925,6 +946,7 @@ struct drbd_md_io {
 | 
			
		|||
struct bm_io_work {
 | 
			
		||||
	struct drbd_work w;
 | 
			
		||||
	char *why;
 | 
			
		||||
	enum bm_flag flags;
 | 
			
		||||
	int (*io_fn)(struct drbd_conf *mdev);
 | 
			
		||||
	void (*done)(struct drbd_conf *mdev, int rv);
 | 
			
		||||
};
 | 
			
		||||
| 
						 | 
				
			
			@ -963,9 +985,12 @@ struct drbd_conf {
 | 
			
		|||
	struct drbd_work  resync_work,
 | 
			
		||||
			  unplug_work,
 | 
			
		||||
			  go_diskless,
 | 
			
		||||
			  md_sync_work;
 | 
			
		||||
			  md_sync_work,
 | 
			
		||||
			  start_resync_work;
 | 
			
		||||
	struct timer_list resync_timer;
 | 
			
		||||
	struct timer_list md_sync_timer;
 | 
			
		||||
	struct timer_list start_resync_timer;
 | 
			
		||||
	struct timer_list request_timer;
 | 
			
		||||
#ifdef DRBD_DEBUG_MD_SYNC
 | 
			
		||||
	struct {
 | 
			
		||||
		unsigned int line;
 | 
			
		||||
| 
						 | 
				
			
			@ -1000,9 +1025,9 @@ struct drbd_conf {
 | 
			
		|||
	struct hlist_head *tl_hash;
 | 
			
		||||
	unsigned int tl_hash_s;
 | 
			
		||||
 | 
			
		||||
	/* blocks to sync in this run [unit BM_BLOCK_SIZE] */
 | 
			
		||||
	/* blocks to resync in this run [unit BM_BLOCK_SIZE] */
 | 
			
		||||
	unsigned long rs_total;
 | 
			
		||||
	/* number of sync IOs that failed in this run */
 | 
			
		||||
	/* number of resync blocks that failed in this run */
 | 
			
		||||
	unsigned long rs_failed;
 | 
			
		||||
	/* Syncer's start time [unit jiffies] */
 | 
			
		||||
	unsigned long rs_start;
 | 
			
		||||
| 
						 | 
				
			
			@ -1102,6 +1127,7 @@ struct drbd_conf {
 | 
			
		|||
	struct fifo_buffer rs_plan_s; /* correction values of resync planer */
 | 
			
		||||
	int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
 | 
			
		||||
	int rs_planed;    /* resync sectors already planed */
 | 
			
		||||
	atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
 | 
			
		||||
};
 | 
			
		||||
 | 
			
		||||
static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
 | 
			
		||||
| 
						 | 
				
			
			@ -1163,14 +1189,19 @@ enum dds_flags {
 | 
			
		|||
};
 | 
			
		||||
 | 
			
		||||
extern void drbd_init_set_defaults(struct drbd_conf *mdev);
 | 
			
		||||
extern int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
 | 
			
		||||
			union drbd_state mask, union drbd_state val);
 | 
			
		||||
extern enum drbd_state_rv drbd_change_state(struct drbd_conf *mdev,
 | 
			
		||||
					    enum chg_state_flags f,
 | 
			
		||||
					    union drbd_state mask,
 | 
			
		||||
					    union drbd_state val);
 | 
			
		||||
extern void drbd_force_state(struct drbd_conf *, union drbd_state,
 | 
			
		||||
			union drbd_state);
 | 
			
		||||
extern int _drbd_request_state(struct drbd_conf *, union drbd_state,
 | 
			
		||||
			union drbd_state, enum chg_state_flags);
 | 
			
		||||
extern int __drbd_set_state(struct drbd_conf *, union drbd_state,
 | 
			
		||||
			    enum chg_state_flags, struct completion *done);
 | 
			
		||||
extern enum drbd_state_rv _drbd_request_state(struct drbd_conf *,
 | 
			
		||||
					      union drbd_state,
 | 
			
		||||
					      union drbd_state,
 | 
			
		||||
					      enum chg_state_flags);
 | 
			
		||||
extern enum drbd_state_rv __drbd_set_state(struct drbd_conf *, union drbd_state,
 | 
			
		||||
					   enum chg_state_flags,
 | 
			
		||||
					   struct completion *done);
 | 
			
		||||
extern void print_st_err(struct drbd_conf *, union drbd_state,
 | 
			
		||||
			union drbd_state, int);
 | 
			
		||||
extern int  drbd_thread_start(struct drbd_thread *thi);
 | 
			
		||||
| 
						 | 
				
			
			@ -1195,7 +1226,7 @@ extern int drbd_send(struct drbd_conf *mdev, struct socket *sock,
 | 
			
		|||
extern int drbd_send_protocol(struct drbd_conf *mdev);
 | 
			
		||||
extern int drbd_send_uuids(struct drbd_conf *mdev);
 | 
			
		||||
extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
 | 
			
		||||
extern int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val);
 | 
			
		||||
extern int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev);
 | 
			
		||||
extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
 | 
			
		||||
extern int _drbd_send_state(struct drbd_conf *mdev);
 | 
			
		||||
extern int drbd_send_state(struct drbd_conf *mdev);
 | 
			
		||||
| 
						 | 
				
			
			@ -1220,11 +1251,10 @@ extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
 | 
			
		|||
			struct p_data *dp, int data_size);
 | 
			
		||||
extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
 | 
			
		||||
			    sector_t sector, int blksize, u64 block_id);
 | 
			
		||||
extern int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req);
 | 
			
		||||
extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
 | 
			
		||||
			   struct drbd_epoch_entry *e);
 | 
			
		||||
extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req);
 | 
			
		||||
extern int _drbd_send_barrier(struct drbd_conf *mdev,
 | 
			
		||||
			struct drbd_tl_epoch *barrier);
 | 
			
		||||
extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
 | 
			
		||||
			      sector_t sector, int size, u64 block_id);
 | 
			
		||||
extern int drbd_send_drequest_csum(struct drbd_conf *mdev,
 | 
			
		||||
| 
						 | 
				
			
			@ -1235,14 +1265,13 @@ extern int drbd_send_ov_request(struct drbd_conf *mdev,sector_t sector,int size)
 | 
			
		|||
 | 
			
		||||
extern int drbd_send_bitmap(struct drbd_conf *mdev);
 | 
			
		||||
extern int _drbd_send_bitmap(struct drbd_conf *mdev);
 | 
			
		||||
extern int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode);
 | 
			
		||||
extern int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode);
 | 
			
		||||
extern void drbd_free_bc(struct drbd_backing_dev *ldev);
 | 
			
		||||
extern void drbd_mdev_cleanup(struct drbd_conf *mdev);
 | 
			
		||||
void drbd_print_uuids(struct drbd_conf *mdev, const char *text);
 | 
			
		||||
 | 
			
		||||
/* drbd_meta-data.c (still in drbd_main.c) */
 | 
			
		||||
extern void drbd_md_sync(struct drbd_conf *mdev);
 | 
			
		||||
extern int  drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
 | 
			
		||||
/* maybe define them below as inline? */
 | 
			
		||||
extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
 | 
			
		||||
extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
 | 
			
		||||
extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
 | 
			
		||||
| 
						 | 
				
			
			@ -1261,10 +1290,12 @@ extern void drbd_md_mark_dirty_(struct drbd_conf *mdev,
 | 
			
		|||
extern void drbd_queue_bitmap_io(struct drbd_conf *mdev,
 | 
			
		||||
				 int (*io_fn)(struct drbd_conf *),
 | 
			
		||||
				 void (*done)(struct drbd_conf *, int),
 | 
			
		||||
				 char *why);
 | 
			
		||||
				 char *why, enum bm_flag flags);
 | 
			
		||||
extern int drbd_bitmap_io(struct drbd_conf *mdev,
 | 
			
		||||
		int (*io_fn)(struct drbd_conf *),
 | 
			
		||||
		char *why, enum bm_flag flags);
 | 
			
		||||
extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
 | 
			
		||||
extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
 | 
			
		||||
extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why);
 | 
			
		||||
extern void drbd_go_diskless(struct drbd_conf *mdev);
 | 
			
		||||
extern void drbd_ldev_destroy(struct drbd_conf *mdev);
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -1313,6 +1344,7 @@ struct bm_extent {
 | 
			
		|||
 | 
			
		||||
#define BME_NO_WRITES  0  /* bm_extent.flags: no more requests on this one! */
 | 
			
		||||
#define BME_LOCKED     1  /* bm_extent.flags: syncer active on this one. */
 | 
			
		||||
#define BME_PRIORITY   2  /* finish resync IO on this extent ASAP! App IO waiting! */
 | 
			
		||||
 | 
			
		||||
/* drbd_bitmap.c */
 | 
			
		||||
/*
 | 
			
		||||
| 
						 | 
				
			
			@ -1390,7 +1422,9 @@ struct bm_extent {
 | 
			
		|||
 * you should use 64bit OS for that much storage, anyways. */
 | 
			
		||||
#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
 | 
			
		||||
#else
 | 
			
		||||
#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0x1LU << 32)
 | 
			
		||||
/* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */
 | 
			
		||||
#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
 | 
			
		||||
/* corresponds to (1UL << 38) bits right now. */
 | 
			
		||||
#endif
 | 
			
		||||
#endif
 | 
			
		||||
 | 
			
		||||
| 
						 | 
				
			
			@ -1398,7 +1432,7 @@ struct bm_extent {
 | 
			
		|||
 * With a value of 8 all IO in one 128K block make it to the same slot of the
 | 
			
		||||
 * hash table. */
 | 
			
		||||
#define HT_SHIFT 8
 | 
			
		||||
#define DRBD_MAX_SEGMENT_SIZE (1U<<(9+HT_SHIFT))
 | 
			
		||||
#define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
 | 
			
		||||
 | 
			
		||||
#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */
 | 
			
		||||
 | 
			
		||||
@@ -1410,16 +1444,20 @@ extern int  drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new
 extern void drbd_bm_cleanup(struct drbd_conf *mdev);
 extern void drbd_bm_set_all(struct drbd_conf *mdev);
 extern void drbd_bm_clear_all(struct drbd_conf *mdev);
+/* set/clear/test only a few bits at a time */
 extern int  drbd_bm_set_bits(
 		struct drbd_conf *mdev, unsigned long s, unsigned long e);
 extern int  drbd_bm_clear_bits(
 		struct drbd_conf *mdev, unsigned long s, unsigned long e);
-/* bm_set_bits variant for use while holding drbd_bm_lock */
+extern int drbd_bm_count_bits(
+	struct drbd_conf *mdev, const unsigned long s, const unsigned long e);
+/* bm_set_bits variant for use while holding drbd_bm_lock,
+ * may process the whole bitmap in one go */
 extern void _drbd_bm_set_bits(struct drbd_conf *mdev,
 		const unsigned long s, const unsigned long e);
 extern int  drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr);
 extern int  drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
-extern int  drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local);
+extern int  drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
 extern int  drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
 extern int  drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
 extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
@@ -1427,6 +1465,8 @@ extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
 extern size_t	     drbd_bm_words(struct drbd_conf *mdev);
 extern unsigned long drbd_bm_bits(struct drbd_conf *mdev);
 extern sector_t      drbd_bm_capacity(struct drbd_conf *mdev);
+
+#define DRBD_END_OF_BITMAP	(~(unsigned long)0)
 extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
 /* bm_find_next variants for use while you hold drbd_bm_lock() */
 extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
@@ -1437,14 +1477,12 @@ extern int drbd_bm_rs_done(struct drbd_conf *mdev);
 /* for receive_bitmap */
 extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset,
 		size_t number, unsigned long *buffer);
-/* for _drbd_send_bitmap and drbd_bm_write_sect */
+/* for _drbd_send_bitmap */
 extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset,
 		size_t number, unsigned long *buffer);
 
-extern void drbd_bm_lock(struct drbd_conf *mdev, char *why);
+extern void drbd_bm_lock(struct drbd_conf *mdev, char *why, enum bm_flag flags);
 extern void drbd_bm_unlock(struct drbd_conf *mdev);
-
-extern int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e);
 /* drbd_main.c */
 
 extern struct kmem_cache *drbd_request_cache;
@@ -1467,7 +1505,7 @@ extern void drbd_free_mdev(struct drbd_conf *mdev);
 extern int proc_details;
 
 /* drbd_req */
-extern int drbd_make_request_26(struct request_queue *q, struct bio *bio);
+extern int drbd_make_request(struct request_queue *q, struct bio *bio);
 extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
 extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
 extern int is_valid_ar_handle(struct drbd_request *, sector_t);
@@ -1482,8 +1520,9 @@ enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew =
 extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
 extern void resync_after_online_grow(struct drbd_conf *);
 extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local);
-extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
-		int force);
+extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev,
+					enum drbd_role new_role,
+					int force);
 extern enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev);
 extern void drbd_try_outdate_peer_async(struct drbd_conf *mdev);
 extern int drbd_khelper(struct drbd_conf *mdev, char *cmd);
@@ -1499,6 +1538,7 @@ extern int drbd_resync_finished(struct drbd_conf *mdev);
 extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
 		struct drbd_backing_dev *bdev, sector_t sector, int rw);
 extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int);
+extern void drbd_rs_controller_reset(struct drbd_conf *mdev);
 
 static inline void ov_oos_print(struct drbd_conf *mdev)
 {
@@ -1522,21 +1562,23 @@ extern int w_e_end_csum_rs_req(struct drbd_conf *, struct drbd_work *, int);
 extern int w_e_end_ov_reply(struct drbd_conf *, struct drbd_work *, int);
 extern int w_e_end_ov_req(struct drbd_conf *, struct drbd_work *, int);
 extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int);
-extern int w_resync_inactive(struct drbd_conf *, struct drbd_work *, int);
+extern int w_resync_timer(struct drbd_conf *, struct drbd_work *, int);
 extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int);
 extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int);
-extern int w_make_resync_request(struct drbd_conf *, struct drbd_work *, int);
 extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int);
 extern int w_send_barrier(struct drbd_conf *, struct drbd_work *, int);
 extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int);
 extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int);
 extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int);
 extern int w_restart_disk_io(struct drbd_conf *, struct drbd_work *, int);
+extern int w_send_oos(struct drbd_conf *, struct drbd_work *, int);
+extern int w_start_resync(struct drbd_conf *, struct drbd_work *, int);
 
 extern void resync_timer_fn(unsigned long data);
+extern void start_resync_timer_fn(unsigned long data);
 
 /* drbd_receiver.c */
-extern int drbd_rs_should_slow_down(struct drbd_conf *mdev);
+extern int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector);
 extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
 		const unsigned rw, const int fault_type);
 extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list);
@@ -1619,16 +1661,16 @@ extern int drbd_rs_del_all(struct drbd_conf *mdev);
 extern void drbd_rs_failed_io(struct drbd_conf *mdev,
 		sector_t sector, int size);
 extern int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *);
+extern void drbd_advance_rs_marks(struct drbd_conf *mdev, unsigned long still_to_go);
 extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector,
 		int size, const char *file, const unsigned int line);
 #define drbd_set_in_sync(mdev, sector, size) \
 	__drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__)
-extern void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
+extern int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
 		int size, const char *file, const unsigned int line);
 #define drbd_set_out_of_sync(mdev, sector, size) \
 	__drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__)
 extern void drbd_al_apply_to_bm(struct drbd_conf *mdev);
-extern void drbd_al_to_on_disk_bm(struct drbd_conf *mdev);
 extern void drbd_al_shrink(struct drbd_conf *mdev);
 
 
@@ -1747,11 +1789,11 @@ static inline void drbd_state_unlock(struct drbd_conf *mdev)
 	wake_up(&mdev->misc_wait);
 }
 
-static inline int _drbd_set_state(struct drbd_conf *mdev,
-				   union drbd_state ns, enum chg_state_flags flags,
-				   struct completion *done)
+static inline enum drbd_state_rv
+_drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
+		enum chg_state_flags flags, struct completion *done)
 {
-	int rv;
+	enum drbd_state_rv rv;
 
 	read_lock(&global_state_lock);
 	rv = __drbd_set_state(mdev, ns, flags, done);
@@ -1982,17 +2024,17 @@ static inline int drbd_send_ping_ack(struct drbd_conf *mdev)
 
 static inline void drbd_thread_stop(struct drbd_thread *thi)
 {
-	_drbd_thread_stop(thi, FALSE, TRUE);
+	_drbd_thread_stop(thi, false, true);
 }
 
 static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
 {
-	_drbd_thread_stop(thi, FALSE, FALSE);
+	_drbd_thread_stop(thi, false, false);
 }
 
 static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
 {
-	_drbd_thread_stop(thi, TRUE, FALSE);
+	_drbd_thread_stop(thi, true, false);
 }
 
 /* counts how many answer packets packets we expect from our peer,
@@ -2146,17 +2188,18 @@ extern int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
 static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
 		unsigned long *bits_left, unsigned int *per_mil_done)
 {
-	/*
-	 * this is to break it at compile time when we change that
-	 * (we may feel 4TB maximum storage per drbd is not enough)
-	 */
+	/* this is to break it at compile time when we change that, in case we
+	 * want to support more than (1<<32) bits on a 32bit arch. */
 	typecheck(unsigned long, mdev->rs_total);
 
 	/* note: both rs_total and rs_left are in bits, i.e. in
 	 * units of BM_BLOCK_SIZE.
 	 * for the percentage, we don't care. */
 
-	*bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
+	if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
+		*bits_left = mdev->ov_left;
+	else
+		*bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
 	/* >> 10 to prevent overflow,
 	 * +1 to prevent division by zero */
 	if (*bits_left > mdev->rs_total) {
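The typecheck() call above relies on the kernel macro from linux/typecheck.h. A userspace replica for illustration — comparing the addresses of two dummy variables is what makes the build complain the moment the types diverge:

#include <stdio.h>

/* replica of the kernel's typecheck() macro (GCC statement expression) */
#define typecheck(type, x) \
({	type __dummy; \
	__typeof__(x) __dummy2; \
	(void)(&__dummy == &__dummy2); \
	1; \
})

int main(void)
{
	unsigned long rs_total = 42;		/* stand-in for mdev->rs_total */
	typecheck(unsigned long, rs_total);	/* compiles silently */
	/* typecheck(unsigned int, rs_total); would warn:
	 * comparison of distinct pointer types */
	printf("type check passed\n");
	return 0;
}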
@@ -2171,10 +2214,19 @@ static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
 				*bits_left, mdev->rs_total, mdev->rs_failed);
 		*per_mil_done = 0;
 	} else {
-		/* make sure the calculation happens in long context */
-		unsigned long tmp = 1000UL -
-				(*bits_left >> 10)*1000UL
-				/ ((mdev->rs_total >> 10) + 1UL);
+		/* Make sure the division happens in long context.
+		 * We allow up to one petabyte storage right now,
+		 * at a granularity of 4k per bit that is 2**38 bits.
+		 * After shift right and multiplication by 1000,
+		 * this should still fit easily into a 32bit long,
+		 * so we don't need a 64bit division on 32bit arch.
+		 * Note: currently we don't support such large bitmaps on 32bit
+		 * arch anyways, but no harm done to be prepared for it here.
+		 */
+		unsigned int shift = mdev->rs_total >= (1ULL << 32) ? 16 : 10;
+		unsigned long left = *bits_left >> shift;
+		unsigned long total = 1UL + (mdev->rs_total >> shift);
+		unsigned long tmp = 1000UL - left * 1000UL/total;
 		*per_mil_done = tmp;
 	}
 }
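Worked example of the new per-mil computation (a standalone sketch, not driver code). Shifting both operands before the multiply keeps left * 1000 within a 32-bit long even for the 2^38-bit bitmaps the comment mentions, since (2^38 >> 16) * 1000 < 2^32:

#include <stdio.h>

static unsigned int per_mil_done(unsigned long bits_left,
				 unsigned long rs_total)
{
	/* pick a bigger shift for huge bitmaps, as the kernel code does */
	unsigned int shift = rs_total >= (1ULL << 32) ? 16 : 10;
	unsigned long left = bits_left >> shift;
	unsigned long total = 1UL + (rs_total >> shift);

	return 1000UL - left * 1000UL / total;
}

int main(void)
{
	/* 1 TiB device at 4 KiB per bit => 2^28 bits; 25% still to go */
	unsigned long total = 1UL << 28;
	printf("%u per mil done\n", per_mil_done(total / 4, total));
	return 0;
}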
@@ -2193,8 +2245,9 @@ static inline int drbd_get_max_buffers(struct drbd_conf *mdev)
 	return mxb;
 }
 
-static inline int drbd_state_is_stable(union drbd_state s)
+static inline int drbd_state_is_stable(struct drbd_conf *mdev)
 {
+	union drbd_state s = mdev->state;
 
 	/* DO NOT add a default clause, we want the compiler to warn us
 	 * for any newly introduced state we may have forgotten to add here */
@@ -2211,11 +2264,9 @@ static inline int drbd_state_is_stable(union drbd_state s)
 	case C_VERIFY_T:
 	case C_PAUSED_SYNC_S:
 	case C_PAUSED_SYNC_T:
-		/* maybe stable, look at the disk state */
-		break;
-
-	/* no new io accepted during tansitional states
-	 * like handshake or teardown */
+	case C_AHEAD:
+	case C_BEHIND:
+		/* transitional states, IO allowed */
 	case C_DISCONNECTING:
 	case C_UNCONNECTED:
 	case C_TIMEOUT:
@@ -2226,7 +2277,15 @@ static inline int drbd_state_is_stable(union drbd_state s)
 	case C_WF_REPORT_PARAMS:
 	case C_STARTING_SYNC_S:
 	case C_STARTING_SYNC_T:
+		break;
+
+		/* Allow IO in BM exchange states with new protocols */
 	case C_WF_BITMAP_S:
+		if (mdev->agreed_pro_version < 96)
+			return 0;
+		break;
+
+		/* no new io accepted in these states */
 	case C_WF_BITMAP_T:
 	case C_WF_SYNC_UUID:
 	case C_MASK:
@@ -2261,41 +2320,47 @@ static inline int is_susp(union drbd_state s)
 	return s.susp || s.susp_nod || s.susp_fen;
 }
 
-static inline int __inc_ap_bio_cond(struct drbd_conf *mdev)
+static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
 {
 	int mxb = drbd_get_max_buffers(mdev);
 
 	if (is_susp(mdev->state))
-		return 0;
+		return false;
 	if (test_bit(SUSPEND_IO, &mdev->flags))
-		return 0;
+		return false;
 
 	/* to avoid potential deadlock or bitmap corruption,
 	 * in various places, we only allow new application io
 	 * to start during "stable" states. */
 
 	/* no new io accepted when attaching or detaching the disk */
-	if (!drbd_state_is_stable(mdev->state))
-		return 0;
+	if (!drbd_state_is_stable(mdev))
+		return false;
 
 	/* since some older kernels don't have atomic_add_unless,
 	 * and we are within the spinlock anyways, we have this workaround.  */
 	if (atomic_read(&mdev->ap_bio_cnt) > mxb)
-		return 0;
+		return false;
 	if (test_bit(BITMAP_IO, &mdev->flags))
-		return 0;
-	return 1;
+		return false;
+	return true;
 }
 
+static inline bool inc_ap_bio_cond(struct drbd_conf *mdev, int count)
+{
+	bool rv = false;
+
+	spin_lock_irq(&mdev->req_lock);
+	rv = may_inc_ap_bio(mdev);
+	if (rv)
+		atomic_add(count, &mdev->ap_bio_cnt);
+	spin_unlock_irq(&mdev->req_lock);
+
+	return rv;
+}
+
-/* I'd like to use wait_event_lock_irq,
- * but I'm not sure when it got introduced,
- * and not sure when it has 3 or 4 arguments */
 static inline void inc_ap_bio(struct drbd_conf *mdev, int count)
 {
-	/* compare with after_state_ch,
-	 * os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S */
-	DEFINE_WAIT(wait);
-
 	/* we wait here
 	 *    as long as the device is suspended
 	 *    until the bitmap is no longer on the fly during connection
@@ -2304,16 +2369,7 @@ static inline void inc_ap_bio(struct drbd_conf *mdev, int count)
 	 * to avoid races with the reconnect code,
 	 * we need to atomic_inc within the spinlock. */
 
-	spin_lock_irq(&mdev->req_lock);
-	while (!__inc_ap_bio_cond(mdev)) {
-		prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
-		spin_unlock_irq(&mdev->req_lock);
-		schedule();
-		finish_wait(&mdev->misc_wait, &wait);
-		spin_lock_irq(&mdev->req_lock);
-	}
-	atomic_add(count, &mdev->ap_bio_cnt);
-	spin_unlock_irq(&mdev->req_lock);
+	wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev, count));
 }
 
 static inline void dec_ap_bio(struct drbd_conf *mdev)
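The change above replaces an open-coded prepare_to_wait()/schedule() loop with a single wait_event() on a predicate that takes the spinlock itself. A userspace analogue of that pattern using pthreads (illustration only; the names mirror the driver's, but this is not kernel code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t misc_wait = PTHREAD_COND_INITIALIZER;
static int ap_bio_cnt;
static bool suspended;

/* like inc_ap_bio_cond(): test the predicate and, on success, do the
 * accounting within the same critical section */
static bool inc_ap_bio_cond(int count)
{
	bool ok = !suspended;	/* stand-in for may_inc_ap_bio() */
	if (ok)
		ap_bio_cnt += count;
	return ok;
}

/* like the new inc_ap_bio(): one wait-on-predicate call */
static void inc_ap_bio(int count)
{
	pthread_mutex_lock(&req_lock);
	while (!inc_ap_bio_cond(count))
		pthread_cond_wait(&misc_wait, &req_lock);
	pthread_mutex_unlock(&req_lock);
}

int main(void)
{
	inc_ap_bio(1);
	printf("ap_bio_cnt = %d\n", ap_bio_cnt);
	return 0;
}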
@@ -2333,9 +2389,11 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
 	}
 }
 
-static inline void drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
+static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
 {
+	int changed = mdev->ed_uuid != val;
 	mdev->ed_uuid = val;
+	return changed;
 }
 
 static inline int seq_cmp(u32 a, u32 b)
[File diff suppressed because it is too large]
@@ -288,10 +288,11 @@ void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
 		dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
 }
 
-int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
+enum drbd_state_rv
+drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
 {
 	const int max_tries = 4;
-	int r = 0;
+	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
 	int try = 0;
 	int forced = 0;
 	union drbd_state mask, val;
@@ -306,17 +307,17 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
 	val.i  = 0; val.role  = new_role;
 
 	while (try++ < max_tries) {
-		r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);
+		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);
 
 		/* in case we first succeeded to outdate,
 		 * but now suddenly could establish a connection */
-		if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
+		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
 			val.pdsk = 0;
 			mask.pdsk = 0;
 			continue;
 		}
 
-		if (r == SS_NO_UP_TO_DATE_DISK && force &&
+		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
 		    (mdev->state.disk < D_UP_TO_DATE &&
 		     mdev->state.disk >= D_INCONSISTENT)) {
 			mask.disk = D_MASK;
@@ -325,7 +326,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
 			continue;
 		}
 
-		if (r == SS_NO_UP_TO_DATE_DISK &&
+		if (rv == SS_NO_UP_TO_DATE_DISK &&
 		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
 			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
 			nps = drbd_try_outdate_peer(mdev);
@@ -341,9 +342,9 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
 			continue;
 		}
 
-		if (r == SS_NOTHING_TO_DO)
+		if (rv == SS_NOTHING_TO_DO)
 			goto fail;
-		if (r == SS_PRIMARY_NOP && mask.pdsk == 0) {
+		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
 			nps = drbd_try_outdate_peer(mdev);
 
 			if (force && nps > D_OUTDATED) {
@@ -356,25 +357,24 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
 
 			continue;
 		}
-		if (r == SS_TWO_PRIMARIES) {
+		if (rv == SS_TWO_PRIMARIES) {
 			/* Maybe the peer is detected as dead very soon...
 			   retry at most once more in this case. */
-			__set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10);
+			schedule_timeout_interruptible((mdev->net_conf->ping_timeo+1)*HZ/10);
 			if (try < max_tries)
 				try = max_tries - 1;
 			continue;
 		}
-		if (r < SS_SUCCESS) {
-			r = _drbd_request_state(mdev, mask, val,
+		if (rv < SS_SUCCESS) {
+			rv = _drbd_request_state(mdev, mask, val,
 						CS_VERBOSE + CS_WAIT_COMPLETE);
-			if (r < SS_SUCCESS)
+			if (rv < SS_SUCCESS)
 				goto fail;
 		}
 		break;
 	}
 
-	if (r < SS_SUCCESS)
+	if (rv < SS_SUCCESS)
 		goto fail;
 
 	if (forced)
@@ -384,7 +384,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
 	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
 
 	if (new_role == R_SECONDARY) {
-		set_disk_ro(mdev->vdisk, TRUE);
+		set_disk_ro(mdev->vdisk, true);
 		if (get_ldev(mdev)) {
 			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
 			put_ldev(mdev);
@@ -394,7 +394,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
 			mdev->net_conf->want_lose = 0;
 			put_net_conf(mdev);
 		}
-		set_disk_ro(mdev->vdisk, FALSE);
+		set_disk_ro(mdev->vdisk, false);
 		if (get_ldev(mdev)) {
 			if (((mdev->state.conn < C_CONNECTED ||
 			       mdev->state.pdsk <= D_FAILED)
@@ -406,10 +406,8 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
 		}
 	}
 
-	if ((new_role == R_SECONDARY) && get_ldev(mdev)) {
-		drbd_al_to_on_disk_bm(mdev);
-		put_ldev(mdev);
-	}
+	/* writeout of activity log covered areas of the bitmap
+	 * to stable storage done in after state change already */
 
 	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
 		/* if this was forced, we should consider sync */
@@ -423,7 +421,7 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
 	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
 fail:
 	mutex_unlock(&mdev->state_mutex);
-	return r;
+	return rv;
 }
 
 static struct drbd_conf *ensure_mdev(int minor, int create)
@@ -528,17 +526,19 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
 	}
 }
 
+/* input size is expected to be in KB */
 char *ppsize(char *buf, unsigned long long size)
 {
-	/* Needs 9 bytes at max. */
+	/* Needs 9 bytes at max including trailing NUL:
+	 * -1ULL ==> "16384 EB" */
 	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
 	int base = 0;
-	while (size >= 10000) {
+	while (size >= 10000 && base < sizeof(units)-1) {
 		/* shift + round */
 		size = (size >> 10) + !!(size & (1<<9));
 		base++;
 	}
-	sprintf(buf, "%lu %cB", (long)size, units[base]);
+	sprintf(buf, "%u %cB", (unsigned)size, units[base]);
 
 	return buf;
 }
@@ -642,11 +642,19 @@ enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_
 		|| prev_size	   != mdev->ldev->md.md_size_sect;
 
 	if (la_size_changed || md_moved) {
+		int err;
+
 		drbd_al_shrink(mdev); /* All extents inactive. */
 		dev_info(DEV, "Writing the whole bitmap, %s\n",
 			 la_size_changed && md_moved ? "size changed and md moved" :
 			 la_size_changed ? "size changed" : "md moved");
-		rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */
+		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
+		err = drbd_bitmap_io(mdev, &drbd_bm_write,
+				"size changed", BM_LOCKED_MASK);
+		if (err) {
+			rv = dev_size_error;
+			goto out;
+		}
 		drbd_md_mark_dirty(mdev);
 	}
 
@@ -765,22 +773,21 @@ static int drbd_check_al_size(struct drbd_conf *mdev)
 	return 0;
 }
 
-void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __must_hold(local)
+void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size) __must_hold(local)
 {
 	struct request_queue * const q = mdev->rq_queue;
 	struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
 	int max_segments = mdev->ldev->dc.max_bio_bvecs;
+	int max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
 
-	max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);
 
-	blk_queue_max_hw_sectors(q, max_seg_s >> 9);
-	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
-	blk_queue_max_segment_size(q, max_seg_s);
-	blk_queue_logical_block_size(q, 512);
-	blk_queue_segment_boundary(q, PAGE_SIZE-1);
-	blk_stack_limits(&q->limits, &b->limits, 0);
+	blk_queue_max_hw_sectors(q, max_hw_sectors);
+	/* This is the workaround for "bio would need to, but cannot, be split" */
+	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
+	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
+	blk_queue_stack_limits(q, b);
 
-	dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q));
+	dev_info(DEV, "max BIO size = %u\n", queue_max_hw_sectors(q) << 9);
 
 	if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
 		dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
@@ -850,7 +857,7 @@ static void drbd_suspend_al(struct drbd_conf *mdev)
 static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 			     struct drbd_nl_cfg_reply *reply)
 {
-	enum drbd_ret_codes retcode;
+	enum drbd_ret_code retcode;
 	enum determine_dev_size dd;
 	sector_t max_possible_sectors;
 	sector_t min_md_device_sectors;
@@ -858,8 +865,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	struct block_device *bdev;
 	struct lru_cache *resync_lru = NULL;
 	union drbd_state ns, os;
-	unsigned int max_seg_s;
-	int rv;
+	unsigned int max_bio_size;
+	enum drbd_state_rv rv;
 	int cp_discovered = 0;
 	int logical_block_size;
 
@@ -1005,9 +1012,10 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	/* and for any other previously queued work */
 	drbd_flush_workqueue(mdev);
 
-	retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
+	rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
+	retcode = rv;  /* FIXME: Type mismatch. */
 	drbd_resume_io(mdev);
-	if (retcode < SS_SUCCESS)
+	if (rv < SS_SUCCESS)
 		goto fail;
 
 	if (!get_ldev_if_state(mdev, D_ATTACHING))
@@ -1109,20 +1117,20 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	mdev->read_cnt = 0;
 	mdev->writ_cnt = 0;
 
-	max_seg_s = DRBD_MAX_SEGMENT_SIZE;
+	max_bio_size = DRBD_MAX_BIO_SIZE;
 	if (mdev->state.conn == C_CONNECTED) {
 		/* We are Primary, Connected, and now attach a new local
 		 * backing store. We must not increase the user visible maximum
 		 * bio size on this device to something the peer may not be
 		 * able to handle. */
 		if (mdev->agreed_pro_version < 94)
-			max_seg_s = queue_max_segment_size(mdev->rq_queue);
+			max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
 		else if (mdev->agreed_pro_version == 94)
-			max_seg_s = DRBD_MAX_SIZE_H80_PACKET;
+			max_bio_size = DRBD_MAX_SIZE_H80_PACKET;
 		/* else: drbd 8.3.9 and later, stay with default */
 	}
 
-	drbd_setup_queue_param(mdev, max_seg_s);
+	drbd_setup_queue_param(mdev, max_bio_size);
 
 	/* If I am currently not R_PRIMARY,
 	 * but meta data primary indicator is set,
@@ -1154,12 +1162,14 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
 		dev_info(DEV, "Assuming that all blocks are out of sync "
 		     "(aka FullSync)\n");
-		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) {
+		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
+			"set_n_write from attaching", BM_LOCKED_MASK)) {
 			retcode = ERR_IO_MD_DISK;
 			goto force_diskless_dec;
 		}
 	} else {
-		if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) {
+		if (drbd_bitmap_io(mdev, &drbd_bm_read,
+			"read from attaching", BM_LOCKED_MASK) < 0) {
 			retcode = ERR_IO_MD_DISK;
 			goto force_diskless_dec;
 		}
@@ -1167,7 +1177,11 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 
 	if (cp_discovered) {
 		drbd_al_apply_to_bm(mdev);
-		drbd_al_to_on_disk_bm(mdev);
+		if (drbd_bitmap_io(mdev, &drbd_bm_write,
+			"crashed primary apply AL", BM_LOCKED_MASK)) {
+			retcode = ERR_IO_MD_DISK;
+			goto force_diskless_dec;
+		}
 	}
 
 	if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
@@ -1279,7 +1293,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 			    struct drbd_nl_cfg_reply *reply)
 {
 	int i, ns;
-	enum drbd_ret_codes retcode;
+	enum drbd_ret_code retcode;
 	struct net_conf *new_conf = NULL;
 	struct crypto_hash *tfm = NULL;
 	struct crypto_hash *integrity_w_tfm = NULL;
@@ -1324,6 +1338,8 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 	new_conf->wire_protocol    = DRBD_PROT_C;
 	new_conf->ping_timeo	   = DRBD_PING_TIMEO_DEF;
 	new_conf->rr_conflict	   = DRBD_RR_CONFLICT_DEF;
+	new_conf->on_congestion    = DRBD_ON_CONGESTION_DEF;
+	new_conf->cong_extents     = DRBD_CONG_EXTENTS_DEF;
 
 	if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
 		retcode = ERR_MANDATORY_TAG;
@@ -1345,6 +1361,11 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 		}
 	}
 
+	if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) {
+		retcode = ERR_CONG_NOT_PROTO_A;
+		goto fail;
+	}
+
 	if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
 		retcode = ERR_DISCARD;
 		goto fail;
@@ -1525,6 +1546,21 @@ static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
 			      struct drbd_nl_cfg_reply *reply)
 {
 	int retcode;
+	struct disconnect dc;
+
+	memset(&dc, 0, sizeof(struct disconnect));
+	if (!disconnect_from_tags(mdev, nlp->tag_list, &dc)) {
+		retcode = ERR_MANDATORY_TAG;
+		goto fail;
+	}
+
+	if (dc.force) {
+		spin_lock_irq(&mdev->req_lock);
+		if (mdev->state.conn >= C_WF_CONNECTION)
+			_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), CS_HARD, NULL);
+		spin_unlock_irq(&mdev->req_lock);
+		goto done;
+	}
 
 	retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED);
 
@@ -1842,6 +1878,10 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
 {
 	int retcode;
 
+	/* If there is still bitmap IO pending, probably because of a previous
+	 * resync just being finished, wait for it before requesting a new resync. */
+	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+
 	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
 
 	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
@@ -1877,6 +1917,10 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
 {
 	int retcode;
 
+	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync. */
+	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+
 	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
 
 	if (retcode < SS_SUCCESS) {
@@ -1885,9 +1929,9 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
 			   into a full resync. */
 			retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
 			if (retcode >= SS_SUCCESS) {
-				/* open coded drbd_bitmap_io() */
 				if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
-						   "set_n_write from invalidate_peer"))
+					"set_n_write from invalidate_peer",
+					BM_LOCKED_SET_ALLOWED))
 					retcode = ERR_IO_MD_DISK;
 			}
 		} else
@@ -1914,9 +1958,17 @@ static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n
 			       struct drbd_nl_cfg_reply *reply)
 {
 	int retcode = NO_ERROR;
+	union drbd_state s;
 
-	if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO)
-		retcode = ERR_PAUSE_IS_CLEAR;
+	if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
+		s = mdev->state;
+		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
+			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
+				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
+		} else {
+			retcode = ERR_PAUSE_IS_CLEAR;
+		}
+	}
 
 	reply->ret_code = retcode;
 	return 0;
@@ -2054,6 +2106,11 @@ static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 		reply->ret_code = ERR_MANDATORY_TAG;
 		return 0;
 	}
 
+	/* If there is still bitmap IO pending, e.g. previous resync or verify
+	 * just being finished, wait for it before requesting a new resync. */
+	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+
+	/* w_make_ov_request expects position to be aligned */
 	mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT;
 	reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
@@ -2097,7 +2154,8 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
 	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
 
 	if (args.clear_bm) {
-		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid");
+		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
+			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
 		if (err) {
 			dev_err(DEV, "Writing bitmap failed with %d\n",err);
 			retcode = ERR_IO_MD_DISK;
@@ -2105,6 +2163,7 @@ static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
 		if (skip_initial_sync) {
 			drbd_send_uuids_skip_initial_sync(mdev);
 			_drbd_uuid_set(mdev, UI_BITMAP, 0);
+			drbd_print_uuids(mdev, "cleared bitmap UUID");
 			spin_lock_irq(&mdev->req_lock);
 			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
 					CS_VERBOSE, NULL);
@@ -2189,7 +2248,8 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
 		goto fail;
 	}
 
-	if (nlp->packet_type >= P_nl_after_last_packet) {
+	if (nlp->packet_type >= P_nl_after_last_packet ||
+	    nlp->packet_type == P_return_code_only) {
 		retcode = ERR_PACKET_NR;
 		goto fail;
 	}
@@ -2205,7 +2265,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
 	reply_size += cm->reply_body_size;
 
 	/* allocation not in the IO path, cqueue thread context */
-	cn_reply = kmalloc(reply_size, GFP_KERNEL);
+	cn_reply = kzalloc(reply_size, GFP_KERNEL);
 	if (!cn_reply) {
 		retcode = ERR_NOMEM;
 		goto fail;
@@ -2213,7 +2273,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms
 	reply = (struct drbd_nl_cfg_reply *) cn_reply->data;
 
 	reply->packet_type =
-		cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet;
+		cm->reply_body_size ? nlp->packet_type : P_return_code_only;
 	reply->minor = nlp->drbd_minor;
 	reply->ret_code = NO_ERROR; /* Might by modified by cm->function. */
 	/* reply->tag_list; might be modified by cm->function. */
@@ -2376,7 +2436,7 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
 	/* receiver thread context, which is not in the writeout path (of this node),
 	 * but may be in the writeout path of the _other_ node.
 	 * GFP_NOIO to avoid potential "distributed deadlock". */
-	cn_reply = kmalloc(
+	cn_reply = kzalloc(
 		sizeof(struct cn_msg)+
 		sizeof(struct drbd_nl_cfg_reply)+
 		sizeof(struct dump_ee_tag_len_struct)+
@@ -2398,10 +2458,11 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
 	tl = tl_add_int(tl, T_ee_sector, &e->sector);
 	tl = tl_add_int(tl, T_ee_block_id, &e->block_id);
 
+	/* dump the first 32k */
+	len = min_t(unsigned, e->size, 32 << 10);
 	put_unaligned(T_ee_data, tl++);
-	put_unaligned(e->size, tl++);
+	put_unaligned(len, tl++);
 
-	len = e->size;
 	page = e->pages;
 	page_chain_for_each(page) {
 		void *d = kmap_atomic(page, KM_USER0);
@@ -2410,6 +2471,8 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
 		kunmap_atomic(d, KM_USER0);
 		tl = (unsigned short*)((char*)tl + l);
 		len -= l;
+		if (len == 0)
+			break;
 	}
 	put_unaligned(TT_END, tl++); /* Close the tag list */
 
@@ -2508,6 +2571,7 @@ void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
 		(struct drbd_nl_cfg_reply *)cn_reply->data;
 	int rr;
 
+	memset(buffer, 0, sizeof(buffer));
 	cn_reply->id = req->id;
 
 	cn_reply->seq = req->seq;
@@ -2515,6 +2579,7 @@ void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
 	cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
 	cn_reply->flags = 0;
 
+	reply->packet_type = P_return_code_only;
 	reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
 	reply->ret_code = ret_code;
 
@@ -34,6 +34,7 @@
 #include "drbd_int.h"
 
 static int drbd_proc_open(struct inode *inode, struct file *file);
+static int drbd_proc_release(struct inode *inode, struct file *file);
 
 
 struct proc_dir_entry *drbd_proc;
@@ -42,9 +43,22 @@ const struct file_operations drbd_proc_fops = {
 	.open		= drbd_proc_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
-	.release	= single_release,
+	.release	= drbd_proc_release,
 };
 
+void seq_printf_with_thousands_grouping(struct seq_file *seq, long v)
+{
+	/* v is in kB/sec. We don't expect TiByte/sec yet. */
+	if (unlikely(v >= 1000000)) {
+		/* cool: > GiByte/s */
+		seq_printf(seq, "%ld,", v / 1000000);
+		v %= 1000000;
+		seq_printf(seq, "%03ld,%03ld", v/1000, v % 1000);
+	} else if (likely(v >= 1000))
+		seq_printf(seq, "%ld,%03ld", v/1000, v % 1000);
+	else
+		seq_printf(seq, "%ld", v);
+}
 
 /*lge
 * progress bars shamelessly adapted from driver/md/md.c
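A userspace analogue of the new helper, with printf standing in for seq_printf (illustration only). Note the remainder, not a second division, is what keeps the lower groups correct:

#include <stdio.h>

static void printf_with_thousands_grouping(long v) /* v in kB/sec */
{
	if (v >= 1000000) {
		printf("%ld,", v / 1000000);
		v %= 1000000;
		printf("%03ld,%03ld", v / 1000, v % 1000);
	} else if (v >= 1000)
		printf("%ld,%03ld", v / 1000, v % 1000);
	else
		printf("%ld", v);
}

int main(void)
{
	printf_with_thousands_grouping(987);		/* 987 */
	printf("\n");
	printf_with_thousands_grouping(65432);		/* 65,432 */
	printf("\n");
	printf_with_thousands_grouping(1234567);	/* 1,234,567 */
	printf("\n");
	return 0;
}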
@@ -71,10 +85,15 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
 		seq_printf(seq, ".");
 	seq_printf(seq, "] ");
 
-	seq_printf(seq, "sync'ed:%3u.%u%% ", res / 10, res % 10);
-	/* if more than 1 GB display in MB */
-	if (mdev->rs_total > 0x100000L)
-		seq_printf(seq, "(%lu/%lu)M\n\t",
+	if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
+		seq_printf(seq, "verified:");
+	else
+		seq_printf(seq, "sync'ed:");
+	seq_printf(seq, "%3u.%u%% ", res / 10, res % 10);
+
+	/* if more than a few GB, display in MB */
+	if (mdev->rs_total > (4UL << (30 - BM_BLOCK_SHIFT)))
+		seq_printf(seq, "(%lu/%lu)M",
 			    (unsigned long) Bit2KB(rs_left >> 10),
 			    (unsigned long) Bit2KB(mdev->rs_total >> 10));
 	else
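The new threshold reads as "4 GiB worth of bitmap bits": rs_total counts bits at one bit per 4 KiB block, so 4UL << (30 - BM_BLOCK_SHIFT) bits cover exactly 4 GiB. A sketch verifying the arithmetic (BM_BLOCK_SHIFT == 12 is assumed here, matching the 4k-per-bit granularity mentioned elsewhere in this diff):

#include <assert.h>
#include <stdio.h>

#define BM_BLOCK_SHIFT 12	/* 4 KiB per bitmap bit (assumption) */

int main(void)
{
	unsigned long threshold_bits = 4UL << (30 - BM_BLOCK_SHIFT);
	/* bits times bytes-per-bit = 4 * 2^30 bytes = 4 GiB */
	unsigned long long bytes =
		(unsigned long long)threshold_bits << BM_BLOCK_SHIFT;
	assert(bytes == 4ULL << 30);
	printf("threshold: %lu bits = %llu GiB\n",
	       threshold_bits, bytes >> 30);
	return 0;
}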
@@ -94,6 +113,7 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
 	/* Rolling marks. last_mark+1 may just now be modified.  last_mark+2 is
 	 * at least (DRBD_SYNC_MARKS-2)*DRBD_SYNC_MARK_STEP old, and has at
 	 * least DRBD_SYNC_MARK_STEP time before it will be modified. */
+	/* ------------------------ ~18s average ------------------------ */
 	i = (mdev->rs_last_mark + 2) % DRBD_SYNC_MARKS;
 	dt = (jiffies - mdev->rs_mark_time[i]) / HZ;
 	if (dt > (DRBD_SYNC_MARK_STEP * DRBD_SYNC_MARKS))
@@ -107,14 +127,24 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
 	seq_printf(seq, "finish: %lu:%02lu:%02lu",
 		rt / 3600, (rt % 3600) / 60, rt % 60);
 
-	/* current speed average over (SYNC_MARKS * SYNC_MARK_STEP) jiffies */
 	dbdt = Bit2KB(db/dt);
-	if (dbdt > 1000)
-		seq_printf(seq, " speed: %ld,%03ld",
-			dbdt/1000, dbdt % 1000);
-	else
-		seq_printf(seq, " speed: %ld", dbdt);
+	seq_printf(seq, " speed: ");
+	seq_printf_with_thousands_grouping(seq, dbdt);
+	seq_printf(seq, " (");
+	/* ------------------------- ~3s average ------------------------ */
+	if (proc_details >= 1) {
+		/* this is what drbd_rs_should_slow_down() uses */
+		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
+		dt = (jiffies - mdev->rs_mark_time[i]) / HZ;
+		if (!dt)
+			dt++;
+		db = mdev->rs_mark_left[i] - rs_left;
+		dbdt = Bit2KB(db/dt);
+		seq_printf_with_thousands_grouping(seq, dbdt);
+		seq_printf(seq, " -- ");
+	}
 
+	/* --------------------- long term average ---------------------- */
 	/* mean speed since syncer started
 	 * we do account for PausedSync periods */
 	dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
@@ -122,20 +152,34 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
 		dt = 1;
 	db = mdev->rs_total - rs_left;
 	dbdt = Bit2KB(db/dt);
-	if (dbdt > 1000)
-		seq_printf(seq, " (%ld,%03ld)",
-			dbdt/1000, dbdt % 1000);
-	else
-		seq_printf(seq, " (%ld)", dbdt);
+	seq_printf_with_thousands_grouping(seq, dbdt);
+	seq_printf(seq, ")");
 
-	if (mdev->state.conn == C_SYNC_TARGET) {
-		if (mdev->c_sync_rate > 1000)
-			seq_printf(seq, " want: %d,%03d",
-				   mdev->c_sync_rate / 1000, mdev->c_sync_rate % 1000);
-		else
-			seq_printf(seq, " want: %d", mdev->c_sync_rate);
+	if (mdev->state.conn == C_SYNC_TARGET ||
+	    mdev->state.conn == C_VERIFY_S) {
+		seq_printf(seq, " want: ");
+		seq_printf_with_thousands_grouping(seq, mdev->c_sync_rate);
 	}
 	seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : "");
+
+	if (proc_details >= 1) {
+		/* 64 bit:
+		 * we convert to sectors in the display below. */
+		unsigned long bm_bits = drbd_bm_bits(mdev);
+		unsigned long bit_pos;
+		if (mdev->state.conn == C_VERIFY_S ||
+		    mdev->state.conn == C_VERIFY_T)
+			bit_pos = bm_bits - mdev->ov_left;
+		else
+			bit_pos = mdev->bm_resync_fo;
+		/* Total sectors may be slightly off for oddly
+		 * sized devices. So what. */
+		seq_printf(seq,
+			"\t%3d%% sector pos: %llu/%llu\n",
+			(int)(bit_pos / (bm_bits/100+1)),
+			(unsigned long long)bit_pos * BM_SECT_PER_BIT,
+			(unsigned long long)bm_bits * BM_SECT_PER_BIT);
+	}
 }
 
 static void resync_dump_detail(struct seq_file *seq, struct lc_element *e)
@@ -232,20 +276,16 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
 			   mdev->epochs,
 			   write_ordering_chars[mdev->write_ordering]
 			);
-			seq_printf(seq, " oos:%lu\n",
-				   Bit2KB(drbd_bm_total_weight(mdev)));
+			seq_printf(seq, " oos:%llu\n",
+				   Bit2KB((unsigned long long)
+					   drbd_bm_total_weight(mdev)));
 		}
 		if (mdev->state.conn == C_SYNC_SOURCE ||
-		    mdev->state.conn == C_SYNC_TARGET)
+		    mdev->state.conn == C_SYNC_TARGET ||
+		    mdev->state.conn == C_VERIFY_S ||
+		    mdev->state.conn == C_VERIFY_T)
 			drbd_syncer_progress(mdev, seq);
 
-		if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
-			seq_printf(seq, "\t%3d%%      %lu/%lu\n",
-				   (int)((mdev->rs_total-mdev->ov_left) /
-					 (mdev->rs_total/100+1)),
-				   mdev->rs_total - mdev->ov_left,
-				   mdev->rs_total);
-
 		if (proc_details >= 1 && get_ldev_if_state(mdev, D_FAILED)) {
 			lc_seq_printf_stats(seq, mdev->resync);
 			lc_seq_printf_stats(seq, mdev->act_log);
@@ -265,7 +305,15 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
 
 static int drbd_proc_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, drbd_seq_show, PDE(inode)->data);
+	if (try_module_get(THIS_MODULE))
+		return single_open(file, drbd_seq_show, PDE(inode)->data);
+	return -ENODEV;
 }
 
+static int drbd_proc_release(struct inode *inode, struct file *file)
+{
+	module_put(THIS_MODULE);
+	return single_release(inode, file);
+}
+
 /* PROC FS stuff end */
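The open/release pair above pins the module for as long as the proc file is held open: try_module_get() on open, module_put() on release. A minimal userspace analogue of that reference-counting contract (illustration only):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static int module_refcount;
static bool module_live = true;

static bool proc_open(void)
{
	if (!module_live)	/* like try_module_get() failing */
		return false;
	module_refcount++;
	return true;
}

static void proc_release(void)
{
	assert(module_refcount > 0);
	module_refcount--;	/* like module_put() */
}

int main(void)
{
	if (proc_open()) {
		printf("open ok, refcount=%d\n", module_refcount);
		proc_release();
	}
	printf("refcount back to %d\n", module_refcount);
	return 0;
}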
[File diff suppressed because it is too large]
@@ -140,9 +140,14 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev,
 	struct hlist_node *n;
 	struct hlist_head *slot;
 
-	/* before we can signal completion to the upper layers,
-	 * we may need to close the current epoch */
+	/* Before we can signal completion to the upper layers,
+	 * we may need to close the current epoch.
+	 * We can skip this, if this request has not even been sent, because we
+	 * did not have a fully established connection yet/anymore, during
+	 * bitmap exchange, or while we are C_AHEAD due to congestion policy.
+	 */
 	if (mdev->state.conn >= C_CONNECTED &&
+	    (s & RQ_NET_SENT) != 0 &&
 	    req->epoch == mdev->newest_tle->br_number)
 		queue_barrier(mdev);
 
@@ -440,7 +445,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		req->rq_state |= RQ_LOCAL_COMPLETED;
 		req->rq_state &= ~RQ_LOCAL_PENDING;
 
-		__drbd_chk_io_error(mdev, FALSE);
+		__drbd_chk_io_error(mdev, false);
 		_req_may_be_done_not_susp(req, m);
 		put_ldev(mdev);
 		break;
@@ -461,7 +466,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
 		D_ASSERT(!(req->rq_state & RQ_NET_MASK));
 
-		__drbd_chk_io_error(mdev, FALSE);
+		__drbd_chk_io_error(mdev, false);
 		put_ldev(mdev);
 
 		/* no point in retrying if there is no good remote data,
@@ -545,6 +550,14 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
 		break;
 
+	case queue_for_send_oos:
+		req->rq_state |= RQ_NET_QUEUED;
+		req->w.cb =  w_send_oos;
+		drbd_queue_work(&mdev->data.work, &req->w);
+		break;
+
+	case oos_handed_to_network:
+		/* actually the same */
 	case send_canceled:
 		/* treat it the same */
 	case send_failed:
@@ -558,6 +571,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 
 	case handed_over_to_network:
 		/* assert something? */
+		if (bio_data_dir(req->master_bio) == WRITE)
+			atomic_add(req->size>>9, &mdev->ap_in_flight);
+
 		if (bio_data_dir(req->master_bio) == WRITE &&
 		    mdev->net_conf->wire_protocol == DRBD_PROT_A) {
 			/* this is what is dangerous about protocol A:
@@ -591,6 +607,9 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 			dec_ap_pending(mdev);
 		req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
 		req->rq_state |= RQ_NET_DONE;
+		if (req->rq_state & RQ_NET_SENT && req->rq_state & RQ_WRITE)
+			atomic_sub(req->size>>9, &mdev->ap_in_flight);
+
 		/* if it is still queued, we may not complete it here.
 		 * it will be canceled soon. */
 		if (!(req->rq_state & RQ_NET_QUEUED))
@@ -628,14 +647,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		req->rq_state |= RQ_NET_OK;
 		D_ASSERT(req->rq_state & RQ_NET_PENDING);
 		dec_ap_pending(mdev);
+		atomic_sub(req->size>>9, &mdev->ap_in_flight);
 		req->rq_state &= ~RQ_NET_PENDING;
 		_req_may_be_done_not_susp(req, m);
 		break;
 
 	case neg_acked:
 		/* assert something? */
-		if (req->rq_state & RQ_NET_PENDING)
+		if (req->rq_state & RQ_NET_PENDING) {
 			dec_ap_pending(mdev);
+			atomic_sub(req->size>>9, &mdev->ap_in_flight);
+		}
 		req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING);
 
 		req->rq_state |= RQ_NET_DONE;
						 | 
				
			
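Note on the ap_in_flight changes above: writes add their payload size, converted from bytes to 512-byte sectors via size>>9, when handed to the network, and subtract it again on write_acked_by_peer/neg_acked (and, for protocol A, on barrier_acked in the next hunk). This counter is what the congestion-fill check in drbd_make_request_common compares against net_conf->cong_fill. A standalone sketch of the unit conversion and bookkeeping (userspace C, illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned int ap_in_flight = 0;   /* sectors currently on the wire */
	unsigned int req_size = 32768;   /* one 32 KiB write, in bytes */

	ap_in_flight += req_size >> 9;   /* handed_over_to_network */
	printf("in flight: %u sectors\n", ap_in_flight);   /* 64 */

	ap_in_flight -= req_size >> 9;   /* write_acked_by_peer / neg_acked */
	printf("in flight: %u sectors\n", ap_in_flight);   /* 0 */
	return 0;
}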
@@ -690,8 +712,11 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 			dev_err(DEV, "FIXME (barrier_acked but pending)\n");
 			list_move(&req->tl_requests, &mdev->out_of_sequence_requests);
 		}
-		D_ASSERT(req->rq_state & RQ_NET_SENT);
-		req->rq_state |= RQ_NET_DONE;
+		if ((req->rq_state & RQ_NET_MASK) != 0) {
+			req->rq_state |= RQ_NET_DONE;
+			if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
+				atomic_sub(req->size>>9, &mdev->ap_in_flight);
+		}
 		_req_may_be_done(req, m); /* Allowed while state.susp */
 		break;
 
@@ -738,14 +763,14 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int size)
 	return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
 }
 
-static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
+static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
 {
 	const int rw = bio_rw(bio);
 	const int size = bio->bi_size;
 	const sector_t sector = bio->bi_sector;
 	struct drbd_tl_epoch *b = NULL;
 	struct drbd_request *req;
-	int local, remote;
+	int local, remote, send_oos = 0;
 	int err = -EIO;
 	int ret = 0;
 
@@ -759,6 +784,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
 		bio_endio(bio, -ENOMEM);
 		return 0;
 	}
+	req->start_time = start_time;
 
 	local = get_ldev(mdev);
 	if (!local) {
@@ -808,9 +834,9 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
 		drbd_al_begin_io(mdev, sector);
 	}
 
-	remote = remote && (mdev->state.pdsk == D_UP_TO_DATE ||
-			    (mdev->state.pdsk == D_INCONSISTENT &&
-			     mdev->state.conn >= C_CONNECTED));
+	remote = remote && drbd_should_do_remote(mdev->state);
+	send_oos = rw == WRITE && drbd_should_send_oos(mdev->state);
+	D_ASSERT(!(remote && send_oos));
 
 	if (!(local || remote) && !is_susp(mdev->state)) {
 		if (__ratelimit(&drbd_ratelimit_state))
@@ -824,7 +850,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
 	 * but there is a race between testing the bit and pointer outside the
 	 * spinlock, and grabbing the spinlock.
 	 * if we lost that race, we retry.  */
-	if (rw == WRITE && remote &&
+	if (rw == WRITE && (remote || send_oos) &&
 	    mdev->unused_spare_tle == NULL &&
 	    test_bit(CREATE_BARRIER, &mdev->flags)) {
 allocate_barrier:
@@ -842,18 +868,19 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
 	if (is_susp(mdev->state)) {
 		/* If we got suspended, use the retry mechanism of
 		   generic_make_request() to restart processing of this
-		   bio. In the next call to drbd_make_request_26
+		   bio. In the next call to drbd_make_request
 		   we sleep in inc_ap_bio() */
 		ret = 1;
 		spin_unlock_irq(&mdev->req_lock);
 		goto fail_free_complete;
 	}
 
-	if (remote) {
-		remote = (mdev->state.pdsk == D_UP_TO_DATE ||
-			    (mdev->state.pdsk == D_INCONSISTENT &&
-			     mdev->state.conn >= C_CONNECTED));
-		if (!remote)
+	if (remote || send_oos) {
+		remote = drbd_should_do_remote(mdev->state);
+		send_oos = rw == WRITE && drbd_should_send_oos(mdev->state);
+		D_ASSERT(!(remote && send_oos));
+
+		if (!(remote || send_oos))
 			dev_warn(DEV, "lost connection while grabbing the req_lock!\n");
 		if (!(local || remote)) {
 			dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
@@ -866,7 +893,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
 		mdev->unused_spare_tle = b;
 		b = NULL;
 	}
-	if (rw == WRITE && remote &&
+	if (rw == WRITE && (remote || send_oos) &&
 	    mdev->unused_spare_tle == NULL &&
 	    test_bit(CREATE_BARRIER, &mdev->flags)) {
 		/* someone closed the current epoch
@@ -889,7 +916,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
 	 * barrier packet.  To get the write ordering right, we only have to
 	 * make sure that, if this is a write request and it triggered a
 	 * barrier packet, this request is queued within the same spinlock. */
-	if (remote && mdev->unused_spare_tle &&
+	if ((remote || send_oos) && mdev->unused_spare_tle &&
 	    test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
 		_tl_add_barrier(mdev, mdev->unused_spare_tle);
 		mdev->unused_spare_tle = NULL;
@@ -937,6 +964,34 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
 				? queue_for_net_write
 				: queue_for_net_read);
 	}
+	if (send_oos && drbd_set_out_of_sync(mdev, sector, size))
+		_req_mod(req, queue_for_send_oos);
+
+	if (remote &&
+	    mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
+		int congested = 0;
+
+		if (mdev->net_conf->cong_fill &&
+		    atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
+			dev_info(DEV, "Congestion-fill threshold reached\n");
+			congested = 1;
+		}
+
+		if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
+			dev_info(DEV, "Congestion-extents threshold reached\n");
+			congested = 1;
+		}
+
+		if (congested) {
+			queue_barrier(mdev); /* last barrier, after mirrored writes */
+
+			if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
+				_drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
+			else  /*mdev->net_conf->on_congestion == OC_DISCONNECT */
+				_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
+		}
+	}
+
 	spin_unlock_irq(&mdev->req_lock);
 	kfree(b); /* if someone else has beaten us to it... */
 
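The block added above is the congestion policy itself: with both peers at protocol 96+ and on_congestion != OC_BLOCK, either threshold (cong_fill against the in-flight sector count, cong_extents against the activity-log usage) switches the connection to C_AHEAD (keep writing locally, only mark blocks out of sync) or disconnects. A minimal standalone sketch of that decision; the parameter names mirror the net_conf fields, but the function itself is invented for illustration:

#include <stdio.h>

enum on_congestion { OC_BLOCK, OC_PULL_AHEAD, OC_DISCONNECT };

static const char *decide(unsigned int ap_in_flight, unsigned int cong_fill,
			  unsigned int al_used, unsigned int cong_extents,
			  enum on_congestion policy)
{
	/* either threshold marks the link congested, as in the hunk above */
	int congested = (cong_fill && ap_in_flight >= cong_fill) ||
			al_used >= cong_extents;

	if (!congested)
		return "keep replicating";
	return policy == OC_PULL_AHEAD ? "go C_AHEAD (send out-of-sync info only)"
				       : "disconnect";
}

int main(void)
{
	printf("%s\n", decide(128, 1024, 10, 67, OC_PULL_AHEAD));
	printf("%s\n", decide(2048, 1024, 10, 67, OC_PULL_AHEAD));
	return 0;
}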
@@ -949,9 +1004,9 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
 		 * stable storage, and this is a WRITE, we may not even submit
 		 * this bio. */
 		if (get_ldev(mdev)) {
-			if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
-					     : rw == READ  ? DRBD_FAULT_DT_RD
-					     :               DRBD_FAULT_DT_RA))
+			if (drbd_insert_fault(mdev,   rw == WRITE ? DRBD_FAULT_DT_WR
+						    : rw == READ  ? DRBD_FAULT_DT_RD
+						    :               DRBD_FAULT_DT_RA))
 				bio_endio(req->private_bio, -EIO);
 			else
 				generic_make_request(req->private_bio);
@@ -1018,16 +1073,19 @@ static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write)
 	return 0;
 }
 
-int drbd_make_request_26(struct request_queue *q, struct bio *bio)
+int drbd_make_request(struct request_queue *q, struct bio *bio)
 {
 	unsigned int s_enr, e_enr;
 	struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata;
+	unsigned long start_time;
 
 	if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) {
 		bio_endio(bio, -EPERM);
 		return 0;
 	}
 
+	start_time = jiffies;
+
 	/*
 	 * what we "blindly" assume:
 	 */
@@ -1042,12 +1100,12 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
 
 	if (likely(s_enr == e_enr)) {
 		inc_ap_bio(mdev, 1);
-		return drbd_make_request_common(mdev, bio);
+		return drbd_make_request_common(mdev, bio, start_time);
 	}
 
 	/* can this bio be split generically?
 	 * Maybe add our own split-arbitrary-bios function. */
-	if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size > DRBD_MAX_SEGMENT_SIZE) {
+	if (bio->bi_vcnt != 1 || bio->bi_idx != 0 || bio->bi_size > DRBD_MAX_BIO_SIZE) {
 		/* rather error out here than BUG in bio_split */
 		dev_err(DEV, "bio would need to, but cannot, be split: "
 		    "(vcnt=%u,idx=%u,size=%u,sector=%llu)\n",
@@ -1069,11 +1127,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
 		const int sps = 1 << HT_SHIFT; /* sectors per slot */
 		const int mask = sps - 1;
 		const sector_t first_sectors = sps - (sect & mask);
-		bp = bio_split(bio,
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-				bio_split_pool,
-#endif
-				first_sectors);
+		bp = bio_split(bio, first_sectors);
 
 		/* we need to get a "reference count" (ap_bio_cnt)
 		 * to avoid races with the disconnect/reconnect/suspend code.
@@ -1084,10 +1138,10 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
 
 		D_ASSERT(e_enr == s_enr + 1);
 
-		while (drbd_make_request_common(mdev, &bp->bio1))
+		while (drbd_make_request_common(mdev, &bp->bio1, start_time))
 			inc_ap_bio(mdev, 1);
 
-		while (drbd_make_request_common(mdev, &bp->bio2))
+		while (drbd_make_request_common(mdev, &bp->bio2, start_time))
 			inc_ap_bio(mdev, 1);
 
 		dec_ap_bio(mdev);
@@ -1098,7 +1152,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
 }
 
 /* This is called by bio_add_page().  With this function we reduce
- * the number of BIOs that span over multiple DRBD_MAX_SEGMENT_SIZEs
+ * the number of BIOs that span over multiple DRBD_MAX_BIO_SIZEs
 * units (was AL_EXTENTs).
 *
 * we do the calculation within the lower 32bit of the byte offsets,
@@ -1108,7 +1162,7 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
 * As long as the BIO is empty we have to allow at least one bvec,
 * regardless of size and offset.  so the resulting bio may still
 * cross extent boundaries.  those are dealt with (bio_split) in
- * drbd_make_request_26.
+ * drbd_make_request.
 */
 int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
 {
@@ -1118,8 +1172,8 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
 	unsigned int bio_size = bvm->bi_size;
 	int limit, backing_limit;
 
-	limit = DRBD_MAX_SEGMENT_SIZE
-	      - ((bio_offset & (DRBD_MAX_SEGMENT_SIZE-1)) + bio_size);
+	limit = DRBD_MAX_BIO_SIZE
+	      - ((bio_offset & (DRBD_MAX_BIO_SIZE-1)) + bio_size);
 	if (limit < 0)
 		limit = 0;
 	if (bio_size == 0) {
@@ -1136,3 +1190,42 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
 	}
 	return limit;
 }
+
+void request_timer_fn(unsigned long data)
+{
+	struct drbd_conf *mdev = (struct drbd_conf *) data;
+	struct drbd_request *req; /* oldest request */
+	struct list_head *le;
+	unsigned long et = 0; /* effective timeout = ko_count * timeout */
+
+	if (get_net_conf(mdev)) {
+		et = mdev->net_conf->timeout*HZ/10 * mdev->net_conf->ko_count;
+		put_net_conf(mdev);
+	}
+	if (!et || mdev->state.conn < C_WF_REPORT_PARAMS)
+		return; /* Recurring timer stopped */
+
+	spin_lock_irq(&mdev->req_lock);
+	le = &mdev->oldest_tle->requests;
+	if (list_empty(le)) {
+		spin_unlock_irq(&mdev->req_lock);
+		mod_timer(&mdev->request_timer, jiffies + et);
+		return;
+	}
+
+	le = le->prev;
+	req = list_entry(le, struct drbd_request, tl_requests);
+	if (time_is_before_eq_jiffies(req->start_time + et)) {
+		if (req->rq_state & RQ_NET_PENDING) {
+			dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
+			_drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE, NULL);
+		} else {
+			dev_warn(DEV, "Local backing block device frozen?\n");
+			mod_timer(&mdev->request_timer, jiffies + et);
+		}
+	} else {
+		mod_timer(&mdev->request_timer, req->start_time + et);
+	}
+
+	spin_unlock_irq(&mdev->req_lock);
+}
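request_timer_fn() above is the new per-device request watchdog: net_conf->timeout is stored in tenths of a second, so the effective timeout et covers ko_count full timeout periods, and only the oldest request in the transfer log has to be checked. Worked example of that arithmetic (the HZ value is assumed, for illustration only):

#include <stdio.h>

#define HZ 1000   /* assumed tick rate for the example */

int main(void)
{
	unsigned int timeout = 60;   /* 6.0 s, configured in tenths of a second */
	unsigned int ko_count = 7;
	unsigned long et = timeout * HZ / 10 * ko_count;

	/* 42000 jiffies = 42 s before C_TIMEOUT is forced */
	printf("effective timeout: %lu jiffies (%lu s)\n", et, et / HZ);
	return 0;
}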
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
@@ -82,14 +82,16 @@ enum drbd_req_event {
 	to_be_submitted,
 
 	/* XXX yes, now I am inconsistent...
-	 * these two are not "events" but "actions"
+	 * these are not "events" but "actions"
 	 * oh, well... */
 	queue_for_net_write,
 	queue_for_net_read,
+	queue_for_send_oos,
 
 	send_canceled,
 	send_failed,
 	handed_over_to_network,
+	oos_handed_to_network,
 	connection_lost_while_pending,
 	read_retry_remote_canceled,
 	recv_acked_by_peer,
@@ -289,7 +291,6 @@ static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
 		req->epoch       = 0;
 		req->sector      = bio_src->bi_sector;
 		req->size        = bio_src->bi_size;
-		req->start_time  = jiffies;
 		INIT_HLIST_NODE(&req->colision);
 		INIT_LIST_HEAD(&req->tl_requests);
 		INIT_LIST_HEAD(&req->w.list);
@@ -321,6 +322,7 @@ extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		struct bio_and_error *m);
 extern void complete_master_bio(struct drbd_conf *mdev,
 		struct bio_and_error *m);
+extern void request_timer_fn(unsigned long data);
 
 /* use this if you don't want to deal with calling complete_master_bio()
  * outside the spinlock, e.g. when walking some list on cleanup. */
@@ -338,23 +340,43 @@ static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
 	return rv;
 }
 
-/* completion of master bio is outside of spinlock.
- * If you need it irqsave, do it your self!
- * Which means: don't use from bio endio callback. */
+/* completion of master bio is outside of our spinlock.
+ * We still may or may not be inside some irqs disabled section
+ * of the lower level driver completion callback, so we need to
+ * spin_lock_irqsave here. */
 static inline int req_mod(struct drbd_request *req,
 		enum drbd_req_event what)
 {
+	unsigned long flags;
 	struct drbd_conf *mdev = req->mdev;
 	struct bio_and_error m;
 	int rv;
 
-	spin_lock_irq(&mdev->req_lock);
+	spin_lock_irqsave(&mdev->req_lock, flags);
 	rv = __req_mod(req, what, &m);
-	spin_unlock_irq(&mdev->req_lock);
+	spin_unlock_irqrestore(&mdev->req_lock, flags);
 
 	if (m.bio)
 		complete_master_bio(mdev, &m);
 
 	return rv;
 }
 
+static inline bool drbd_should_do_remote(union drbd_state s)
+{
+	return s.pdsk == D_UP_TO_DATE ||
+		(s.pdsk >= D_INCONSISTENT &&
+		 s.conn >= C_WF_BITMAP_T &&
+		 s.conn < C_AHEAD);
+	/* Before proto 96 that was >= CONNECTED instead of >= C_WF_BITMAP_T.
+	   That is equivalent since before 96 IO was frozen in the C_WF_BITMAP*
+	   states. */
+}
+static inline bool drbd_should_send_oos(union drbd_state s)
+{
+	return s.conn == C_AHEAD || s.conn == C_WF_BITMAP_S;
+	/* pdsk = D_INCONSISTENT as a consequence. Protocol 96 check not necessary
+	   since we enter state C_AHEAD only if proto >= 96 */
+}
+
 #endif
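The two inline predicates added above split the write path by state: drbd_should_do_remote() selects the states in which writes are actually mirrored, drbd_should_send_oos() the states (C_AHEAD, C_WF_BITMAP_S) in which writes are merely flagged out of sync; the ranges are disjoint, which is what D_ASSERT(!(remote && send_oos)) in drbd_make_request_common relies on. A standalone model with reduced enums (same relative order as a subset of the real ones):

#include <stdio.h>

enum pdsk { D_INCONSISTENT, D_UP_TO_DATE };
enum conn { C_CONNECTED, C_WF_BITMAP_S, C_WF_BITMAP_T, C_SYNC_TARGET, C_AHEAD };

static int should_do_remote(enum pdsk pdsk, enum conn conn)
{
	return pdsk == D_UP_TO_DATE ||
	       (pdsk >= D_INCONSISTENT && conn >= C_WF_BITMAP_T && conn < C_AHEAD);
}

static int should_send_oos(enum conn conn)
{
	return conn == C_AHEAD || conn == C_WF_BITMAP_S;
}

int main(void)
{
	/* congested, pulled ahead: only out-of-sync info goes out */
	printf("C_AHEAD:     remote=%d oos=%d\n",
	       should_do_remote(D_INCONSISTENT, C_AHEAD), should_send_oos(C_AHEAD));
	/* normal replication */
	printf("C_CONNECTED: remote=%d oos=%d\n",
	       should_do_remote(D_UP_TO_DATE, C_CONNECTED), should_send_oos(C_CONNECTED));
	return 0;
}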
diff --git a/drivers/block/drbd/drbd_strings.c b/drivers/block/drbd/drbd_strings.c
@@ -48,6 +48,8 @@ static const char *drbd_conn_s_names[] = {
 	[C_PAUSED_SYNC_T]    = "PausedSyncT",
 	[C_VERIFY_S]         = "VerifyS",
 	[C_VERIFY_T]         = "VerifyT",
+	[C_AHEAD]            = "Ahead",
+	[C_BEHIND]           = "Behind",
 };
 
 static const char *drbd_role_s_names[] = {
@@ -92,7 +94,7 @@ static const char *drbd_state_sw_errors[] = {
 const char *drbd_conn_str(enum drbd_conns s)
 {
 	/* enums are unsigned... */
-	return s > C_PAUSED_SYNC_T ? "TOO_LARGE" : drbd_conn_s_names[s];
+	return s > C_BEHIND ? "TOO_LARGE" : drbd_conn_s_names[s];
 }
 
 const char *drbd_role_str(enum drbd_role s)
@@ -105,7 +107,7 @@ const char *drbd_disk_str(enum drbd_disk_state s)
 	return s > D_UP_TO_DATE    ? "TOO_LARGE" : drbd_disk_s_names[s];
 }
 
-const char *drbd_set_st_err_str(enum drbd_state_ret_codes err)
+const char *drbd_set_st_err_str(enum drbd_state_rv err)
 {
 	return err <= SS_AFTER_LAST_ERROR ? "TOO_SMALL" :
 	       err > SS_TWO_PRIMARIES ? "TOO_LARGE"
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
@@ -39,18 +39,17 @@
 #include "drbd_req.h"
 
 static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel);
+static int w_make_resync_request(struct drbd_conf *mdev,
+				 struct drbd_work *w, int cancel);
 
 
-
-/* defined here:
-   drbd_md_io_complete
-   drbd_endio_sec
-   drbd_endio_pri
-
- * more endio handlers:
-   atodb_endio in drbd_actlog.c
-   drbd_bm_async_io_complete in drbd_bitmap.c
-
+/* endio handlers:
+ *   drbd_md_io_complete (defined here)
+ *   drbd_endio_pri (defined here)
+ *   drbd_endio_sec (defined here)
+ *   bm_async_io_complete (defined in drbd_bitmap.c)
+ *
  * For all these callbacks, note the following:
  * The callbacks will be called in irq context by the IDE drivers,
  * and in Softirqs/Tasklets/BH context by the SCSI drivers.
@@ -94,7 +93,7 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
 	if (list_empty(&mdev->read_ee))
 		wake_up(&mdev->ee_wait);
 	if (test_bit(__EE_WAS_ERROR, &e->flags))
-		__drbd_chk_io_error(mdev, FALSE);
+		__drbd_chk_io_error(mdev, false);
 	spin_unlock_irqrestore(&mdev->req_lock, flags);
 
 	drbd_queue_work(&mdev->data.work, &e->w);
@@ -137,7 +136,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
 		: list_empty(&mdev->active_ee);
 
 	if (test_bit(__EE_WAS_ERROR, &e->flags))
-		__drbd_chk_io_error(mdev, FALSE);
+		__drbd_chk_io_error(mdev, false);
 	spin_unlock_irqrestore(&mdev->req_lock, flags);
 
 	if (is_syncer_req)
@@ -163,14 +162,15 @@ void drbd_endio_sec(struct bio *bio, int error)
 	int uptodate = bio_flagged(bio, BIO_UPTODATE);
 	int is_write = bio_data_dir(bio) == WRITE;
 
-	if (error)
+	if (error && __ratelimit(&drbd_ratelimit_state))
 		dev_warn(DEV, "%s: error=%d s=%llus\n",
 				is_write ? "write" : "read", error,
 				(unsigned long long)e->sector);
 	if (!error && !uptodate) {
-		dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
-				is_write ? "write" : "read",
-				(unsigned long long)e->sector);
+		if (__ratelimit(&drbd_ratelimit_state))
+			dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
+					is_write ? "write" : "read",
+					(unsigned long long)e->sector);
 		/* strange behavior of some lower level drivers...
 		 * fail the request by clearing the uptodate flag,
 		 * but do not return any error?! */
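Both warnings in drbd_endio_sec() are now guarded by __ratelimit(&drbd_ratelimit_state), so a dying lower-level device cannot flood the log from its completion path. The kernel helper allows a burst of messages per interval and then suppresses; a toy userspace model of that idea (not the lib/ratelimit.c implementation):

#include <stdio.h>

struct ratelimit { int interval; int burst; int begin; int printed; };

static int ratelimit_ok(struct ratelimit *rs, int now)
{
	if (now - rs->begin >= rs->interval) {   /* interval rolled over */
		rs->begin = now;
		rs->printed = 0;
	}
	return rs->printed++ < rs->burst;        /* allow only a burst */
}

int main(void)
{
	struct ratelimit rs = { .interval = 5, .burst = 2 };
	int t;

	for (t = 0; t < 12; t++)
		if (ratelimit_ok(&rs, t))
			printf("t=%d: I/O error logged\n", t);
	return 0;
}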
@@ -250,13 +250,6 @@ int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	return w_send_read_req(mdev, w, 0);
 }
 
-int w_resync_inactive(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
-{
-	ERR_IF(cancel) return 1;
-	dev_err(DEV, "resync inactive, but callback triggered??\n");
-	return 1; /* Simply ignore this! */
-}
-
 void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest)
 {
 	struct hash_desc desc;
@@ -355,7 +348,7 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
 	if (!get_ldev(mdev))
 		return -EIO;
 
-	if (drbd_rs_should_slow_down(mdev))
+	if (drbd_rs_should_slow_down(mdev, sector))
 		goto defer;
 
 	/* GFP_TRY, because if there is no memory available right now, this may
@@ -373,9 +366,10 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
 	if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
 		return 0;
 
-	/* drbd_submit_ee currently fails for one reason only:
-	 * not being able to allocate enough bios.
-	 * Is dropping the connection going to help? */
+	/* If it failed because of ENOMEM, retry should help.  If it failed
+	 * because bio_add_page failed (probably broken lower level driver),
+	 * retry may or may not help.
+	 * If it does not, you may need to force disconnect. */
 	spin_lock_irq(&mdev->req_lock);
 	list_del(&e->w.list);
 	spin_unlock_irq(&mdev->req_lock);
@@ -386,26 +380,25 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
 	return -EAGAIN;
 }
 
+int w_resync_timer(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+{
+	switch (mdev->state.conn) {
+	case C_VERIFY_S:
+		w_make_ov_request(mdev, w, cancel);
+		break;
+	case C_SYNC_TARGET:
+		w_make_resync_request(mdev, w, cancel);
+		break;
+	}
+
+	return 1;
+}
+
 void resync_timer_fn(unsigned long data)
 {
 	struct drbd_conf *mdev = (struct drbd_conf *) data;
-	int queue;
 
-	queue = 1;
-	switch (mdev->state.conn) {
-	case C_VERIFY_S:
-		mdev->resync_work.cb = w_make_ov_request;
-		break;
-	case C_SYNC_TARGET:
-		mdev->resync_work.cb = w_make_resync_request;
-		break;
-	default:
-		queue = 0;
-		mdev->resync_work.cb = w_resync_inactive;
-	}
-
 	/* harmless race: list_empty outside data.work.q_lock */
-	if (list_empty(&mdev->resync_work.list) && queue)
+	if (list_empty(&mdev->resync_work.list))
 		drbd_queue_work(&mdev->data.work, &mdev->resync_work);
 }
 
@@ -438,7 +431,7 @@ static void fifo_add_val(struct fifo_buffer *fb, int value)
 		fb->values[i] += value;
 }
 
-int drbd_rs_controller(struct drbd_conf *mdev)
+static int drbd_rs_controller(struct drbd_conf *mdev)
 {
 	unsigned int sect_in;  /* Number of sectors that came in since the last turn */
 	unsigned int want;     /* The number of sectors we want in the proxy */
@@ -492,29 +485,36 @@ int drbd_rs_controller(struct drbd_conf *mdev)
 	return req_sect;
 }
 
-int w_make_resync_request(struct drbd_conf *mdev,
-		struct drbd_work *w, int cancel)
+static int drbd_rs_number_requests(struct drbd_conf *mdev)
+{
+	int number;
+	if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
+		number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
+		mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
+	} else {
+		mdev->c_sync_rate = mdev->sync_conf.rate;
+		number = SLEEP_TIME * mdev->c_sync_rate  / ((BM_BLOCK_SIZE / 1024) * HZ);
+	}
+
+	/* ignore the amount of pending requests, the resync controller should
+	 * throttle down to incoming reply rate soon enough anyways. */
+	return number;
+}
+
+static int w_make_resync_request(struct drbd_conf *mdev,
+				 struct drbd_work *w, int cancel)
 {
 	unsigned long bit;
 	sector_t sector;
 	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
-	int max_segment_size;
-	int number, rollback_i, size, pe, mx;
+	int max_bio_size;
+	int number, rollback_i, size;
 	int align, queued, sndbuf;
 	int i = 0;
 
 	if (unlikely(cancel))
 		return 1;
 
-	if (unlikely(mdev->state.conn < C_CONNECTED)) {
-		dev_err(DEV, "Confused in w_make_resync_request()! cstate < Connected");
-		return 0;
-	}
-
 	if (mdev->state.conn != C_SYNC_TARGET)
 		dev_err(DEV, "%s in w_make_resync_request\n",
 			drbd_conn_str(mdev->state.conn));
 
 	if (mdev->rs_total == 0) {
 		/* empty resync? */
 		drbd_resync_finished(mdev);
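drbd_rs_number_requests() above centralizes the per-slot resync budget that w_make_resync_request() and w_make_ov_request() previously computed separately: either the dynamic controller output or, at a fixed rate, SLEEP_TIME * rate / ((BM_BLOCK_SIZE/1024) * HZ) requests of one bitmap block each. Worked example of the fixed-rate branch (HZ and the 100 ms slot length are assumed here):

#include <stdio.h>

#define HZ 250
#define SLEEP_TIME (HZ / 10)   /* one resync slot = 100 ms */
#define BM_BLOCK_SIZE 4096     /* one bitmap bit covers 4 KiB */

int main(void)
{
	unsigned int rate_kb = 10240;   /* sync_conf.rate in KiB/s */
	unsigned int number = SLEEP_TIME * rate_kb / ((BM_BLOCK_SIZE / 1024) * HZ);

	/* 256 requests * 4 KiB = 1024 KiB per 100 ms = 10240 KiB/s */
	printf("%u requests per slot (%u KiB)\n",
	       number, number * (BM_BLOCK_SIZE / 1024));
	return 0;
}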
@@ -527,49 +527,19 @@ int w_make_resync_request(struct drbd_conf *mdev,
 		   to continue resync with a broken disk makes no sense at
 		   all */
 		dev_err(DEV, "Disk broke down during resync!\n");
-		mdev->resync_work.cb = w_resync_inactive;
 		return 1;
 	}
 
 	/* starting with drbd 8.3.8, we can handle multi-bio EEs,
 	 * if it should be necessary */
-	max_segment_size =
-		mdev->agreed_pro_version < 94 ? queue_max_segment_size(mdev->rq_queue) :
-		mdev->agreed_pro_version < 95 ?	DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_SEGMENT_SIZE;
+	max_bio_size =
+		mdev->agreed_pro_version < 94 ? queue_max_hw_sectors(mdev->rq_queue) << 9 :
+		mdev->agreed_pro_version < 95 ?	DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_BIO_SIZE;
 
-	if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
-		number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
-		mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
-	} else {
-		mdev->c_sync_rate = mdev->sync_conf.rate;
-		number = SLEEP_TIME * mdev->c_sync_rate  / ((BM_BLOCK_SIZE / 1024) * HZ);
-	}
-
-	/* Throttle resync on lower level disk activity, which may also be
-	 * caused by application IO on Primary/SyncTarget.
-	 * Keep this after the call to drbd_rs_controller, as that assumes
-	 * to be called as precisely as possible every SLEEP_TIME,
-	 * and would be confused otherwise. */
-	if (drbd_rs_should_slow_down(mdev))
+	number = drbd_rs_number_requests(mdev);
+	if (number == 0)
 		goto requeue;
 
-	mutex_lock(&mdev->data.mutex);
-	if (mdev->data.socket)
-		mx = mdev->data.socket->sk->sk_rcvbuf / sizeof(struct p_block_req);
-	else
-		mx = 1;
-	mutex_unlock(&mdev->data.mutex);
-
-	/* For resync rates >160MB/sec, allow more pending RS requests */
-	if (number > mx)
-		mx = number;
-
-	/* Limit the number of pending RS requests to no more than the peer's receive buffer */
-	pe = atomic_read(&mdev->rs_pending_cnt);
-	if ((pe + number) > mx) {
-		number = mx - pe;
-	}
-
 	for (i = 0; i < number; i++) {
 		/* Stop generating RS requests, when half of the send buffer is filled */
 		mutex_lock(&mdev->data.mutex);
@@ -588,16 +558,16 @@ int w_make_resync_request(struct drbd_conf *mdev,
 		size = BM_BLOCK_SIZE;
 		bit  = drbd_bm_find_next(mdev, mdev->bm_resync_fo);
 
-		if (bit == -1UL) {
+		if (bit == DRBD_END_OF_BITMAP) {
 			mdev->bm_resync_fo = drbd_bm_bits(mdev);
-			mdev->resync_work.cb = w_resync_inactive;
 			put_ldev(mdev);
 			return 1;
 		}
 
 		sector = BM_BIT_TO_SECT(bit);
 
-		if (drbd_try_rs_begin_io(mdev, sector)) {
+		if (drbd_rs_should_slow_down(mdev, sector) ||
+		    drbd_try_rs_begin_io(mdev, sector)) {
 			mdev->bm_resync_fo = bit;
 			goto requeue;
 		}
@@ -608,7 +578,7 @@ int w_make_resync_request(struct drbd_conf *mdev,
 			goto next_sector;
 		}
 
-#if DRBD_MAX_SEGMENT_SIZE > BM_BLOCK_SIZE
+#if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE
 		/* try to find some adjacent bits.
 		 * we stop if we have already the maximum req size.
 		 *
@@ -618,7 +588,7 @@ int w_make_resync_request(struct drbd_conf *mdev,
 		align = 1;
 		rollback_i = i;
 		for (;;) {
-			if (size + BM_BLOCK_SIZE > max_segment_size)
+			if (size + BM_BLOCK_SIZE > max_bio_size)
 				break;
 
 			/* Be always aligned */
@@ -685,7 +655,6 @@ int w_make_resync_request(struct drbd_conf *mdev,
 		 * resync data block, and the last bit is cleared.
 		 * until then resync "work" is "inactive" ...
 		 */
-		mdev->resync_work.cb = w_resync_inactive;
 		put_ldev(mdev);
 		return 1;
 	}
@@ -706,27 +675,18 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	if (unlikely(cancel))
 		return 1;
 
-	if (unlikely(mdev->state.conn < C_CONNECTED)) {
-		dev_err(DEV, "Confused in w_make_ov_request()! cstate < Connected");
-		return 0;
-	}
-
-	number = SLEEP_TIME*mdev->sync_conf.rate / ((BM_BLOCK_SIZE/1024)*HZ);
-	if (atomic_read(&mdev->rs_pending_cnt) > number)
-		goto requeue;
-
-	number -= atomic_read(&mdev->rs_pending_cnt);
+	number = drbd_rs_number_requests(mdev);
 
 	sector = mdev->ov_position;
 	for (i = 0; i < number; i++) {
 		if (sector >= capacity) {
-			mdev->resync_work.cb = w_resync_inactive;
 			return 1;
 		}
 
 		size = BM_BLOCK_SIZE;
 
-		if (drbd_try_rs_begin_io(mdev, sector)) {
+		if (drbd_rs_should_slow_down(mdev, sector) ||
+		    drbd_try_rs_begin_io(mdev, sector)) {
 			mdev->ov_position = sector;
 			goto requeue;
 		}
@@ -744,11 +704,33 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	mdev->ov_position = sector;
 
  requeue:
+	mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
 	mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
 	return 1;
 }
 
+void start_resync_timer_fn(unsigned long data)
+{
+	struct drbd_conf *mdev = (struct drbd_conf *) data;
+
+	drbd_queue_work(&mdev->data.work, &mdev->start_resync_work);
+}
+
+int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+{
+	if (atomic_read(&mdev->unacked_cnt) || atomic_read(&mdev->rs_pending_cnt)) {
+		dev_warn(DEV, "w_start_resync later...\n");
+		mdev->start_resync_timer.expires = jiffies + HZ/10;
+		add_timer(&mdev->start_resync_timer);
+		return 1;
+	}
+
+	drbd_start_resync(mdev, C_SYNC_SOURCE);
+	clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags);
+	return 1;
+}
+
 int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 {
 	kfree(w);
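w_start_resync() above defers drbd_start_resync(mdev, C_SYNC_SOURCE) until unacked_cnt and rs_pending_cnt have drained, re-arming start_resync_timer every HZ/10 until then. A standalone model of that retry pattern (the counters decay artificially here just so the loop terminates):

#include <stdio.h>

int main(void)
{
	int unacked = 3, rs_pending = 1, retries = 0;

	while (unacked || rs_pending) {
		printf("w_start_resync later... (unacked=%d, pending=%d)\n",
		       unacked, rs_pending);
		retries++;               /* timer re-armed for HZ/10 later */
		if (unacked)
			unacked--;       /* acks trickle in meanwhile */
		if (rs_pending)
			rs_pending--;
	}
	printf("starting resync after %d re-arm(s)\n", retries);
	return 0;
}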
@@ -782,6 +764,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
 	union drbd_state os, ns;
 	struct drbd_work *w;
 	char *khelper_cmd = NULL;
+	int verify_done = 0;
 
 	/* Remove all elements from the resync LRU. Since future actions
 	 * might set bits in the (main) bitmap, then the entries in the
@@ -792,8 +775,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
 		 * queue (or even the read operations for those packets
 		 * is not finished by now).   Retry in 100ms. */
 
-		__set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(HZ / 10);
+		schedule_timeout_interruptible(HZ / 10);
 		w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
 		if (w) {
 			w->cb = w_resync_finished;
@@ -818,6 +800,8 @@ int drbd_resync_finished(struct drbd_conf *mdev)
 	spin_lock_irq(&mdev->req_lock);
 	os = mdev->state;
 
+	verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
+
 	/* This protects us against multiple calls (that can happen in the presence
 	   of application IO), and against connectivity loss just before we arrive here. */
 	if (os.conn <= C_CONNECTED)
@@ -827,8 +811,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
 	ns.conn = C_CONNECTED;
 
 	dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
-	     (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) ?
-	     "Online verify " : "Resync",
+	     verify_done ? "Online verify " : "Resync",
 	     dt + mdev->rs_paused, mdev->rs_paused, dbdt);
 
 	n_oos = drbd_bm_total_weight(mdev);
@@ -886,14 +869,18 @@ int drbd_resync_finished(struct drbd_conf *mdev)
 			}
 		}
 
-		drbd_uuid_set_bm(mdev, 0UL);
-
-		if (mdev->p_uuid) {
-			/* Now the two UUID sets are equal, update what we
-			 * know of the peer. */
-			int i;
-			for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
-				mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
+		if (!(os.conn == C_VERIFY_S || os.conn == C_VERIFY_T)) {
+			/* for verify runs, we don't update uuids here,
+			 * so there would be nothing to report. */
+			drbd_uuid_set_bm(mdev, 0UL);
+			drbd_print_uuids(mdev, "updated UUIDs");
+			if (mdev->p_uuid) {
+				/* Now the two UUID sets are equal, update what we
+				 * know of the peer. */
+				int i;
+				for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
+					mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
+			}
 		}
 	}
 
@@ -905,15 +892,11 @@ int drbd_resync_finished(struct drbd_conf *mdev)
 	mdev->rs_total  = 0;
 	mdev->rs_failed = 0;
 	mdev->rs_paused = 0;
-	mdev->ov_start_sector = 0;
+	if (verify_done)
+		mdev->ov_start_sector = 0;
 
 	drbd_md_sync(mdev);
 
-	if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) {
-		dev_info(DEV, "Writing the whole bitmap\n");
-		drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
-	}
-
 	if (khelper_cmd)
 		drbd_khelper(mdev, khelper_cmd);
 
@@ -994,7 +977,9 @@ int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 		put_ldev(mdev);
 	}
 
-	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
+	if (mdev->state.conn == C_AHEAD) {
+		ok = drbd_send_ack(mdev, P_RS_CANCEL, e);
+	} else if (likely((e->flags & EE_WAS_ERROR) == 0)) {
 		if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
 			inc_rs_pending(mdev);
 			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
@@ -1096,25 +1081,27 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	if (unlikely(cancel))
 		goto out;
 
-	if (unlikely((e->flags & EE_WAS_ERROR) != 0))
-		goto out;
-
 	digest_size = crypto_hash_digestsize(mdev->verify_tfm);
-	/* FIXME if this allocation fails, online verify will not terminate! */
 	digest = kmalloc(digest_size, GFP_NOIO);
-	if (digest) {
-		drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
-		inc_rs_pending(mdev);
-		ok = drbd_send_drequest_csum(mdev, e->sector, e->size,
-					     digest, digest_size, P_OV_REPLY);
-		if (!ok)
-			dec_rs_pending(mdev);
-		kfree(digest);
+	if (!digest) {
+		ok = 0;	/* terminate the connection in case the allocation failed */
+		goto out;
 	}
 
+	if (likely(!(e->flags & EE_WAS_ERROR)))
+		drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
+	else
+		memset(digest, 0, digest_size);
+
+	inc_rs_pending(mdev);
+	ok = drbd_send_drequest_csum(mdev, e->sector, e->size,
+				     digest, digest_size, P_OV_REPLY);
+	if (!ok)
+		dec_rs_pending(mdev);
+	kfree(digest);
+
 out:
 	drbd_free_ee(mdev, e);
 	dec_unacked(mdev);
 	return ok;
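With the rewrite of w_e_end_ov_req() above, a local read error during online verify no longer skips the block: the node sends an all-zero digest, so the comparing peer sees a mismatch and the block gets marked out of sync rather than silently ignored (and a failed digest allocation now terminates the connection instead of stalling the verify run). A standalone model of the zero-digest effect; the checksum below is a stand-in, not drbd's digest:

#include <stdio.h>
#include <string.h>

static void digest_of(const char *data, int read_error, unsigned char out[4])
{
	unsigned long h = 5381;

	if (read_error) {
		memset(out, 0, 4);   /* what the memset in the hunk does */
		return;
	}
	while (*data)
		h = h * 33 + (unsigned char)*data++;
	memcpy(out, &h, 4);
}

int main(void)
{
	unsigned char local[4], peer[4];

	digest_of("block payload", 1, local);   /* verify source: read failed */
	digest_of("block payload", 0, peer);    /* verify target: read ok */
	printf("block %s\n", memcmp(local, peer, 4) ?
	       "marked out-of-sync" : "in sync");
	return 0;
}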
@@ -1129,7 +1116,6 @@ void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
 		mdev->ov_last_oos_size = size>>9;
 	}
 	drbd_set_out_of_sync(mdev, sector, size);
-	set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
 }
 
 int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
@@ -1165,10 +1151,6 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 			eq = !memcmp(digest, di->digest, digest_size);
 			kfree(digest);
 		}
-	} else {
-		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
-		if (__ratelimit(&drbd_ratelimit_state))
-			dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
 	}
 
 	dec_unacked(mdev);
@@ -1182,7 +1164,13 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 
 	drbd_free_ee(mdev, e);
 
-	if (--mdev->ov_left == 0) {
+	--mdev->ov_left;
+
+	/* let's advance progress step marks only for every other megabyte */
+	if ((mdev->ov_left & 0x200) == 0x200)
+		drbd_advance_rs_marks(mdev, mdev->ov_left);
+
+	if (mdev->ov_left == 0) {
 		ov_oos_print(mdev);
 		drbd_resync_finished(mdev);
 	}
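On the progress-mark hunk above: ov_left counts remaining 4 KiB bitmap blocks, and (ov_left & 0x200) == 0x200 is true while bit 9 of the counter is set, i.e. in alternating bands of 512 blocks = 2 MiB, which appears to be what the "every other megabyte" comment refers to. A standalone illustration of where the mask fires:

#include <stdio.h>

int main(void)
{
	unsigned long ov_left;
	int prev = 0, edges = 0;

	for (ov_left = 2048; ov_left > 0; ov_left--) {
		int hit = (ov_left & 0x200) == 0x200;

		if (hit != prev) {   /* log only band edges */
			printf("ov_left=%lu: %s\n", ov_left,
			       hit ? "advancing marks" : "idle");
			prev = hit;
			edges++;
		}
	}
	printf("%d band edges over 2048 blocks (8 MiB)\n", edges);
	return 0;
}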
@@ -1235,6 +1223,22 @@ int w_send_write_hint(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
 	return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE);
 }
 
+int w_send_oos(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
+{
+	struct drbd_request *req = container_of(w, struct drbd_request, w);
+	int ok;
+
+	if (unlikely(cancel)) {
+		req_mod(req, send_canceled);
+		return 1;
+	}
+
+	ok = drbd_send_oos(mdev, req);
+	req_mod(req, oos_handed_to_network);
+
+	return ok;
+}
+
 /**
  * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
  * @mdev:	DRBD device.
@@ -1430,6 +1434,17 @@ int drbd_alter_sa(struct drbd_conf *mdev, int na)
 	return retcode;
 }
 
+void drbd_rs_controller_reset(struct drbd_conf *mdev)
+{
+	atomic_set(&mdev->rs_sect_in, 0);
+	atomic_set(&mdev->rs_sect_ev, 0);
+	mdev->rs_in_flight = 0;
+	mdev->rs_planed = 0;
+	spin_lock(&mdev->peer_seq_lock);
+	fifo_set(&mdev->rs_plan_s, 0);
+	spin_unlock(&mdev->peer_seq_lock);
+}
+
 /**
  * drbd_start_resync() - Start the resync process
  * @mdev:	DRBD device.
@@ -1443,13 +1458,18 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
 	union drbd_state ns;
 	int r;
 
-	if (mdev->state.conn >= C_SYNC_SOURCE) {
+	if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn < C_AHEAD) {
 		dev_err(DEV, "Resync already running!\n");
 		return;
 	}
 
-	/* In case a previous resync run was aborted by an IO error/detach on the peer. */
-	drbd_rs_cancel_all(mdev);
+	if (mdev->state.conn < C_AHEAD) {
+		/* In case a previous resync run was aborted by an IO error/detach on the peer. */
+		drbd_rs_cancel_all(mdev);
+		/* This should be done when we abort the resync. We definitely do not
+		   want to have this for connections going back and forth between
+		   Ahead/Behind and SyncSource/SyncTarget */
+	}
 
 	if (side == C_SYNC_TARGET) {
 		/* Since application IO was locked out during C_WF_BITMAP_T and
@@ -1463,6 +1483,20 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
 			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
 			return;
 		}
+	} else /* C_SYNC_SOURCE */ {
+		r = drbd_khelper(mdev, "before-resync-source");
+		r = (r >> 8) & 0xff;
+		if (r > 0) {
+			if (r == 3) {
+				dev_info(DEV, "before-resync-source handler returned %d, "
+					 "ignoring. Old userland tools?", r);
+			} else {
+				dev_info(DEV, "before-resync-source handler returned %d, "
+					 "dropping connection.\n", r);
+				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
+				return;
+			}
+		}
 	}
 
 	drbd_state_lock(mdev);
@@ -1472,18 +1506,6 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
 		return;
 	}
 
-	if (side == C_SYNC_TARGET) {
-		mdev->bm_resync_fo = 0;
-	} else /* side == C_SYNC_SOURCE */ {
-		u64 uuid;
-
-		get_random_bytes(&uuid, sizeof(u64));
-		drbd_uuid_set(mdev, UI_BITMAP, uuid);
-		drbd_send_sync_uuid(mdev, uuid);
-
-		D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
-	}
-
 	write_lock_irq(&global_state_lock);
 	ns = mdev->state;
 
@@ -1521,13 +1543,24 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
 		_drbd_pause_after(mdev);
 	}
 	write_unlock_irq(&global_state_lock);
-	put_ldev(mdev);
 
 	if (r == SS_SUCCESS) {
 		dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
 		     drbd_conn_str(ns.conn),
 		     (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
 		     (unsigned long) mdev->rs_total);
+		if (side == C_SYNC_TARGET)
+			mdev->bm_resync_fo = 0;
+
+		/* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid
+		 * with w_send_oos, or the sync target will get confused as to
+		 * how much bits to resync.  We cannot do that always, because for an
+		 * empty resync and protocol < 95, we need to do it here, as we call
+		 * drbd_resync_finished from here in that case.
+		 * We drbd_gen_and_send_sync_uuid here for protocol < 96,
+		 * and from after_state_ch otherwise. */
+		if (side == C_SYNC_SOURCE && mdev->agreed_pro_version < 96)
+			drbd_gen_and_send_sync_uuid(mdev);
 
 		if (mdev->agreed_pro_version < 95 && mdev->rs_total == 0) {
 			/* This still has a race (about when exactly the peers
@@ -1547,13 +1580,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
 			drbd_resync_finished(mdev);
 		}
 
-		atomic_set(&mdev->rs_sect_in, 0);
-		atomic_set(&mdev->rs_sect_ev, 0);
-		mdev->rs_in_flight = 0;
-		mdev->rs_planed = 0;
-		spin_lock(&mdev->peer_seq_lock);
-		fifo_set(&mdev->rs_plan_s, 0);
-		spin_unlock(&mdev->peer_seq_lock);
+		drbd_rs_controller_reset(mdev);
 		/* ns.conn may already be != mdev->state.conn,
 		 * we may have been paused in between, or become paused until
 		 * the timer triggers.
@@ -1563,6 +1590,7 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
 
 		drbd_md_sync(mdev);
 	}
+	put_ldev(mdev);
 	drbd_state_unlock(mdev);
 }
 
			@ -39,7 +39,7 @@ static inline void drbd_generic_make_request(struct drbd_conf *mdev,
 | 
			
		|||
		return;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
	if (FAULT_ACTIVE(mdev, fault_type))
 | 
			
		||||
	if (drbd_insert_fault(mdev, fault_type))
 | 
			
		||||
		bio_endio(bio, -EIO);
 | 
			
		||||
	else
 | 
			
		||||
		generic_make_request(bio);
 | 
			
		||||
| 
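The FAULT_ACTIVE macro gives way to a function call, drbd_insert_fault(), which decides whether to fail this bio artificially. A sketch of the general shape such a fault-injection predicate takes (names and the rand() call are illustrative, not the kernel's; the real code is driven by module parameters and the kernel RNG):

#include <stdbool.h>
#include <stdlib.h>

/* sketch: fire a given fault class on roughly rate_percent of calls */
static bool fault_should_fire(unsigned int enabled_mask,
			      unsigned int fault_type,
			      unsigned int rate_percent)
{
	if (!(enabled_mask & (1u << fault_type)))
		return false;	/* this fault class is not enabled */
	return (unsigned int)(rand() % 100) < rate_percent;
}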

include/linux/drbd.h

@ -53,10 +53,10 @@
 
 
 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.9"
+#define REL_VERSION "8.3.10"
 #define API_VERSION 88
 #define PRO_VERSION_MIN 86
-#define PRO_VERSION_MAX 95
+#define PRO_VERSION_MAX 96
 
 
 enum drbd_io_error_p {
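Bumping PRO_VERSION_MAX to 96 while leaving PRO_VERSION_MIN at 86 lets a new node speak protocol 96 yet still interoperate with peers back to 86. A sketch of the usual min/max negotiation rule (illustrative; the real handshake is drbd_do_handshake() in drbd_receiver.c):

/* pick the highest protocol version both sides support, or fail */
static int negotiate_protocol(int my_min, int my_max,
			      int peer_min, int peer_max)
{
	int agreed = my_max < peer_max ? my_max : peer_max;

	if (agreed < my_min || agreed < peer_min)
		return -1;	/* no overlap: refuse the connection */
	return agreed;		/* e.g. [86..96] vs [86..95] agrees on 95 */
}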

@ -96,8 +96,14 @@ enum drbd_on_no_data {
 	OND_SUSPEND_IO
 };
 
+enum drbd_on_congestion {
+	OC_BLOCK,
+	OC_PULL_AHEAD,
+	OC_DISCONNECT,
+};
+
 /* KEEP the order, do not delete or insert. Only append. */
-enum drbd_ret_codes {
+enum drbd_ret_code {
 	ERR_CODE_BASE		= 100,
 	NO_ERROR		= 101,
 	ERR_LOCAL_ADDR		= 102,
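The new enum drbd_on_congestion is the policy knob behind the Ahead/Behind feature: what the sender does when the replication link congests. An illustrative consumer (the real logic sits in the request path and reads mdev->net_conf->on_congestion; the helper here is not the kernel's):

enum drbd_on_congestion { OC_BLOCK, OC_PULL_AHEAD, OC_DISCONNECT };

/* map each policy to the action taken under congestion */
static const char *congestion_action(enum drbd_on_congestion oc)
{
	switch (oc) {
	case OC_BLOCK:      return "block until the send queue drains";
	case OC_PULL_AHEAD: return "go Ahead/Behind, track out-of-sync bits";
	case OC_DISCONNECT: return "tear down the connection";
	}
	return "unreachable";
}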

@ -146,6 +152,9 @@ enum drbd_ret_codes {
 	ERR_PERM		= 152,
 	ERR_NEED_APV_93		= 153,
 	ERR_STONITH_AND_PROT_A  = 154,
+	ERR_CONG_NOT_PROTO_A	= 155,
+	ERR_PIC_AFTER_DEP	= 156,
+	ERR_PIC_PEER_DEP	= 157,
 
 	/* insert new ones above this line */
 	AFTER_LAST_ERR_CODE

@ -199,6 +208,10 @@ enum drbd_conns {
 	C_VERIFY_T,
 	C_PAUSED_SYNC_S,
 	C_PAUSED_SYNC_T,
+
+	C_AHEAD,
+	C_BEHIND,
+
 	C_MASK = 31
 };
 
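C_AHEAD and C_BEHIND are appended rather than inserted because the connection state travels on the wire and is packed into a 5-bit field of union drbd_state, so every value must stay at or below C_MASK (31). A compile-time guard for that invariant might look like this (illustrative; in-kernel this would be a BUILD_BUG_ON):

/* fails to compile if a new connection state overflows the 5-bit field */
typedef char conn_state_fits_in_five_bits[(C_BEHIND <= C_MASK) ? 1 : -1];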

@ -259,7 +272,7 @@ union drbd_state {
 	unsigned int i;
 };
 
-enum drbd_state_ret_codes {
+enum drbd_state_rv {
 	SS_CW_NO_NEED = 4,
 	SS_CW_SUCCESS = 3,
 	SS_NOTHING_TO_DO = 2,

@ -290,7 +303,7 @@ enum drbd_state_ret_codes {
 extern const char *drbd_conn_str(enum drbd_conns);
 extern const char *drbd_role_str(enum drbd_role);
 extern const char *drbd_disk_str(enum drbd_disk_state);
-extern const char *drbd_set_st_err_str(enum drbd_state_ret_codes);
+extern const char *drbd_set_st_err_str(enum drbd_state_rv);
 
 #define SHARED_SECRET_MAX 64
 

include/linux/drbd_limits.h

@ -16,7 +16,8 @@
 #define DEBUG_RANGE_CHECK 0
 
 #define DRBD_MINOR_COUNT_MIN 1
-#define DRBD_MINOR_COUNT_MAX 255
+#define DRBD_MINOR_COUNT_MAX 256
+#define DRBD_MINOR_COUNT_DEF 32
 
 #define DRBD_DIALOG_REFRESH_MIN 0
 #define DRBD_DIALOG_REFRESH_MAX 600

@ -129,6 +130,7 @@
 #define DRBD_AFTER_SB_2P_DEF ASB_DISCONNECT
 #define DRBD_RR_CONFLICT_DEF ASB_DISCONNECT
 #define DRBD_ON_NO_DATA_DEF OND_IO_ERROR
+#define DRBD_ON_CONGESTION_DEF OC_BLOCK
 
 #define DRBD_MAX_BIO_BVECS_MIN 0
 #define DRBD_MAX_BIO_BVECS_MAX 128

@ -154,5 +156,13 @@
 #define DRBD_C_MIN_RATE_MAX     (4 << 20)
 #define DRBD_C_MIN_RATE_DEF     4096
 
+#define DRBD_CONG_FILL_MIN	0
+#define DRBD_CONG_FILL_MAX	(10<<21) /* 10GByte in sectors */
+#define DRBD_CONG_FILL_DEF	0
+
+#define DRBD_CONG_EXTENTS_MIN	DRBD_AL_EXTENTS_MIN
+#define DRBD_CONG_EXTENTS_MAX	DRBD_AL_EXTENTS_MAX
+#define DRBD_CONG_EXTENTS_DEF	DRBD_AL_EXTENTS_DEF
+
 #undef RANGE
 #endif
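Each tunable ships as a MIN/MAX/DEF triple, and the configuration layer validates incoming values against it. A minimal sketch of that pattern (illustrative helper; the real range checks live in drbd_nl.c):

/* apply a MIN/MAX/DEF triple to a proposed configuration value */
static int clamp_or_default(int val, int min, int max, int def, int unset)
{
	if (unset)
		return def;	/* nothing supplied: use the default */
	if (val < min)
		return min;
	if (val > max)
		return max;
	return val;
}

/* e.g. clamp_or_default(fill, DRBD_CONG_FILL_MIN, DRBD_CONG_FILL_MAX,
 *                       DRBD_CONG_FILL_DEF, 0) */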

include/linux/drbd_nl.h

@ -56,6 +56,9 @@ NL_PACKET(net_conf, 5,
 	NL_INTEGER(	39,	T_MAY_IGNORE,	rr_conflict)
 	NL_INTEGER(	40,	T_MAY_IGNORE,	ping_timeo)
 	NL_INTEGER(	67,	T_MAY_IGNORE,	rcvbuf_size)
+	NL_INTEGER(	81,	T_MAY_IGNORE,	on_congestion)
+	NL_INTEGER(	82,	T_MAY_IGNORE,	cong_fill)
+	NL_INTEGER(	83,	T_MAY_IGNORE,	cong_extents)
 	  /* 59 addr_family was available in GIT, never released */
 	NL_BIT(		60,	T_MANDATORY,	mind_af)
 	NL_BIT(		27,	T_MAY_IGNORE,	want_lose)

@ -66,7 +69,9 @@ NL_PACKET(net_conf, 5,
 	NL_BIT(		70,	T_MANDATORY,	dry_run)
 )
 
-NL_PACKET(disconnect, 6, )
+NL_PACKET(disconnect, 6,
+	NL_BIT(		84,	T_MAY_IGNORE,	force)
+)
 
 NL_PACKET(resize, 7,
 	NL_INT64(		29,	T_MAY_IGNORE,	resize_size)

@ -143,9 +148,13 @@ NL_PACKET(new_c_uuid, 26,
        NL_BIT(		63,	T_MANDATORY,	clear_bm)
 )
 
+#ifdef NL_RESPONSE
+NL_RESPONSE(return_code_only, 27)
+#endif
+
 #undef NL_PACKET
 #undef NL_INTEGER
 #undef NL_INT64
 #undef NL_BIT
 #undef NL_STRING
+#undef NL_RESPONSE

include/linux/drbd_tag_magic.h

@ -7,6 +7,7 @@
 /* declare packet_type enums */
 enum packet_types {
 #define NL_PACKET(name, number, fields) P_ ## name = number,
+#define NL_RESPONSE(name, number) P_ ## name = number,
 #define NL_INTEGER(pn, pr, member)
 #define NL_INT64(pn, pr, member)
 #define NL_BIT(pn, pr, member)
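The last two hunks extend DRBD's X-macro machinery: drbd_nl.h writes the packet list once, and each includer redefines NL_PACKET/NL_RESPONSE before inclusion to expand the same list into an enum, a handler table, and so on; the #ifdef NL_RESPONSE guard keeps includers that don't define the new macro compiling. A self-contained toy version of the technique (not the kernel headers themselves):

#include <stdio.h>

#define PACKET_LIST \
	NL_PACKET(primary,    1) \
	NL_PACKET(disconnect, 6) \
	NL_PACKET(resize,     7)

/* expansion 1: declare the enum, as drbd_tag_magic.h does */
#define NL_PACKET(name, number) P_ ## name = number,
enum packet_types { PACKET_LIST };
#undef NL_PACKET

/* expansion 2: generate a name table from the very same list */
#define NL_PACKET(name, number) [number] = #name,
static const char *packet_names[] = { PACKET_LIST };
#undef NL_PACKET

int main(void)
{
	printf("%d -> %s\n", P_disconnect, packet_names[P_disconnect]);
	return 0;
}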