
dlmrecovery.c

Linux kernel source code (fs/ocfs2/dlm) · C
Page 1 of 5
/* we really cannot afford to fail an alloc in recovery
 * do we spin?  returning an error only delays the problem really */
int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_migratable_lockres *mres =
		(struct dlm_migratable_lockres *)msg->buf;
	int ret = 0;
	u8 real_master;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;
	struct dlm_lock_resource *res = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	real_master = mres->master;
	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* cannot migrate a lockres with no master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
	}

	mlog(0, "%s message received from node %u\n",
		  (mres->flags & DLM_MRES_RECOVERY) ?
		  "recovery" : "migration", mres->master);

	if (mres->flags & DLM_MRES_ALL_DONE)
		mlog(0, "all done flag.  all lockres data received!\n");

	ret = -ENOMEM;
	buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!buf || !item)
		goto leave;

	/* lookup the lock to see if we have a secondary queue for this
	 * already...  just add the locks in and this will have its owner
	 * and RECOVERY flag changed when it completes. */
	res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
	if (res) {
		/* this will get a ref on res */
		/* mark it as recovering/migrating and hash it */
		spin_lock(&res->spinlock);
		if (mres->flags & DLM_MRES_RECOVERY) {
			res->state |= DLM_LOCK_RES_RECOVERING;
		} else {
			if (res->state & DLM_LOCK_RES_MIGRATING) {
				/* this is at least the second
				 * lockres message */
				mlog(0, "lock %.*s is already migrating\n",
					  mres->lockname_len,
					  mres->lockname);
			} else if (res->state & DLM_LOCK_RES_RECOVERING) {
				/* caller should BUG */
				mlog(ML_ERROR, "node is attempting to migrate "
				     "lock %.*s, but marked as recovering!\n",
				     mres->lockname_len, mres->lockname);
				ret = -EFAULT;
				spin_unlock(&res->spinlock);
				goto leave;
			}
			res->state |= DLM_LOCK_RES_MIGRATING;
		}
		spin_unlock(&res->spinlock);
	} else {
		/* need to allocate, just like if it was
		 * mastered here normally  */
		res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
		if (!res)
			goto leave;

		/* to match the ref that we would have gotten if
		 * dlm_lookup_lockres had succeeded */
		dlm_lockres_get(res);

		/* mark it as recovering/migrating and hash it */
		if (mres->flags & DLM_MRES_RECOVERY)
			res->state |= DLM_LOCK_RES_RECOVERING;
		else
			res->state |= DLM_LOCK_RES_MIGRATING;

		spin_lock(&dlm->spinlock);
		__dlm_insert_lockres(dlm, res);
		spin_unlock(&dlm->spinlock);

		/* now that the new lockres is inserted,
		 * make it usable by other processes */
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);

		/* add an extra ref for just-allocated lockres
		 * otherwise the lockres will be purged immediately */
		dlm_lockres_get(res);
	}

	/* at this point we have allocated everything we need,
	 * and we have a hashed lockres with an extra ref and
	 * the proper res->state flags. */
	ret = 0;
	spin_lock(&res->spinlock);
	/* drop this either when master requery finds a different master
	 * or when a lock is added by the recovery worker */
	dlm_lockres_grab_inflight_ref(dlm, res);
	if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* migration cannot have an unknown master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
		mlog(0, "recovery has passed me a lockres with an "
			  "unknown owner.. will need to requery: "
			  "%.*s\n", mres->lockname_len, mres->lockname);
	} else {
		/* take a reference now to pin the lockres, drop it
		 * when locks are added in the worker */
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
	}
	spin_unlock(&res->spinlock);

	/* queue up work for dlm_mig_lockres_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
	dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
	item->u.ml.lockres = res; /* already have a ref */
	item->u.ml.real_master = real_master;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

leave:
	dlm_put(dlm);
	if (ret < 0) {
		if (buf)
			kfree(buf);
		if (item)
			kfree(item);
	}

	mlog_exit(ret);
	return ret;
}
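An aside on the dispatch pattern above: dlm_mig_lockres_handler never processes the migrated locks inline. It copies the whole wire message into a private buffer, wraps buffer and lockres in a work item, and only then kicks the worker, so the network handler returns quickly. Below is a minimal user-space sketch of that copy-then-defer shape using pthreads; all names here (work_item, handler, worker_main) are invented for illustration, not kernel API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* hypothetical user-space stand-ins for dlm_work_item / dlm->work_list */
struct work_item {
	struct work_item *next;
	char *buf;              /* private copy of the message payload */
	size_t len;
};

static struct work_item *work_list;
static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t work_ready = PTHREAD_COND_INITIALIZER;

/* "handler": copy the payload and queue it -- the same shape as the
 * memcpy + list_add_tail + queue_work sequence above (queue order is
 * ignored in this sketch) */
static int handler(const char *msg, size_t len)
{
	struct work_item *item = calloc(1, sizeof(*item));
	char *buf = malloc(len);
	if (!item || !buf) {
		free(item);
		free(buf);
		return -1;      /* -ENOMEM analogue */
	}
	memcpy(buf, msg, len);
	item->buf = buf;
	item->len = len;

	pthread_mutex_lock(&work_lock);
	item->next = work_list;
	work_list = item;
	pthread_cond_signal(&work_ready);
	pthread_mutex_unlock(&work_lock);
	return 0;
}

/* "worker": drain the list and process each copied message */
static void *worker_main(void *unused)
{
	for (;;) {
		pthread_mutex_lock(&work_lock);
		while (!work_list)
			pthread_cond_wait(&work_ready, &work_lock);
		struct work_item *item = work_list;
		work_list = item->next;
		pthread_mutex_unlock(&work_lock);

		printf("processing %zu-byte message\n", item->len);
		free(item->buf);   /* worker owns the copy, like kfree(data) */
		free(item);
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, worker_main, NULL);
	handler("lockres payload", 16);
	sleep(1);   /* give the worker time to drain the queue */
	return 0;
}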
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_migratable_lockres *mres;
	int ret = 0;
	struct dlm_lock_resource *res;
	u8 real_master;

	dlm = item->dlm;
	mres = (struct dlm_migratable_lockres *)data;

	res = item->u.ml.lockres;
	real_master = item->u.ml.real_master;

	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* this case is super-rare. only occurs if
		 * node death happens during migration. */
again:
		ret = dlm_lockres_master_requery(dlm, res, &real_master);
		if (ret < 0) {
			mlog(0, "dlm_lockres_master_requery ret=%d\n",
				  ret);
			goto again;
		}
		if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lockres %.*s not claimed.  "
				   "this node will take it.\n",
				   res->lockname.len, res->lockname.name);
		} else {
			spin_lock(&res->spinlock);
			dlm_lockres_drop_inflight_ref(dlm, res);
			spin_unlock(&res->spinlock);
			mlog(0, "master needs to respond to sender "
				  "that node %u still owns %.*s\n",
				  real_master, res->lockname.len,
				  res->lockname.name);
			/* cannot touch this lockres */
			goto leave;
		}
	}

	ret = dlm_process_recovery_data(dlm, res, mres);
	if (ret < 0)
		mlog(0, "dlm_process_recovery_data returned  %d\n", ret);
	else
		mlog(0, "dlm_process_recovery_data succeeded\n");

	if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
	                   (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
		ret = dlm_finish_migration(dlm, res, mres->master);
		if (ret < 0)
			mlog_errno(ret);
	}

leave:
	kfree(data);
	mlog_exit(ret);
}

static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master)
{
	struct dlm_node_iter iter;
	int nodenum;
	int ret = 0;

	*real_master = DLM_LOCK_RES_OWNER_UNKNOWN;

	/* we only reach here if one of the two nodes in a
	 * migration died while the migration was in progress.
	 * at this point we need to requery the master.  we
	 * know that the new_master got as far as creating
	 * an mle on at least one node, but we do not know
	 * if any nodes had actually cleared the mle and set
	 * the master to the new_master.  the old master
	 * is supposed to set the owner to UNKNOWN in the
	 * event of a new_master death, so the only possible
	 * responses that we can get from nodes here are
	 * that the master is new_master, or that the master
	 * is UNKNOWN.
	 * if all nodes come back with UNKNOWN then we know
	 * the lock needs remastering here.
	 * if any node comes back with a valid master, check
	 * to see if that master is the one that we are
	 * recovering.  if so, then the new_master died and
	 * we need to remaster this lock.  if not, then the
	 * new_master survived and that node will respond to
	 * other nodes about the owner.
	 * if there is an owner, this node needs to dump this
	 * lockres and alert the sender that this lockres
	 * was rejected. */
	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* do not send to self */
		if (nodenum == dlm->node_num)
			continue;
		ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
			/* host is down, so answer for that node would be
			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
		}
		if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lock master is %u\n", *real_master);
			break;
		}
	}
	return ret;
}
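The requery loop above reduces to a simple vote: ask every other node, treat a dead node as if it answered UNKNOWN, stop at the first concrete answer, and remaster locally if nobody claims the lock. Here is a stand-alone sketch of that decision rule; responses[], OWNER_UNKNOWN and requery_master are hypothetical stand-ins, not kernel API.

#include <stdio.h>

#define OWNER_UNKNOWN 255   /* stands in for DLM_LOCK_RES_OWNER_UNKNOWN */

/* Poll every other node for its view of the owner.  A node that is down
 * counts as answering UNKNOWN; the first concrete answer wins; if everyone
 * answers UNKNOWN the resource must be remastered locally. */
static int requery_master(const int *responses, int nnodes, int self)
{
	int node;
	for (node = 0; node < nnodes; node++) {
		if (node == self)
			continue;               /* do not send to self */
		if (responses[node] != OWNER_UNKNOWN)
			return responses[node]; /* someone knows the master */
	}
	return OWNER_UNKNOWN;                   /* remaster it here */
}

int main(void)
{
	int answers[4] = { OWNER_UNKNOWN, OWNER_UNKNOWN, 2, OWNER_UNKNOWN };
	printf("master = %d\n", requery_master(answers, 4, 0)); /* prints 2 */
	return 0;
}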
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			  u8 nodenum, u8 *real_master)
{
	int ret = -EINVAL;
	struct dlm_master_requery req;
	int status = DLM_LOCK_RES_OWNER_UNKNOWN;

	memset(&req, 0, sizeof(req));
	req.node_idx = dlm->node_num;
	req.namelen = res->lockname.len;
	memcpy(req.name, res->lockname.name, res->lockname.len);

	ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
				 &req, sizeof(req), nodenum, &status);
	/* XXX: negative status not handled properly here. */
	if (ret < 0)
		mlog_errno(ret);
	else {
		BUG_ON(status < 0);
		BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
		*real_master = (u8) (status & 0xff);
		mlog(0, "node %u responded to master requery with %u\n",
			  nodenum, *real_master);
		ret = 0;
	}
	return ret;
}

/* this function cannot error, so unless the sending
 * or receiving of the message failed, the owner can
 * be trusted */
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	unsigned int hash;
	int master = DLM_LOCK_RES_OWNER_UNKNOWN;
	u32 flags = DLM_ASSERT_MASTER_REQUERY;

	if (!dlm_grab(dlm)) {
		/* since the domain has gone away on this
		 * node, the proper response is UNKNOWN */
		return master;
	}

	hash = dlm_lockid_hash(req->name, req->namelen);

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		master = res->owner;
		if (master == dlm->node_num) {
			int ret = dlm_dispatch_assert_master(dlm, res,
							     0, 0, flags);
			if (ret < 0) {
				mlog_errno(-ENOMEM);
				/* retry!? */
				BUG();
			}
		}
		spin_unlock(&res->spinlock);
	}
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
	return master;
}

static inline struct list_head *
dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
{
	struct list_head *ret;
	BUG_ON(list_num < 0);
	BUG_ON(list_num > 2);
	ret = &(res->granted);
	ret += list_num;
	return ret;
}
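dlm_list_num_to_pointer above leans on struct layout: granted, converting and blocked are declared as consecutive list_heads inside dlm_lock_resource, so adding the list number to &res->granted selects the right queue. A small illustration of that assumption with simplified, hypothetical types:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* hypothetical slice of dlm_lock_resource: the trick only works because
 * the three queues are declared consecutively, in list-number order */
struct lockres {
	struct list_head granted;     /* list_num 0 */
	struct list_head converting;  /* list_num 1 */
	struct list_head blocked;     /* list_num 2 */
};

static struct list_head *list_num_to_ptr(struct lockres *res, int n)
{
	assert(n >= 0 && n <= 2);     /* the BUG_ON bounds checks */
	return &res->granted + n;     /* step over adjacent members,
				       * relying on layout exactly as the
				       * kernel code does */
}

int main(void)
{
	struct lockres res;
	assert(list_num_to_ptr(&res, 1) == &res.converting);
	assert(list_num_to_ptr(&res, 2) == &res.blocked);
	printf("offsets: %zu %zu %zu\n",
	       offsetof(struct lockres, granted),
	       offsetof(struct lockres, converting),
	       offsetof(struct lockres, blocked));
	return 0;
}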
/* TODO: do ast flush business
 * TODO: do MIGRATING and RECOVERING spinning
 */

/*
* NOTE about in-flight requests during migration:
*
* Before attempting the migrate, the master has marked the lockres as
* MIGRATING and then flushed all of its pending ASTS.  So any in-flight
* requests either got queued before the MIGRATING flag got set, in which
* case the lock data will reflect the change and a return message is on
* the way, or the request failed to get in before MIGRATING got set.  In
* this case, the caller will be told to spin and wait for the MIGRATING
* flag to be dropped, then recheck the master.
* This holds true for the convert, cancel and unlock cases, and since lvb
* updates are tied to these same messages, it applies to lvb updates as
* well.  For the lock case, there is no way a lock can be on the master
* queue and not be on the secondary queue since the lock is always added
* locally first.  This means that the new target node will never be sent
* a lock that he doesn't already have on the list.
* In total, this means that the local lock is correct and should not be
* updated to match the one sent by the master.  Any messages sent back
* from the master before the MIGRATING flag will bring the lock properly
* up-to-date, and the change will be ordered properly for the waiter.
* We will *not* attempt to modify the lock underneath the waiter.
*/
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres)
{
	struct dlm_migratable_lock *ml;
	struct list_head *queue;
	struct list_head *tmpq = NULL;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	int ret = 0;
	int i, j, bad;
	struct dlm_lock *lock = NULL;
	u8 from = O2NM_MAX_NODES;
	unsigned int added = 0;

	mlog(0, "running %d locks for this lockres\n", mres->num_locks);
	for (i=0; i<mres->num_locks; i++) {
		ml = &(mres->ml[i]);

		if (dlm_is_dummy_lock(dlm, ml, &from)) {
			/* placeholder, just need to set the refmap bit */
			BUG_ON(mres->num_locks != 1);
			mlog(0, "%s:%.*s: dummy lock for %u\n",
			     dlm->name, mres->lockname_len, mres->lockname,
			     from);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(from, res);
			spin_unlock(&res->spinlock);
			added++;
			break;
		}
		BUG_ON(ml->highest_blocked != LKM_IVMODE);
		newlock = NULL;
		lksb = NULL;

		queue = dlm_list_num_to_pointer(res, ml->list);
		tmpq = NULL;

		/* if the lock is for the local node it needs to
		 * be moved to the proper location within the queue.
		 * do not allocate a new lock structure. */
		if (ml->node == dlm->node_num) {
			/* MIGRATION ONLY! */
			BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));

			spin_lock(&res->spinlock);
			for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
				tmpq = dlm_list_idx_to_ptr(res, j);
				list_for_each_entry(lock, tmpq, list) {
					if (lock->ml.cookie != ml->cookie)
						lock = NULL;
					else
						break;
				}
				if (lock)
					break;
			}

			/* lock is always created locally first, and
			 * destroyed locally last.  it must be on the list */
			if (!lock) {
				__be64 c = ml->cookie;
				mlog(ML_ERROR, "could not find local lock "
					       "with cookie %u:%llu!\n",
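The local-lock search near the end of the listing walks all three queues comparing cookies until it finds the matching entry, and treats a miss as fatal. A simplified user-space sketch of the same scan over plain arrays follows; the array model and every name in it are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* hypothetical flattened model: three queues, each an array of cookies */
#define QUEUES 3
#define QLEN   4

static const uint64_t queues[QUEUES][QLEN] = {
	{ 11, 12, 13, 14 },   /* granted */
	{ 21, 22,  0,  0 },   /* converting */
	{ 31,  0,  0,  0 },   /* blocked */
};

/* scan all three queues for a matching cookie, like the j/list_for_each
 * loop above; returns the queue index or -1 (the kernel BUGs instead) */
static int find_lock(uint64_t cookie)
{
	for (int j = 0; j < QUEUES; j++)
		for (int i = 0; i < QLEN; i++)
			if (queues[j][i] == cookie)
				return j;
	return -1;
}

int main(void)
{
	printf("cookie 22 lives on queue %d\n", find_lock(22)); /* 1 */
	printf("cookie 99 lives on queue %d\n", find_lock(99)); /* -1 */
	return 0;
}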
