ldlm_flock.c

From "lustre 1.6.5 source code" · C code · 580 lines total · page 1 of 2

C
580
字号
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (c) 2003 Hewlett-Packard Development Company LP.
 *   Developed under the sponsorship of the US Government under
 *   Subcontract No. B514193
 *
 *   This file is part of the Lustre file system, http://www.lustre.org
 *   Lustre is a trademark of Cluster File Systems, Inc.
 *
 *   You may have signed or agreed to another license before downloading
 *   this software.  If so, you are bound by the terms and conditions
 *   of that agreement, and the following does not apply to you.  See the
 *   LICENSE file included with this distribution for more information.
 *
 *   If you did not agree to a different license, then this copy of Lustre
 *   is open source software; you can redistribute it and/or modify it
 *   under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   In either case, Lustre is distributed in the hope that it will be
 *   useful, but WITHOUT ANY WARRANTY; without even the implied warranty
 *   of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   license text for more details.
 */

#define DEBUG_SUBSYSTEM S_LDLM

#ifdef __KERNEL__
#include <lustre_dlm.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>
#include <libcfs/list.h>
#else
#include <liblustre.h>
#include <obd_class.h>
#endif

#include "ldlm_internal.h"

/* A blocked flock lock is linked onto the global ldlm_flock_waitq through
 * the lock's l_lru list head (aliased below).  NOTE(review): this presumably
 * works because a flock lock never sits on the regular LRU — confirm against
 * the rest of the LDLM code. */
#define l_flock_waitq   l_lru

/* Global queue of flock locks currently blocked waiting for a conflicting
 * lock; scanned by ldlm_flock_deadlock() for cycle detection. */
static struct list_head ldlm_flock_waitq = CFS_LIST_HEAD_INIT(ldlm_flock_waitq);

int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
                            void *data, int flag);

/**
 * list_for_remaining_safe - iterate over the remaining entries in a list
 *              and safeguard against removal of a list entry.
 * @pos:        the &struct list_head to use as a loop counter. pos MUST
 *              have been initialized prior to using it in this macro.
 * @n:          another &struct list_head to use as temporary storage
 * @head:       the head for your list.
 */
#define list_for_remaining_safe(pos, n, head) \
        for (n = pos->next; pos != (head); pos = n, n = pos->next)

/* Return non-zero when @new and @lock belong to the same flock owner,
 * i.e. the same process id on the same export (client connection). */
static inline int
ldlm_same_flock_owner(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return((new->l_policy_data.l_flock.pid ==
                lock->l_policy_data.l_flock.pid) &&
               (new->l_export == lock->l_export));
}

/* Return non-zero when the byte ranges [start, end] of @new and @lock
 * intersect (both endpoints inclusive). */
static inline int
ldlm_flocks_overlap(struct ldlm_lock *lock, struct ldlm_lock *new)
{
        return((new->l_policy_data.l_flock.start <=
                lock->l_policy_data.l_flock.end) &&
               (new->l_policy_data.l_flock.end >=
                lock->l_policy_data.l_flock.start));
}

/* Unlink @lock from its resource list and destroy it.  For the client-side
 * LDLM_FL_WAIT_NOREPROC case the granted-mode reference is also dropped
 * here, without sending a CANCEL to the server.  Caller must already hold
 * lock_res_and_lock(); @lock must not be on the deadlock waitq. */
static inline void
ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, int flags)
{
        ENTRY;

        LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%x)",
                   mode, flags);

        /* a destroyed lock may not still be queued for deadlock detection */
        LASSERT(list_empty(&lock->l_flock_waitq));
        list_del_init(&lock->l_res_link);

        if (flags == LDLM_FL_WAIT_NOREPROC) {
                /* client side - set a flag to prevent sending a CANCEL */
                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
                /* when reaching here, it is under lock_res_and_lock(). Thus,
                   need call the nolock version of ldlm_lock_decref_internal*/
                ldlm_lock_decref_internal_nolock(lock, mode);
        }

        ldlm_lock_destroy_nolock(lock);
        EXIT;
}

/* Deadlock detection: follow the blocked-on chain starting from
 * @blocking_lock's owner through the global flock waitq.  Returns 1 if the
 * chain leads back to @req's owner (a wait-for cycle), 0 otherwise.
 *
 * NOTE(review): each hop restarts the scan from the head of the waitq
 * (goto restart), so detection is O(n) per hop — presumably acceptable for
 * the expected queue sizes; confirm before reuse elsewhere. */
static int
ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *blocking_lock)
{
        struct obd_export *req_export = req->l_export;
        struct obd_export *blocking_export = blocking_lock->l_export;
        pid_t req_pid = req->l_policy_data.l_flock.pid;
        pid_t blocking_pid = blocking_lock->l_policy_data.l_flock.pid;
        struct ldlm_lock *lock;

restart:
        list_for_each_entry(lock, &ldlm_flock_waitq, l_flock_waitq) {
                /* find the waitq entry owned by the current blocker */
                if ((lock->l_policy_data.l_flock.pid != blocking_pid) ||
                    (lock->l_export != blocking_export))
                        continue;

                /* advance one hop: who is that owner blocked on?
                 * blocking_export is stored as an integer in the policy
                 * data and cast back to a pointer here. */
                blocking_pid = lock->l_policy_data.l_flock.blocking_pid;
                blocking_export = (struct obd_export *)(long)
                        lock->l_policy_data.l_flock.blocking_export;
                if (blocking_pid == req_pid && blocking_export == req_export)
                        return 1;

                goto restart;
        }

        return 0;
}

/* Process a flock/lockf request @req against resource's granted list:
 * detect conflicts (blocking or failing per *flags), answer test-lock
 * queries, detect deadlock, and merge/split the owner's existing granted
 * locks around the new range.
 *
 * @req:       the new lock request (also aliased as @new below).
 * @flags:     in/out LDLM_FL_* flags (LDLM_FL_BLOCK_NOWAIT,
 *             LDLM_FL_TEST_LOCK, LDLM_FL_WAIT_NOREPROC, ...).
 * @first_enq: non-zero on first enqueue; on reprocessing a conflict just
 *             stops iteration instead of blocking again.
 * @err:       out: ELDLM_OK, -EAGAIN or -EDEADLK.
 * @work_list: list for deferred work (unused in the visible portion).
 *
 * Returns LDLM_ITER_STOP or LDLM_ITER_CONTINUE. */
int
ldlm_process_flock_lock(struct ldlm_lock *req, int *flags, int first_enq,
                        ldlm_error_t *err, struct list_head *work_list)
{
        struct ldlm_resource *res = req->l_resource;
        struct ldlm_namespace *ns = res->lr_namespace;
        struct list_head *tmp;
        struct list_head *ownlocks = NULL;
        struct ldlm_lock *lock = NULL;
        struct ldlm_lock *new = req;
        struct ldlm_lock *new2 = NULL;
        ldlm_mode_t mode = req->l_req_mode;
        int local = ns_is_client(ns);
        /* LCK_NL requests (unlock-style; cf. the F_GETLK-like LCK_NL reply
         * below) never add a new granted lock themselves */
        int added = (mode == LCK_NL);
        int overlaps = 0;
        int splitted = 0;
        ENTRY;

        CDEBUG(D_DLMTRACE, "flags %#x pid %u mode %u start "LPU64" end "LPU64
               "\n", *flags, new->l_policy_data.l_flock.pid, mode,
               req->l_policy_data.l_flock.start,
               req->l_policy_data.l_flock.end);

        *err = ELDLM_OK;

        if (local) {
                /* No blocking ASTs are sent to the clients for
                 * Posix file & record locks */
                req->l_blocking_ast = NULL;
        } else {
                /* Called on the server for lock cancels. */
                req->l_blocking_ast = ldlm_flock_blocking_ast;
        }

reprocess:
        if ((*flags == LDLM_FL_WAIT_NOREPROC) || (mode == LCK_NL)) {
                /* This loop determines where this processes locks start
                 * in the resource lr_granted list. */
                list_for_each(tmp, &res->lr_granted) {
                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                        if (ldlm_same_flock_owner(lock, req)) {
                                ownlocks = tmp;
                                break;
                        }
                }
        } else {
                lockmode_verify(mode);

                /* This loop determines if there are existing locks
                 * that conflict with the new lock request. */
                list_for_each(tmp, &res->lr_granted) {
                        lock = list_entry(tmp, struct ldlm_lock, l_res_link);

                        /* a process never conflicts with its own locks */
                        if (ldlm_same_flock_owner(lock, req)) {
                                if (!ownlocks)
                                        ownlocks = tmp;
                                continue;
                        }

                        /* locks are compatible, overlap doesn't matter */
                        if (lockmode_compat(lock->l_granted_mode, mode))
                                continue;

                        if (!ldlm_flocks_overlap(lock, req))
                                continue;

                        /* conflict found while reprocessing: just report it */
                        if (!first_enq)
                                RETURN(LDLM_ITER_CONTINUE);

                        /* F_SETLK-style non-blocking request: fail fast */
                        if (*flags & LDLM_FL_BLOCK_NOWAIT) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EAGAIN;
                                RETURN(LDLM_ITER_STOP);
                        }

                        /* F_GETLK-style query: copy the conflicting lock's
                         * description into @req and return it unchanged */
                        if (*flags & LDLM_FL_TEST_LOCK) {
                                ldlm_flock_destroy(req, mode, *flags);
                                req->l_req_mode = lock->l_granted_mode;
                                req->l_policy_data.l_flock.pid =
                                        lock->l_policy_data.l_flock.pid;
                                req->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                                req->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                                *flags |= LDLM_FL_LOCK_CHANGED;
                                RETURN(LDLM_ITER_STOP);
                        }

                        /* blocking here would close a wait-for cycle */
                        if (ldlm_flock_deadlock(req, lock)) {
                                ldlm_flock_destroy(req, mode, *flags);
                                *err = -EDEADLK;
                                RETURN(LDLM_ITER_STOP);
                        }

                        /* record who we are blocked on, then queue @req on
                         * the deadlock waitq and the resource waiting list */
                        req->l_policy_data.l_flock.blocking_pid =
                                lock->l_policy_data.l_flock.pid;
                        req->l_policy_data.l_flock.blocking_export =
                                (long)(void *)lock->l_export;

                        LASSERT(list_empty(&req->l_flock_waitq));
                        list_add_tail(&req->l_flock_waitq, &ldlm_flock_waitq);

                        ldlm_resource_add_lock(res, &res->lr_waiting, req);
                        *flags |= LDLM_FL_BLOCK_GRANTED;
                        RETURN(LDLM_ITER_STOP);
                }
        }

        /* no conflict found for a test lock: report "unlocked" */
        if (*flags & LDLM_FL_TEST_LOCK) {
                ldlm_flock_destroy(req, mode, *flags);
                req->l_req_mode = LCK_NL;
                *flags |= LDLM_FL_LOCK_CHANGED;
                RETURN(LDLM_ITER_STOP);
        }

        /* In case we had slept on this lock request take it off of the
         * deadlock detection waitq. */
        list_del_init(&req->l_flock_waitq);

        /* Scan the locks owned by this process that overlap this request.
         * We may have to merge or split existing locks. */

        if (!ownlocks)
                ownlocks = &res->lr_granted;

        list_for_remaining_safe(ownlocks, tmp, &res->lr_granted) {
                lock = list_entry(ownlocks, struct ldlm_lock, l_res_link);

                /* owner's locks are contiguous in lr_granted; stop at the
                 * first lock belonging to someone else */
                if (!ldlm_same_flock_owner(lock, new))
                        break;

                if (lock->l_granted_mode == mode) {
                        /* If the modes are the same then we need to process
                         * locks that overlap OR adjoin the new lock. The extra
                         * logic condition is necessary to deal with arithmetic
                         * overflow and underflow. */
                        if ((new->l_policy_data.l_flock.start >
                             (lock->l_policy_data.l_flock.end + 1))
                            && (lock->l_policy_data.l_flock.end !=
                                OBD_OBJECT_EOF))
                                continue;

                        if ((new->l_policy_data.l_flock.end <
                             (lock->l_policy_data.l_flock.start - 1))
                            && (lock->l_policy_data.l_flock.start != 0))
                                break;

                        /* grow the surviving range to the union of the two:
                         * take the smaller start ... */
                        if (new->l_policy_data.l_flock.start <
                            lock->l_policy_data.l_flock.start) {
                                lock->l_policy_data.l_flock.start =
                                        new->l_policy_data.l_flock.start;
                        } else {
                                new->l_policy_data.l_flock.start =
                                        lock->l_policy_data.l_flock.start;
                        }

                        /* ... and the larger end */
                        if (new->l_policy_data.l_flock.end >
                            lock->l_policy_data.l_flock.end) {
                                lock->l_policy_data.l_flock.end =
                                        new->l_policy_data.l_flock.end;
                        } else {
                                new->l_policy_data.l_flock.end =
                                        lock->l_policy_data.l_flock.end;
                        /* NOTE(review): SOURCE is truncated here (page 1 of
                         * 2); the remainder of ldlm_process_flock_lock is
                         * not visible in this chunk. */

⌨️ 快捷键说明

复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?