parallel_grouplock.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002 Cluster File Systems, Inc.
 * Author: You Feng <youfeng@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <time.h>
#include <errno.h>
#include <lustre/lustre_user.h>
#include "lp_utils.h"

#define LPGL_FILEN 700000
#define LPGL_TEST_ITEMS 7

#define MAX_GLHOST 4

/* waiting time in 0.1 s */
#define MAX_WAITING_TIME 20

int rank = 0;
int size = 0;

char *testdir = NULL;

/*
 * process1 attempts CW(gid=1) -- granted immediately
 * process2 attempts PR        -- blocked, goes on waiting list
 * process3 attempts CW(gid=1) -> should be granted, but may go on
 *                                the waiting list
 */
void grouplock_test1(char *filename, int fd, char *errmsg)
{
        int rc, count, gid = 1;
        char buf[LPGL_FILEN];
        char zeros[LPGL_FILEN];
        MPI_Request req1, req2;
        int temp1, temp2;

        if (rank == 0) {
                if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
                        sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
                                filename, rc);
                        FAIL(errmsg);
                }
        }

        MPI_Barrier(MPI_COMM_WORLD);

        if (rank == 1) {
                memset(zeros, 0x0, sizeof(zeros));
                lseek(fd, 0, SEEK_SET);

                MPI_Send(&gid, 1, MPI_INT, 2, 1, MPI_COMM_WORLD);
                count = read(fd, buf, sizeof(buf));
                if (count != sizeof(buf)) {
                        if (count > 0)
                                dump_diff(zeros, buf, count, 0);
                        sprintf(errmsg, "read of file %s return %d",
                                filename, count);
                        FAIL(errmsg);
                }
                MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
        }

        if (rank == 2) {
                int temp;

                /* Wait for the reading task to make progress.  This is
                 * probably somewhat racy; the usleep() below only makes
                 * the race less likely. */
                usleep(100);
                MPI_Recv(&temp, 1, MPI_INT, 1, 1, MPI_COMM_WORLD,
                         MPI_STATUS_IGNORE);
                if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
                        sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
                                filename, rc);
                        FAIL(errmsg);
                }
                MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
        }

        if (rank == 0) {
                int iter = MAX_WAITING_TIME;
                int flag1, flag2;

                /* reading task will tell us when it completes */
                MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
                /* 2nd locking task will tell us when it completes */
                MPI_Irecv(&temp2, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req2);

                do {
                        iter--;
                        if (!iter) {
                                FAIL("2nd locking task is not progressing\n");
                        }
                        usleep(100);
                        MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
                        MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
                        if (flag1) {
                                FAIL("PR task progressed even though GROUP lock"
                                     " is held\n");
                        }
                } while (!flag2);
        }

        /* Now we need to release the lock */
        if (rank == 0 || rank == 2) {
                if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
                        sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s return %d",
                                filename, rc);
                        FAIL(errmsg);
                }
        }

        if (rank == 0) {
                int iter = MAX_WAITING_TIME;
                int flag1;

                do {
                        iter--;
                        if (!iter) {
                                FAIL("reading task is not progressing even "
                                     "though GROUP lock was released\n");
                                break;
                        }
                        usleep(100);
                        MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
                } while (!flag1);
        }

        MPI_Barrier(MPI_COMM_WORLD);
}

/*
 * process1 attempts CW(gid=1) -- granted immediately
 * process2 attempts CW(gid=2) -- blocked
 * process3 attempts PR        -- blocked
 * process4 attempts CW(gid=2) -- blocked
 * process1 releases CW(gid=1) -- this allows process2's CW lock to be granted;
 *                                process3 remains blocked
 */
void grouplock_test2(char *filename, int fd, char *errmsg)
{
        int rc, count, gid = 1;
        char buf[LPGL_FILEN];
        char zeros[LPGL_FILEN];
        MPI_Request req1, req2, req3;
        int temp1, temp2, temp3;

        if (rank == 0) {
                if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
                        sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
                                filename, rc);
                        FAIL(errmsg);
                }
        }

        MPI_Barrier(MPI_COMM_WORLD);

        if (rank == 1 || rank == 3) {
                gid = 2;
                if (rank == 3) {
                        MPI_Recv(&temp1, 1, MPI_INT, 2, 1, MPI_COMM_WORLD,
                                 MPI_STATUS_IGNORE);
                        usleep(100);
                }
                if ((rc = ioctl(fd, LL_IOC_GROUP_LOCK, gid)) == -1) {
                        sprintf(errmsg, "ioctl GROUP_LOCK of file %s return %d",
                                filename, rc);
                        FAIL(errmsg);
                }
                MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
        }

        if (rank == 2) {
                memset(zeros, 0x0, sizeof(zeros));
                lseek(fd, 0, SEEK_SET);

                MPI_Send(&gid, 1, MPI_INT, 3, 1, MPI_COMM_WORLD);
                count = read(fd, buf, sizeof(buf));
                if (count != sizeof(buf)) {
                        if (count > 0)
                                dump_diff(zeros, buf, count, 0);
                        sprintf(errmsg, "read of file %s return %d",
                                filename, count);
                        FAIL(errmsg);
                }
                MPI_Send(&gid, 1, MPI_INT, 0, 1, MPI_COMM_WORLD);
        }

        if (rank == 0) {
                int iter = MAX_WAITING_TIME;
                int flag1, flag2, flag3;

                /* 2nd locking task will tell us when it completes */
                MPI_Irecv(&temp1, 1, MPI_INT, 1, 1, MPI_COMM_WORLD, &req1);
                /* 3rd locking task will tell us when it completes */
                MPI_Irecv(&temp2, 1, MPI_INT, 3, 1, MPI_COMM_WORLD, &req2);
                /* reading task will tell us when it completes */
                MPI_Irecv(&temp3, 1, MPI_INT, 2, 1, MPI_COMM_WORLD, &req3);

                do {
                        iter--;
                        usleep(100);
                        MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
                        MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
                        MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
                        if (flag3) {
                                FAIL("PR task progressed even though GROUP lock"
                                     " is held\n");
                        }
                        if (flag1 || flag2) {
                                FAIL("GROUP (gid=2) task progressed even though"
                                     " GROUP (gid=1) lock is held\n");
                        }
                } while (iter);

                /* Now release the first (gid=1) lock */
                if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
                        sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s return %d",
                                filename, rc);
                        FAIL(errmsg);
                }

                iter = MAX_WAITING_TIME;
                do {
                        iter--;
                        if (!iter) {
                                FAIL("GROUP(gid=2) tasks are not progressing\n");
                        }
                        usleep(100);
                        MPI_Test(&req2, &flag2, MPI_STATUS_IGNORE);
                        MPI_Test(&req1, &flag1, MPI_STATUS_IGNORE);
                        MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
                        if (flag3) {
                                fprintf(stderr, "task1 %d, task3 %d\n",
                                        flag1, flag2);
                                FAIL("PR task progressed even though GROUP "
                                     "(gid=2) locks are still held\n");
                        }
                } while (!(flag1 && flag2));

                MPI_Send(&gid, 1, MPI_INT, 1, 1, MPI_COMM_WORLD);
                MPI_Send(&gid, 1, MPI_INT, 3, 1, MPI_COMM_WORLD);
        }

        if (rank == 1 || rank == 3) {
                /* Do not release the locks until task 0 is ready to watch
                 * for the reading task only */
                MPI_Recv(&temp1, 1, MPI_INT, 0, 1, MPI_COMM_WORLD,
                         MPI_STATUS_IGNORE);

                if ((rc = ioctl(fd, LL_IOC_GROUP_UNLOCK, gid)) == -1) {
                        sprintf(errmsg, "ioctl GROUP_UNLOCK of file %s return %d",
                                filename, rc);
                        FAIL(errmsg);
                }
        }

        if (rank == 0) {
                int iter = MAX_WAITING_TIME;
                int flag3;

                do {
                        iter--;
                        if (!iter) {
                                FAIL("reading task is not progressing even "
                                     "though GROUP locks are released\n");
                                break;
                        }
                        usleep(100);
                        MPI_Test(&req3, &flag3, MPI_STATUS_IGNORE);
                } while (!flag3);
        }

        MPI_Barrier(MPI_COMM_WORLD);
}
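The tests above (and the remaining tests of the suite, which this listing truncates) all build on the same primitive: a Lustre group lock taken and dropped with the LL_IOC_GROUP_LOCK / LL_IOC_GROUP_UNLOCK ioctls from <lustre/lustre_user.h>. The standalone sketch below isolates that acquire/read/release pattern outside of MPI so it can be tried on a single node; the command-line file argument and the minimal error handling are illustrative assumptions, not part of the test suite.

/* Minimal sketch (not part of the original test suite): take a Lustre
 * group lock on a file, read from it, and release the lock, mirroring
 * the ioctl pattern used by grouplock_test1/grouplock_test2 above.
 * Assumes argv[1] names a file on a Lustre mount. */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <lustre/lustre_user.h>

int main(int argc, char **argv)
{
        int fd, gid = 1;        /* any nonzero group id */
        char buf[4096];

        if (argc != 2) {
                fprintf(stderr, "usage: %s <file-on-lustre>\n", argv[0]);
                return 1;
        }

        fd = open(argv[1], O_RDONLY);
        if (fd == -1) {
                perror("open");
                return 1;
        }

        /* All holders of the same gid share the group lock; conflicting
         * requests (e.g. the PR read locks exercised by the tests) must
         * wait until every holder releases it. */
        if (ioctl(fd, LL_IOC_GROUP_LOCK, gid) == -1) {
                perror("ioctl LL_IOC_GROUP_LOCK");
                return 1;
        }

        if (read(fd, buf, sizeof(buf)) == -1)
                perror("read");

        if (ioctl(fd, LL_IOC_GROUP_UNLOCK, gid) == -1) {
                perror("ioctl LL_IOC_GROUP_UNLOCK");
                return 1;
        }

        close(fd);
        return 0;
}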