
📄 quota.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 2
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota
 * check program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the
 * frequency of quota file syncs increases as the user moves closer to their
 * limit.  The more frequent the syncs, the more accurate the quota
 * enforcement, but that means that there is more contention between the nodes
 * for the quota file.  The default value is one.  This sets the maximum
 * theoretical quota overrun (with an infinite number of nodes, each with
 * infinite bandwidth) to twice the user's limit.  (In practice, the maximum
 * overrun you see should be much less.)  A "quota_scale" number greater than
 * one makes quota syncs more frequent and reduces the maximum overrun.
 * Numbers less than one (but greater than zero) make quota syncs less
 * frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "ops_file.h"
#include "ops_address.h"
#include "util.h"

#define QUOTA_USER 1
#define QUOTA_GROUP 0

struct gfs2_quota_host {
	u64 qu_limit;
	u64 qu_warn;
	s64 qu_value;
	u32 qu_ll_next;
};

struct gfs2_quota_change_host {
	u64 qc_change;
	u32 qc_flags; /* GFS2_QCF_... */
	u32 qc_id;
};

/* Byte offset of this ID's entry in the quota file: the user and group
   entries for the same numeric ID are adjacent. */
static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_KERNEL);
	if (!qd)
		return -ENOMEM;

	qd->qd_count = 1;
	qd->qd_id = id;
	if (user)
		set_bit(QDF_USER, &qd->qd_flags);
	qd->qd_slot = -1;

	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
			      &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	error = gfs2_lvb_hold(qd->qd_gl);
	gfs2_glock_put(qd->qd_gl);
	if (error)
		goto fail;

	*qdp = qd;

	return 0;

fail:
	kfree(qd);
	return error;
}

/* Find the quota_data for (user, id) on the superblock's list, optionally
   allocating and adding a new one if it is not there yet. */
static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
	int error, found;

	*qdp = NULL;

	for (;;) {
		found = 0;
		spin_lock(&sdp->sd_quota_spin);
		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
			if (qd->qd_id == id &&
			    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
				qd->qd_count++;
				found = 1;
				break;
			}
		}

		if (!found)
			qd = NULL;

		if (!qd && new_qd) {
			qd = new_qd;
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			new_qd = NULL;
		}

		spin_unlock(&sdp->sd_quota_spin);

		if (qd || !create) {
			if (new_qd) {
				gfs2_lvb_unhold(new_qd->qd_gl);
				kfree(new_qd);
			}
			*qdp = qd;
			return 0;
		}

		error = qd_alloc(sdp, user, id, &new_qd);
		if (error)
			return error;
	}
}

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_count);
	qd->qd_count++;
	spin_unlock(&sdp->sd_quota_spin);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_count);
	if (!--qd->qd_count)
		qd->qd_last_touched = jiffies;
	spin_unlock(&sdp->sd_quota_spin);
}

/* Claim a free slot in the per-node quota_change file; free slots are
   tracked in a bitmap made of PAGE_SIZE-byte chunks. */
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&sdp->sd_quota_spin);

	if (qd->qd_slot_count++) {
		spin_unlock(&sdp->sd_quota_spin);
		return 0;
	}

	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&sdp->sd_quota_spin);

	return 0;

fail:
	qd->qd_slot_count--;
	spin_unlock(&sdp->sd_quota_spin);
	return -ENOSPC;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&sdp->sd_quota_spin);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
		qd->qd_slot = -1;
	}
	spin_unlock(&sdp->sd_quota_spin);
}

/* Read and hold the quota_change block containing this ID's slot. */
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, 0, &bh_map);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}

static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&sdp->sd_quota_spin);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
			continue;

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, qd->qd_count);
		qd->qd_count++;
		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);
		qd->qd_slot_count++;

		found = 1;
		break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&sdp->sd_quota_spin);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static int qd_trylock(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&sdp->sd_quota_spin);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&sdp->sd_quota_spin);
		return 0;
	}

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, qd->qd_count);
	qd->qd_count++;
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;

	spin_unlock(&sdp->sd_quota_spin);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
	if (bh_get(qd)) {
		clear_bit(QDF_LOCKED, &qd->qd_flags);
		slot_put(qd);
		qd_put(qd);
		return 0;
	}

	return 1;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, user, id, create, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data **qd = al->al_qd;
	int error;

	if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	unsigned int x;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < al->al_qd_num; x++) {
		qdsb_put(al->al_qd[x]);
		al->al_qd[x] = NULL;
	}
	al->al_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
		if (test_bit(QDF_USER, &qd_a->qd_flags))
			return -1;
		else
			return 1;
	}
	if (qd_a->qd_id < qd_b->qd_id)
		return -1;
	if (qd_a->qd_id > qd_b->qd_id)
		return 1;

	return 0;
}

/* Add 'change' blocks to this ID's pending entry in the node's
   quota_change file. */
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&sdp->sd_quota_spin);
	qd->qd_change = x;
	spin_unlock(&sdp->sd_quota_spin);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}

static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
{
	const struct gfs2_quota *str = buf;

	qu->qu_limit = be64_to_cpu(str->qu_limit);
	qu->qu_warn = be64_to_cpu(str->qu_warn);
	qu->qu_value = be64_to_cpu(str->qu_value);
	qu->qu_ll_next = be32_to_cpu(str->qu_ll_next);
}

static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
{
	struct gfs2_quota *str = buf;

	str->qu_limit = cpu_to_be64(qu->qu_limit);
	str->qu_warn = cpu_to_be64(qu->qu_warn);
	str->qu_value = cpu_to_be64(qu->qu_value);
	str->qu_ll_next = cpu_to_be32(qu->qu_ll_next);
	memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
}

/**
 * gfs2_adjust_quota
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd)
{
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr;
	char *ptr;
	struct gfs2_quota_host qp;
	s64 value;
	int err = -EIO;

	if (gfs2_is_stuffed(ip)) {
		struct gfs2_alloc *al = NULL;
		al = gfs2_alloc_get(ip);
		/* just request 1 blk */
		al->al_requested = 1;
		gfs2_inplace_reserve(ip);
		gfs2_unstuff_dinode(ip, NULL);
		gfs2_inplace_release(ip);
		gfs2_alloc_put(ip);
	}
	page = grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;

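The index arithmetic in qd2offset() and slot_get() above is compact, so here is a small standalone sketch (not part of quota.c) that mirrors it in user space. ENTRY_SIZE and CHUNK_SIZE are illustrative stand-ins for sizeof(struct gfs2_quota) and PAGE_SIZE; the real sizes come from the on-disk format and the kernel configuration.

/* Standalone illustration only: mirrors the index math used by qd2offset()
 * and slot_get() in the listing above.  ENTRY_SIZE and CHUNK_SIZE are assumed
 * values, not the real kernel constants. */
#include <stdint.h>
#include <stdio.h>

#define ENTRY_SIZE 64   /* assumed stand-in for sizeof(struct gfs2_quota) */
#define CHUNK_SIZE 4096 /* assumed stand-in for PAGE_SIZE */

/* User and group entries for the same numeric ID sit next to each other in
 * the quota file: entry 2*id for a user ID, entry 2*id + 1 for a group ID. */
static uint64_t quota_file_offset(uint32_t id, int is_user)
{
	uint64_t entry = 2 * (uint64_t)id + !is_user;
	return entry * ENTRY_SIZE;
}

/* A quota_change slot number is derived from its bitmap position exactly as
 * slot_get() does it: chunk * bits-per-chunk + byte * 8 + bit. */
static unsigned int slot_number(unsigned int chunk, unsigned int byte,
				unsigned int bit)
{
	return chunk * (8 * CHUNK_SIZE) + byte * 8 + bit;
}

int main(void)
{
	/* uid 1000 maps to entry 2000, gid 1000 to entry 2001. */
	printf("uid 1000 -> byte offset %llu\n",
	       (unsigned long long)quota_file_offset(1000, 1));
	printf("gid 1000 -> byte offset %llu\n",
	       (unsigned long long)quota_file_offset(1000, 0));

	/* First clear bit found in chunk 0, byte 3, bit 5 -> slot 29. */
	printf("slot = %u\n", slot_number(0, 3, 5));
	return 0;
}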