📄 quota.c
字号:
iblock++; pos += blocksize; } if (!buffer_mapped(bh)) { gfs2_get_block(inode, iblock, bh, 1); if (!buffer_mapped(bh)) goto unlock; } if (PageUptodate(page)) set_buffer_uptodate(bh); if (!buffer_uptodate(bh)) { ll_rw_block(READ_META, 1, &bh); wait_on_buffer(bh); if (!buffer_uptodate(bh)) goto unlock; } gfs2_trans_add_bh(ip->i_gl, bh, 0); kaddr = kmap_atomic(page, KM_USER0); ptr = kaddr + offset; gfs2_quota_in(&qp, ptr); qp.qu_value += change; value = qp.qu_value; gfs2_quota_out(&qp, ptr); flush_dcache_page(page); kunmap_atomic(kaddr, KM_USER0); err = 0; qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC); qd->qd_qb.qb_value = cpu_to_be64(value); ((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC); ((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value);unlock: unlock_page(page); page_cache_release(page); return err;}static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda){ struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd; struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); unsigned int data_blocks, ind_blocks; struct gfs2_holder *ghs, i_gh; unsigned int qx, x; struct gfs2_quota_data *qd; loff_t offset; unsigned int nalloc = 0; struct gfs2_alloc *al = NULL; int error; gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota), &data_blocks, &ind_blocks); ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_KERNEL); if (!ghs) return -ENOMEM; sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL); for (qx = 0; qx < num_qd; qx++) { error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE, GL_NOCACHE, &ghs[qx]); if (error) goto out; } error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh); if (error) goto out; for (x = 0; x < num_qd; x++) { int alloc_required; offset = qd2offset(qda[x]); error = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota), &alloc_required); if (error) goto out_gunlock; if (alloc_required) nalloc++; } if (nalloc) { al = gfs2_alloc_get(ip); al->al_requested = nalloc * (data_blocks + ind_blocks); error = gfs2_inplace_reserve(ip); if (error) goto out_alloc; error = gfs2_trans_begin(sdp, al->al_rgd->rd_length + num_qd * data_blocks + nalloc * ind_blocks + RES_DINODE + num_qd + RES_STATFS, 0); if (error) goto out_ipres; } else { error = gfs2_trans_begin(sdp, num_qd * data_blocks + RES_DINODE + num_qd, 0); if (error) goto out_gunlock; } for (x = 0; x < num_qd; x++) { qd = qda[x]; offset = qd2offset(qd); error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, (struct gfs2_quota_data *) qd); if (error) goto out_end_trans; do_qc(qd, -qd->qd_change_sync); } error = 0;out_end_trans: gfs2_trans_end(sdp);out_ipres: if (nalloc) gfs2_inplace_release(ip);out_alloc: if (nalloc) gfs2_alloc_put(ip);out_gunlock: gfs2_glock_dq_uninit(&i_gh);out: while (qx--) gfs2_glock_dq_uninit(&ghs[qx]); kfree(ghs); gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl); return error;}static int do_glock(struct gfs2_quota_data *qd, int force_refresh, struct gfs2_holder *q_gh){ struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); struct gfs2_holder i_gh; struct gfs2_quota_host q; char buf[sizeof(struct gfs2_quota)]; struct file_ra_state ra_state; int error; struct gfs2_quota_lvb *qlvb; file_ra_state_init(&ra_state, sdp->sd_quota_inode->i_mapping);restart: error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh); if (error) return error; qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) { loff_t pos; gfs2_glock_dq_uninit(q_gh); error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, GL_NOCACHE, q_gh); if (error) return error; error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh); if (error) goto fail; memset(buf, 0, sizeof(struct gfs2_quota)); pos = qd2offset(qd); error = gfs2_internal_read(ip, &ra_state, buf, &pos, sizeof(struct gfs2_quota)); if (error < 0) goto fail_gunlock; gfs2_glock_dq_uninit(&i_gh); gfs2_quota_in(&q, buf); qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb; qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC); qlvb->__pad = 0; qlvb->qb_limit = cpu_to_be64(q.qu_limit); qlvb->qb_warn = cpu_to_be64(q.qu_warn); qlvb->qb_value = cpu_to_be64(q.qu_value); qd->qd_qb = *qlvb; if (gfs2_glock_is_blocking(qd->qd_gl)) { gfs2_glock_dq_uninit(q_gh); force_refresh = 0; goto restart; } } return 0;fail_gunlock: gfs2_glock_dq_uninit(&i_gh);fail: gfs2_glock_dq_uninit(q_gh); return error;}int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid){ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_alloc *al = &ip->i_alloc; unsigned int x; int error = 0; gfs2_quota_hold(ip, uid, gid); if (capable(CAP_SYS_RESOURCE) || sdp->sd_args.ar_quota != GFS2_QUOTA_ON) return 0; sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *), sort_qd, NULL); for (x = 0; x < al->al_qd_num; x++) { error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]); if (error) break; } if (!error) set_bit(GIF_QD_LOCKED, &ip->i_flags); else { while (x--) gfs2_glock_dq_uninit(&al->al_qd_ghs[x]); gfs2_quota_unhold(ip); } return error;}static int need_sync(struct gfs2_quota_data *qd){ struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; struct gfs2_tune *gt = &sdp->sd_tune; s64 value; unsigned int num, den; int do_sync = 1; if (!qd->qd_qb.qb_limit) return 0; spin_lock(&sdp->sd_quota_spin); value = qd->qd_change; spin_unlock(&sdp->sd_quota_spin); spin_lock(>->gt_spin); num = gt->gt_quota_scale_num; den = gt->gt_quota_scale_den; spin_unlock(>->gt_spin); if (value < 0) do_sync = 0; else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >= (s64)be64_to_cpu(qd->qd_qb.qb_limit)) do_sync = 0; else { value *= gfs2_jindex_size(sdp) * num; do_div(value, den); value += (s64)be64_to_cpu(qd->qd_qb.qb_value); if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit)) do_sync = 0; } return do_sync;}void gfs2_quota_unlock(struct gfs2_inode *ip){ struct gfs2_alloc *al = &ip->i_alloc; struct gfs2_quota_data *qda[4]; unsigned int count = 0; unsigned int x; if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags)) goto out; for (x = 0; x < al->al_qd_num; x++) { struct gfs2_quota_data *qd; int sync; qd = al->al_qd[x]; sync = need_sync(qd); gfs2_glock_dq_uninit(&al->al_qd_ghs[x]); if (sync && qd_trylock(qd)) qda[count++] = qd; } if (count) { do_sync(count, qda); for (x = 0; x < count; x++) qd_unlock(qda[x]); }out: gfs2_quota_unhold(ip);}#define MAX_LINE 256static int print_message(struct gfs2_quota_data *qd, char *type){ struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd; printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\r\n", sdp->sd_fsname, type, (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group", qd->qd_id); return 0;}int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid){ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_alloc *al = &ip->i_alloc; struct gfs2_quota_data *qd; s64 value; unsigned int x; int error = 0; if (!test_bit(GIF_QD_LOCKED, &ip->i_flags)) return 0; if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON) return 0; for (x = 0; x < al->al_qd_num; x++) { qd = al->al_qd[x]; if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) || (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags)))) continue; value = (s64)be64_to_cpu(qd->qd_qb.qb_value); spin_lock(&sdp->sd_quota_spin); value += qd->qd_change; spin_unlock(&sdp->sd_quota_spin); if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) { print_message(qd, "exceeded"); error = -EDQUOT; break; } else if (be64_to_cpu(qd->qd_qb.qb_warn) && (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value && time_after_eq(jiffies, qd->qd_last_warn + gfs2_tune_get(sdp, gt_quota_warn_period) * HZ)) { error = print_message(qd, "warning"); qd->qd_last_warn = jiffies; } } return error;}void gfs2_quota_change(struct gfs2_inode *ip, s64 change, u32 uid, u32 gid){ struct gfs2_alloc *al = &ip->i_alloc; struct gfs2_quota_data *qd; unsigned int x; unsigned int found = 0; if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change)) return; if (ip->i_di.di_flags & GFS2_DIF_SYSTEM) return; for (x = 0; x < al->al_qd_num; x++) { qd = al->al_qd[x]; if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) || (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) { do_qc(qd, change); found++; } }}int gfs2_quota_sync(struct gfs2_sbd *sdp){ struct gfs2_quota_data **qda; unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync); unsigned int num_qd; unsigned int x; int error = 0; sdp->sd_quota_sync_gen++; qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL); if (!qda) return -ENOMEM; do { num_qd = 0; for (;;) { error = qd_fish(sdp, qda + num_qd); if (error || !qda[num_qd]) break; if (++num_qd == max_qd) break; } if (num_qd) { if (!error) error = do_sync(num_qd, qda); if (!error) for (x = 0; x < num_qd; x++) qda[x]->qd_sync_gen = sdp->sd_quota_sync_gen; for (x = 0; x < num_qd; x++) qd_unlock(qda[x]); } } while (!error && num_qd == max_qd); kfree(qda); return error;}int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id){ struct gfs2_quota_data *qd; struct gfs2_holder q_gh; int error; error = qd_get(sdp, user, id, CREATE, &qd); if (error) return error; error = do_glock(qd, FORCE, &q_gh); if (!error) gfs2_glock_dq_uninit(&q_gh); qd_put(qd); return error;}static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf){ const struct gfs2_quota_change *str = buf; qc->qc_change = be64_to_cpu(str->qc_change); qc->qc_flags = be32_to_cpu(str->qc_flags); qc->qc_id = be32_to_cpu(str->qc_id);}int gfs2_quota_init(struct gfs2_sbd *sdp){ struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift; unsigned int x, slot = 0; unsigned int found = 0; u64 dblock; u32 extlen = 0; int error; if (!ip->i_di.di_size || ip->i_di.di_size > (64 << 20) || ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) { gfs2_consist_inode(ip); return -EIO; } sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block; sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE); error = -ENOMEM; sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks, sizeof(unsigned char *), GFP_KERNEL); if (!sdp->sd_quota_bitmap) return error; for (x = 0; x < sdp->sd_quota_chunks; x++) { sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!sdp->sd_quota_bitmap[x]) goto fail; } for (x = 0; x < blocks; x++) { struct buffer_head *bh; unsigned int y; if (!extlen) { int new = 0; error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen); if (error) goto fail; } error = -EIO; bh = gfs2_meta_ra(ip->i_gl, dblock, extlen); if (!bh) goto fail; if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) { brelse(bh); goto fail; } for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots; y++, slot++) { struct gfs2_quota_change_host qc; struct gfs2_quota_data *qd; gfs2_quota_change_in(&qc, bh->b_data + sizeof(struct gfs2_meta_header) + y * sizeof(struct gfs2_quota_change)); if (!qc.qc_change) continue; error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER), qc.qc_id, &qd); if (error) { brelse(bh); goto fail; } set_bit(QDF_CHANGE, &qd->qd_flags); qd->qd_change = qc.qc_change; qd->qd_slot = slot; qd->qd_slot_count = 1; qd->qd_last_touched = jiffies; spin_lock(&sdp->sd_quota_spin); gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1); list_add(&qd->qd_list, &sdp->sd_quota_list); atomic_inc(&sdp->sd_quota_count); spin_unlock(&sdp->sd_quota_spin); found++; } brelse(bh); dblock++; extlen--; } if (found) fs_info(sdp, "found %u quota changes\n", found); return 0;fail: gfs2_quota_cleanup(sdp); return error;}void gfs2_quota_scan(struct gfs2_sbd *sdp){ struct gfs2_quota_data *qd, *safe; LIST_HEAD(dead); spin_lock(&sdp->sd_quota_spin); list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) { if (!qd->qd_count && time_after_eq(jiffies, qd->qd_last_touched + gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) { list_move(&qd->qd_list, &dead); gfs2_assert_warn(sdp, atomic_read(&sdp->sd_quota_count) > 0); atomic_dec(&sdp->sd_quota_count); } } spin_unlock(&sdp->sd_quota_spin); while (!list_empty(&dead)) { qd = list_entry(dead.next, struct gfs2_quota_data, qd_list); list_del(&qd->qd_list); gfs2_assert_warn(sdp, !qd->qd_change); gfs2_assert_warn(sdp, !qd->qd_slot_count); gfs2_assert_warn(sdp, !qd->qd_bh_count); gfs2_lvb_unhold(qd->qd_gl); kfree(qd); }}void gfs2_quota_cleanup(struct gfs2_sbd *sdp){ struct list_head *head = &sdp->sd_quota_list; struct gfs2_quota_data *qd; unsigned int x; spin_lock(&sdp->sd_quota_spin); while (!list_empty(head)) { qd = list_entry(head->prev, struct gfs2_quota_data, qd_list); if (qd->qd_count > 1 || (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) { list_move(&qd->qd_list, head); spin_unlock(&sdp->sd_quota_spin); schedule(); spin_lock(&sdp->sd_quota_spin); continue; } list_del(&qd->qd_list); atomic_dec(&sdp->sd_quota_count); spin_unlock(&sdp->sd_quota_spin); if (!qd->qd_count) { gfs2_assert_warn(sdp, !qd->qd_change); gfs2_assert_warn(sdp, !qd->qd_slot_count); } else gfs2_assert_warn(sdp, qd->qd_slot_count == 1); gfs2_assert_warn(sdp, !qd->qd_bh_count); gfs2_lvb_unhold(qd->qd_gl); kfree(qd); spin_lock(&sdp->sd_quota_spin); } spin_unlock(&sdp->sd_quota_spin); gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count)); if (sdp->sd_quota_bitmap) { for (x = 0; x < sdp->sd_quota_chunks; x++) kfree(sdp->sd_quota_bitmap[x]); kfree(sdp->sd_quota_bitmap); }}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -