📄 avc.c
/*
 * Implementation of the kernel access vector cache (AVC).
 *
 * Authors:  Stephen Smalley, <sds@epoch.ncsc.mil>
 *           James Morris <jmorris@redhat.com>
 *
 * Update:   KaiGai, Kohei <kaigai@ak.jp.nec.com>
 *           Replaced the avc_lock spinlock by RCU.
 *
 * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2,
 * as published by the Free Software Foundation.
 */

/* Ported to Xen 3.0, George Coker, <gscoker@alpha.ncsc.mil> */

#include <xen/lib.h>
#include <xen/xmalloc.h>
#include <xen/types.h>
#include <xen/list.h>
#include <xen/spinlock.h>
#include <xen/prefetch.h>
#include <xen/kernel.h>
#include <xen/sched.h>
#include <xen/init.h>
#include <xen/rcupdate.h>
#include <asm/atomic.h>
#include <asm/current.h>
#include "avc.h"
#include "avc_ss.h"

static const struct av_perm_to_string
{
    u16 tclass;
    u32 value;
    const char *name;
} av_perm_to_string[] = {
#define S_(c, v, s) { c, v, s },
#include "av_perm_to_string.h"
#undef S_
};

static const char *class_to_string[] = {
#define S_(s) s,
#include "class_to_string.h"
#undef S_
};

#define TB_(s) static const char * s [] = {
#define TE_(s) };
#define S_(s) s,
#include "common_perm_to_string.h"
#undef TB_
#undef TE_
#undef S_

static const struct av_inherit
{
    u16 tclass;
    const char **common_pts;
    u32 common_base;
} av_inherit[] = {
#define S_(c, i, b) { c, common_##i##_perm_to_string, b },
#include "av_inherit.h"
#undef S_
};

#define AVC_CACHE_SLOTS 512
#define AVC_DEF_CACHE_THRESHOLD 512
#define AVC_CACHE_RECLAIM 16

#ifdef FLASK_AVC_STATS
#define avc_cache_stats_incr(field)                 \
do {                                                \
    __get_cpu_var(avc_cache_stats).field++;         \
} while (0)
#else
#define avc_cache_stats_incr(field) do {} while (0)
#endif

struct avc_entry
{
    u32 ssid;
    u32 tsid;
    u16 tclass;
    struct av_decision avd;
    atomic_t used; /* used recently */
};

struct avc_node
{
    struct avc_entry ae;
    struct list_head list;
    struct rcu_head rhead;
};

struct avc_cache
{
    struct list_head slots[AVC_CACHE_SLOTS];
    spinlock_t slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
    atomic_t lru_hint; /* LRU hint for reclaim scan */
    atomic_t active_nodes;
    u32 latest_notif; /* latest revocation notification */
};

struct avc_callback_node
{
    int (*callback) (u32 event, u32 ssid, u32 tsid, u16 tclass, u32 perms,
                     u32 *out_retained);
    u32 events;
    u32 ssid;
    u32 tsid;
    u16 tclass;
    u32 perms;
    struct avc_callback_node *next;
};

/* Exported via Flask hypercall */
unsigned int avc_cache_threshold = AVC_DEF_CACHE_THRESHOLD;

#ifdef FLASK_AVC_STATS
DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats) = { 0 };
#endif

static struct avc_cache avc_cache;
static struct avc_callback_node *avc_callbacks;

static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
{
    return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1);
}

/**
 * avc_dump_av - Display an access vector in human-readable form.
 * @tclass: target security class
 * @av: access vector
 */
static void avc_dump_av(u16 tclass, u32 av)
{
    const char **common_pts = NULL;
    u32 common_base = 0;
    int i, i2, perm;

    if ( av == 0 )
    {
        printk(" null");
        return;
    }

    for ( i = 0; i < ARRAY_SIZE(av_inherit); i++ )
    {
        if ( av_inherit[i].tclass == tclass )
        {
            common_pts = av_inherit[i].common_pts;
            common_base = av_inherit[i].common_base;
            break;
        }
    }

    printk(" {");
    i = 0;
    perm = 1;

    while ( perm < common_base )
    {
        if ( perm & av )
        {
            printk(" %s", common_pts[i]);
            av &= ~perm;
        }
        i++;
        perm <<= 1;
    }

    while ( i < sizeof(av) * 8 )
    {
        if ( perm & av )
        {
            for ( i2 = 0; i2 < ARRAY_SIZE(av_perm_to_string); i2++ )
            {
                if ( (av_perm_to_string[i2].tclass == tclass) &&
                     (av_perm_to_string[i2].value == perm) )
                    break;
            }
            if ( i2 < ARRAY_SIZE(av_perm_to_string) )
            {
                printk(" %s", av_perm_to_string[i2].name);
                av &= ~perm;
            }
        }
        i++;
        perm <<= 1;
    }

    if ( av )
        printk(" 0x%x", av);

    printk(" }");
}

/**
 * avc_dump_query - Display a SID pair and a class in human-readable form.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 */
static void avc_dump_query(u32 ssid, u32 tsid, u16 tclass)
{
    int rc;
    char *scontext;
    u32 scontext_len;

    rc = security_sid_to_context(ssid, &scontext, &scontext_len);
    if ( rc )
        printk("ssid=%d", ssid);
    else
    {
        printk("scontext=%s", scontext);
        xfree(scontext);
    }

    rc = security_sid_to_context(tsid, &scontext, &scontext_len);
    if ( rc )
        printk(" tsid=%d", tsid);
    else
    {
        printk(" tcontext=%s", scontext);
        xfree(scontext);
    }

    printk("\n");
    printk("tclass=%s", class_to_string[tclass]);
}

/**
 * avc_init - Initialize the AVC.
 *
 * Initialize the access vector cache.
 */
void __init avc_init(void)
{
    int i;

    for ( i = 0; i < AVC_CACHE_SLOTS; i++ )
    {
        INIT_LIST_HEAD(&avc_cache.slots[i]);
        spin_lock_init(&avc_cache.slots_lock[i]);
    }
    atomic_set(&avc_cache.active_nodes, 0);
    atomic_set(&avc_cache.lru_hint, 0);

    printk("AVC INITIALIZED\n");
}

int avc_get_hash_stats(char *page)
{
    int i, chain_len, max_chain_len, slots_used;
    struct avc_node *node;

    rcu_read_lock();

    slots_used = 0;
    max_chain_len = 0;
    for ( i = 0; i < AVC_CACHE_SLOTS; i++ )
    {
        if ( !list_empty(&avc_cache.slots[i]) )
        {
            slots_used++;
            chain_len = 0;
            list_for_each_entry_rcu(node, &avc_cache.slots[i], list)
                chain_len++;
            if ( chain_len > max_chain_len )
                max_chain_len = chain_len;
        }
    }

    rcu_read_unlock();

    return snprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
                    "longest chain: %d\n",
                    atomic_read(&avc_cache.active_nodes),
                    slots_used, AVC_CACHE_SLOTS, max_chain_len);
}

static void avc_node_free(struct rcu_head *rhead)
{
    struct avc_node *node = container_of(rhead, struct avc_node, rhead);
    xfree(node);
    avc_cache_stats_incr(frees);
}

static void avc_node_delete(struct avc_node *node)
{
    list_del_rcu(&node->list);
    call_rcu(&node->rhead, avc_node_free);
    atomic_dec(&avc_cache.active_nodes);
}

static void avc_node_kill(struct avc_node *node)
{
    xfree(node);
    avc_cache_stats_incr(frees);
    atomic_dec(&avc_cache.active_nodes);
}

static void avc_node_replace(struct avc_node *new, struct avc_node *old)
{
    list_replace_rcu(&old->list, &new->list);
    call_rcu(&old->rhead, avc_node_free);
    atomic_dec(&avc_cache.active_nodes);
}

static inline int avc_reclaim_node(void)
{
    struct avc_node *node;
    int hvalue, try, ecx;
    unsigned long flags;

    for ( try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++ )
    {
        atomic_inc(&avc_cache.lru_hint);
        hvalue = atomic_read(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);

        spin_lock_irqsave(&avc_cache.slots_lock[hvalue], flags);
        list_for_each_entry(node, &avc_cache.slots[hvalue], list)
        {
            if ( atomic_dec_and_test(&node->ae.used) )
            {
                /* Recently Unused */
                avc_node_delete(node);
                avc_cache_stats_incr(reclaims);
                ecx++;
                if ( ecx >= AVC_CACHE_RECLAIM )
                {
                    spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
                    goto out;
                }
            }
        }
        spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
    }
 out:
    return ecx;
}

static struct avc_node *avc_alloc_node(void)
{
    struct avc_node *node;

    node = xmalloc(struct avc_node);
    if ( !node )
        goto out;

    memset(node, 0, sizeof(*node));
    INIT_RCU_HEAD(&node->rhead);
    INIT_LIST_HEAD(&node->list);
    atomic_set(&node->ae.used, 1);

    avc_cache_stats_incr(allocations);

    atomic_inc(&avc_cache.active_nodes);
    if ( atomic_read(&avc_cache.active_nodes) > avc_cache_threshold )
        avc_reclaim_node();

 out:
    return node;
}

static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid,
                              u16 tclass, struct avc_entry *ae)
{
    node->ae.ssid = ssid;
    node->ae.tsid = tsid;
    node->ae.tclass = tclass;
    memcpy(&node->ae.avd, &ae->avd, sizeof(node->ae.avd));
}

static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
{
    struct avc_node *node, *ret = NULL;
    int hvalue;

    hvalue = avc_hash(ssid, tsid, tclass);
    list_for_each_entry_rcu(node, &avc_cache.slots[hvalue], list)
    {
        if ( ssid == node->ae.ssid &&
             tclass == node->ae.tclass &&
             tsid == node->ae.tsid )
        {
            ret = node;
            break;
        }
    }

    if ( ret == NULL )
    {
        /* cache miss */
        goto out;
    }

    /* cache hit */
    if ( atomic_read(&ret->ae.used) != 1 )
        atomic_set(&ret->ae.used, 1);
 out:
    return ret;
}

/**
 * avc_lookup - Look up an AVC entry.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @requested: requested permissions, interpreted based on @tclass
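avc_hash() above is what spreads (ssid, tsid, tclass) triples across the AVC_CACHE_SLOTS buckets, and avc_search_node() walks only the chosen bucket's RCU list. The following standalone sketch is not part of avc.c; it only reuses the hash formula and the slot count from the file, with made-up SID and class numbers, to show the folding outside the hypervisor:

/*
 * Illustrative sketch (not from avc.c): reproduce the avc_hash() folding
 * in a plain userspace program.  The SID/class values below are
 * hypothetical and exist only to demonstrate the slot selection.
 */
#include <stdio.h>
#include <stdint.h>

#define AVC_CACHE_SLOTS 512  /* same value as in avc.c; must be a power of two */

static inline int demo_avc_hash(uint32_t ssid, uint32_t tsid, uint16_t tclass)
{
    /* Same folding as avc_hash(): XOR the shifted fields, mask to a slot index. */
    return (ssid ^ (tsid << 2) ^ (tclass << 4)) & (AVC_CACHE_SLOTS - 1);
}

int main(void)
{
    uint32_t ssid = 7, tsid = 12;  /* hypothetical security identifiers */
    uint16_t tclass = 3;           /* hypothetical target class */

    printf("slot = %d\n", demo_avc_hash(ssid, tsid, tclass));
    return 0;
}

Masking with AVC_CACHE_SLOTS - 1 only yields a valid bucket index because the slot count is a power of two, which is why avc.c defines it as 512 rather than an arbitrary size.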