⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 vmstat.c

📁 最新最稳定的Linux内存管理模块源代码
💻 C
📖 第 1 页 / 共 2 页
字号:
/* Print out the free pages at each order for each migratetype */
static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
{
	int order;
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* Print header */
	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
	for (order = 0; order < MAX_ORDER; ++order)
		seq_printf(m, "%6d ", order);
	seq_putc(m, '\n');

	walk_zones_in_node(m, pgdat, pagetypeinfo_showfree_print);

	return 0;
}

/*
 * Count the pageblocks of each migratetype in @zone by walking the zone's
 * PFN range one pageblock at a time, then print one row of counts.
 * Called per-zone via walk_zones_in_node().
 */
static void pagetypeinfo_showblockcount_print(struct seq_file *m,
					pg_data_t *pgdat, struct zone *zone)
{
	int mtype;
	unsigned long pfn;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = start_pfn + zone->spanned_pages;
	unsigned long count[MIGRATE_TYPES] = { 0, };

	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
#ifdef CONFIG_ARCH_FLATMEM_HAS_HOLES
		/*
		 * Ordinarily, memory holes in flatmem still have a valid
		 * memmap for the PFN range. However, an architecture for
		 * embedded systems (e.g. ARM) can free up the memmap backing
		 * holes to save memory on the assumption the memmap is
		 * never used. The page_zone linkages are then broken even
		 * though pfn_valid() returns true. Skip the page if the
		 * linkages are broken. Even if this test passed, the impact
		 * is that the counters for the movable type are off but
		 * fragmentation monitoring is likely meaningless on small
		 * systems.
		 */
		if (page_zone(page) != zone)
			continue;
#endif
		mtype = get_pageblock_migratetype(page);
		if (mtype < MIGRATE_TYPES)
			count[mtype]++;
	}

	/* Print counts */
	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12lu ", count[mtype]);
	seq_putc(m, '\n');
}

/* Print out the number of pageblocks of each migratetype */
static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
{
	int mtype;
	pg_data_t *pgdat = (pg_data_t *)arg;

	seq_printf(m, "\n%-23s", "Number of blocks type ");
	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
		seq_printf(m, "%12s ", migratetype_names[mtype]);
	seq_putc(m, '\n');
	walk_zones_in_node(m, pgdat, pagetypeinfo_showblockcount_print);

	return 0;
}

/*
 * This prints out statistics in relation to grouping pages by mobility.
 * It is expensive to collect so do not constantly read the file.
 */
static int pagetypeinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;

	/* check memoryless node */
	if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
		return 0;

	seq_printf(m, "Page block order: %d\n", pageblock_order);
	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
	seq_putc(m, '\n');
	pagetypeinfo_showfree(m, pgdat);
	pagetypeinfo_showblockcount(m, pgdat);

	return 0;
}

/* seq_file iterator backing /proc/buddyinfo (see setup_vmstat() below) */
static const struct seq_operations fragmentation_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= frag_show,
};

static int fragmentation_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &fragmentation_op);
}

static const struct file_operations fragmentation_file_operations = {
	.open		= fragmentation_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/*
 * seq_file iterator backing /proc/pagetypeinfo; reuses the frag_*
 * node-walking callbacks, only the ->show step differs.
 */
static const struct seq_operations pagetypeinfo_op = {
	.start	= frag_start,
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= pagetypeinfo_show,
};

static int pagetypeinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &pagetypeinfo_op);
}

static const struct file_operations pagetypeinfo_file_ops = {
	.open		= pagetypeinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/*
 * Helpers to emit per-zone counter names only for zone types that are
 * configured in; the trailing comma lives inside the expansion.
 */
#ifdef CONFIG_ZONE_DMA
#define TEXT_FOR_DMA(xx) xx "_dma",
#else
#define TEXT_FOR_DMA(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define TEXT_FOR_DMA32(xx) xx "_dma32",
#else
#define TEXT_FOR_DMA32(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define TEXT_FOR_HIGHMEM(xx) xx "_high",
#else
#define TEXT_FOR_HIGHMEM(xx)
#endif

#define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
					TEXT_FOR_HIGHMEM(xx) xx "_movable",

/*
 * Names printed by /proc/vmstat. Order must match the counter snapshot
 * built in vmstat_start(): NR_VM_ZONE_STAT_ITEMS zone counters first,
 * then (if configured) the vm_event_state counters.
 */
static const char * const vmstat_text[] = {
	/* Zoned VM counters */
	"nr_free_pages",
	"nr_inactive_anon",
	"nr_active_anon",
	"nr_inactive_file",
	"nr_active_file",
#ifdef CONFIG_UNEVICTABLE_LRU
	"nr_unevictable",
	"nr_mlock",
#endif
	"nr_anon_pages",
	"nr_mapped",
	"nr_file_pages",
	"nr_dirty",
	"nr_writeback",
	"nr_slab_reclaimable",
	"nr_slab_unreclaimable",
	"nr_page_table_pages",
	"nr_unstable",
	"nr_bounce",
	"nr_vmscan_write",
	"nr_writeback_temp",

#ifdef CONFIG_NUMA
	"numa_hit",
	"numa_miss",
	"numa_foreign",
	"numa_interleave",
	"numa_local",
	"numa_other",
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
	"pgpgin",
	"pgpgout",
	"pswpin",
	"pswpout",

	TEXTS_FOR_ZONES("pgalloc")

	"pgfree",
	"pgactivate",
	"pgdeactivate",

	"pgfault",
	"pgmajfault",

	TEXTS_FOR_ZONES("pgrefill")
	TEXTS_FOR_ZONES("pgsteal")
	TEXTS_FOR_ZONES("pgscan_kswapd")
	TEXTS_FOR_ZONES("pgscan_direct")

	"pginodesteal",
	"slabs_scanned",
	"kswapd_steal",
	"kswapd_inodesteal",
	"pageoutrun",
	"allocstall",

	"pgrotated",
#ifdef CONFIG_HUGETLB_PAGE
	"htlb_buddy_alloc_success",
	"htlb_buddy_alloc_fail",
#endif
#ifdef CONFIG_UNEVICTABLE_LRU
	"unevictable_pgs_culled",
	"unevictable_pgs_scanned",
	"unevictable_pgs_rescued",
	"unevictable_pgs_mlocked",
	"unevictable_pgs_munlocked",
	"unevictable_pgs_cleared",
	"unevictable_pgs_stranded",
	"unevictable_pgs_mlockfreed",
#endif
#endif
};

/*
 * Emit the full /proc/zoneinfo record for one zone: watermarks, scan
 * state, per-counter values, lowmem protection and per-cpu pagesets.
 */
static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
							struct zone *zone)
{
	int i;
	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
	seq_printf(m,
		   "\n  pages free     %lu"
		   "\n        min      %lu"
		   "\n        low      %lu"
		   "\n        high     %lu"
		   "\n        scanned  %lu (aa: %lu ia: %lu af: %lu if: %lu)"
		   "\n        spanned  %lu"
		   "\n        present  %lu",
		   zone_page_state(zone, NR_FREE_PAGES),
		   zone->pages_min,
		   zone->pages_low,
		   zone->pages_high,
		   zone->pages_scanned,
		   zone->lru[LRU_ACTIVE_ANON].nr_scan,
		   zone->lru[LRU_INACTIVE_ANON].nr_scan,
		   zone->lru[LRU_ACTIVE_FILE].nr_scan,
		   zone->lru[LRU_INACTIVE_FILE].nr_scan,
		   zone->spanned_pages,
		   zone->present_pages);

	/* Zone counter names are the first NR_VM_ZONE_STAT_ITEMS entries */
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
				zone_page_state(zone, i));

	seq_printf(m,
		   "\n        protection: (%lu",
		   zone->lowmem_reserve[0]);
	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
		seq_printf(m, ", %lu", zone->lowmem_reserve[i]);
	seq_printf(m,
		   ")"
		   "\n  pagesets");
	for_each_online_cpu(i) {
		struct per_cpu_pageset *pageset;

		pageset = zone_pcp(zone, i);
		seq_printf(m,
			   "\n    cpu: %i"
			   "\n              count: %i"
			   "\n              high:  %i"
			   "\n              batch: %i",
			   i,
			   pageset->pcp.count,
			   pageset->pcp.high,
			   pageset->pcp.batch);
#ifdef CONFIG_SMP
		seq_printf(m, "\n  vm stats threshold: %d",
				pageset->stat_threshold);
#endif
	}
	seq_printf(m,
		   "\n  all_unreclaimable: %u"
		   "\n  prev_priority:     %i"
		   "\n  start_pfn:         %lu"
		   "\n  inactive_ratio:    %u",
			   zone_is_all_unreclaimable(zone),
		   zone->prev_priority,
		   zone->zone_start_pfn,
		   zone->inactive_ratio);
	seq_putc(m, '\n');
}

/*
 * Output information about zones in @pgdat.
 */
static int zoneinfo_show(struct seq_file *m, void *arg)
{
	pg_data_t *pgdat = (pg_data_t *)arg;
	walk_zones_in_node(m, pgdat, zoneinfo_show_print);
	return 0;
}

static const struct seq_operations zoneinfo_op = {
	.start	= frag_start, /* iterate over all zones. The same as in
			       * fragmentation. */
	.next	= frag_next,
	.stop	= frag_stop,
	.show	= zoneinfo_show,
};

static int zoneinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &zoneinfo_op);
}

static const struct file_operations proc_zoneinfo_file_operations = {
	.open		= zoneinfo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/*
 * Snapshot all counters into one kmalloc'd array stashed in m->private,
 * so one read of /proc/vmstat sees a consistent set of values.  Freed in
 * vmstat_stop().  Returns a pointer into the array at offset *pos.
 */
static void *vmstat_start(struct seq_file *m, loff_t *pos)
{
	unsigned long *v;
#ifdef CONFIG_VM_EVENT_COUNTERS
	unsigned long *e;
#endif
	int i;

	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;

#ifdef CONFIG_VM_EVENT_COUNTERS
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long)
			+ sizeof(struct vm_event_state), GFP_KERNEL);
#else
	v = kmalloc(NR_VM_ZONE_STAT_ITEMS * sizeof(unsigned long),
			GFP_KERNEL);
#endif
	m->private = v;
	if (!v)
		return ERR_PTR(-ENOMEM);
	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		v[i] = global_page_state(i);
#ifdef CONFIG_VM_EVENT_COUNTERS
	/* Event counters follow the zone counters in the same buffer */
	e = v + NR_VM_ZONE_STAT_ITEMS;
	all_vm_events(e);
	e[PGPGIN] /= 2;		/* sectors -> kbytes */
	e[PGPGOUT] /= 2;
#endif
	return v + *pos;
}

static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
{
	(*pos)++;
	if (*pos >= ARRAY_SIZE(vmstat_text))
		return NULL;
	return (unsigned long *)m->private + *pos;
}

/* Print one "name value" line; the index is recovered from the pointer */
static int vmstat_show(struct seq_file *m, void *arg)
{
	unsigned long *l = arg;
	unsigned long off = l - (unsigned long *)m->private;

	seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
	return 0;
}

static void vmstat_stop(struct seq_file *m, void *arg)
{
	kfree(m->private);
	m->private = NULL;
}

static const struct seq_operations vmstat_op = {
	.start	= vmstat_start,
	.next	= vmstat_next,
	.stop	= vmstat_stop,
	.show	= vmstat_show,
};

static int vmstat_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &vmstat_op);
}

static const struct file_operations proc_vmstat_file_operations = {
	.open		= vmstat_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
int sysctl_stat_interval __read_mostly = HZ;

/* Periodic per-cpu work: fold this CPU's diffs, then re-arm itself */
static void vmstat_update(struct work_struct *w)
{
	refresh_cpu_vm_stats(smp_processor_id());
	schedule_delayed_work(&__get_cpu_var(vmstat_work),
		sysctl_stat_interval);
}

static void __cpuinit start_cpu_timer(int cpu)
{
	struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);

	INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
	/* HZ + cpu: first expiry is offset per CPU */
	schedule_delayed_work_on(cpu, vmstat_work, HZ + cpu);
}

/*
 * Use the cpu notifier to insure that the thresholds are recalculated
 * when necessary.
 */
static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
		unsigned long action,
		void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
		/* NULL func marks the work as cancelled for this cpu */
		per_cpu(vmstat_work, cpu).work.func = NULL;
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		start_cpu_timer(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		refresh_zone_stat_thresholds();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata vmstat_notifier =
	{ &vmstat_cpuup_callback, NULL, 0 };
#endif

/*
 * Boot-time init: recalculate thresholds, hook CPU hotplug, start the
 * per-cpu update timers and register the four /proc files.
 */
static int __init setup_vmstat(void)
{
#ifdef CONFIG_SMP
	int cpu;

	refresh_zone_stat_thresholds();
	register_cpu_notifier(&vmstat_notifier);

	for_each_online_cpu(cpu)
		start_cpu_timer(cpu);
#endif
#ifdef CONFIG_PROC_FS
	proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
	proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
	proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
	proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
#endif
	return 0;
}
module_init(setup_vmstat)

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -