📄 mmzone.h
字号:
	int nid;
};	/* NOTE(review): this closes a struct whose opening lines precede
	 * this chunk (under CONFIG_ARCH_POPULATES_NODE_MAP) — confirm its
	 * name and remaining fields against the full file. */
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * it's memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	/* Per-zone data for this node, indexed by zone type. */
	struct zone node_zones[MAX_NR_ZONES];
	/* Allocation fallback ordering, one zonelist per zone type. */
	struct zonelist node_zonelists[MAX_NR_ZONES];
	int nr_zones;			/* number of populated zones */
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	struct page *node_mem_map;	/* flat map of this node's pages */
#endif
	struct bootmem_data *bdata;	/* boot-time allocator state */
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * Nests above zone->lock and zone->size_seqlock.
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;	  /* first pfn of this node */
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	/* kswapd reclaim-daemon state for this node. */
	wait_queue_head_t kswapd_wait;
	struct task_struct *kswapd;
	int kswapd_max_order;
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)

/* Page nr -> struct page within a node, for flat and non-flat mem maps. */
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr) 	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#include <linux/memory_hotplug.h>

void __get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free, struct pglist_data *pgdat);
void get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free);
void build_all_zonelists(void);
void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);

/* Distinguishes early-boot memmap init from hotplug-time init. */
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size,
				     enum memmap_context context);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
/* No-op stub when the arch does not need present-memory registration. */
static inline void memory_present(int nid,
				unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 * Computed as the zone's offset within its node's node_zones[] array.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

/* Non-zero iff the zone has any physically present pages. */
static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM);
#else
	return 0;	/* no highmem zone exists without CONFIG_HIGHMEM */
#endif
}

static inline int is_normal_idx(enum zone_type idx)
{
	return (idx == ZONE_NORMAL);
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM;
#else
	return 0;
#endif
}

static inline int is_normal(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}

static inline int is_dma32(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA32
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
#else
	return 0;
#endif
}

static inline int is_dma(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
struct file;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);

#include <linux/topology.h>

/* Returns the number of the current Node. */
#ifndef numa_node_id
#define numa_node_id()		(cpu_to_node(raw_smp_processor_id()))
#endif

#ifndef CONFIG_NEED_MULTIPLE_NODES

/* Single-node configuration: one statically allocated pg_data_t. */
extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map
#define MAX_NODES_SHIFT		1

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_pgdat - helper macro to iterate over all nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones;	\
	     zone;					\
	     zone = next_zone(zone))

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if BITS_PER_LONG == 32
/*
 * with 32 bit page->flags field, we reserve 9 bits for node/zone info.
 * there are 4 zones (3 bits) and this leaves 9-3=6 bits for nodes.
 */
#define FLAGS_RESERVED		9

#elif BITS_PER_LONG == 64
/*
 * with 64 bit flags field, there's plenty of room.
 */
#define FLAGS_RESERVED		32

#else

#error BITS_PER_LONG not defined

#endif

/* NOTE(review): the parameter is a pfn despite being named "nid". */
#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
#define early_pfn_to_nid(nid)  (0UL)
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

/* pfn <-> sparsemem section number conversions. */
#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define SECTIONS_SHIFT		(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

struct page;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;
};

#ifdef CONFIG_SPARSEMEM_EXTREME
/* EXTREME: two-level mem_section table; roots sized to fill a page. */
#define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

/* Map a section number to its mem_section; NULL if the root is absent. */
static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define	SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2

/* Strip the flag bits to recover the encoded mem_map pointer. */
static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int section_has_mem_map(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

/* A pfn is valid iff it falls in a present, in-range memory section. */
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
/* SPARSEMEM disabled: init hooks compile away to nothing. */
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
#define early_pfn_in_nid(pfn, nid)	(early_pfn_to_nid(pfn) == (nid))
#else
#define early_pfn_in_nid(pfn, nid)	(1)
#endif

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_MMZONE_H */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -