source: trunk/packages/xen-3.1/xen-3.1/linux-2.6-xen-sparse/include/linux/gfp.h @ 34

Last change on this file since 34 was 34, checked in by hartmans, 18 years ago

Add xen and xen-common

File size: 5.9 KB
#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>

struct vm_area_struct;

/*
 * GFP bitmasks..
 */
/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low three bits) */
#define __GFP_DMA       ((__force gfp_t)0x01u)
#define __GFP_HIGHMEM   ((__force gfp_t)0x02u)
#ifdef CONFIG_DMA_IS_DMA32
#define __GFP_DMA32     ((__force gfp_t)0x01)   /* ZONE_DMA is ZONE_DMA32 */
#elif BITS_PER_LONG < 64
#define __GFP_DMA32     ((__force gfp_t)0x00)   /* ZONE_NORMAL is ZONE_DMA32 */
#else
#define __GFP_DMA32     ((__force gfp_t)0x04)   /* Has own ZONE_DMA32 */
#endif

/*
 * Action modifiers - don't change the zoning
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail.  This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
 */
#define __GFP_WAIT      ((__force gfp_t)0x10u)  /* Can wait and reschedule? */
#define __GFP_HIGH      ((__force gfp_t)0x20u)  /* Should access emergency pools? */
#define __GFP_IO        ((__force gfp_t)0x40u)  /* Can start physical IO? */
#define __GFP_FS        ((__force gfp_t)0x80u)  /* Can call down to low-level FS? */
#define __GFP_COLD      ((__force gfp_t)0x100u) /* Cache-cold page required */
#define __GFP_NOWARN    ((__force gfp_t)0x200u) /* Suppress page allocation failure warning */
#define __GFP_REPEAT    ((__force gfp_t)0x400u) /* Retry the allocation.  Might fail */
#define __GFP_NOFAIL    ((__force gfp_t)0x800u) /* Retry for ever.  Cannot fail */
#define __GFP_NORETRY   ((__force gfp_t)0x1000u)/* Do not retry.  Might fail */
#define __GFP_NO_GROW   ((__force gfp_t)0x2000u)/* Slab internal usage */
#define __GFP_COMP      ((__force gfp_t)0x4000u)/* Add compound page metadata */
#define __GFP_ZERO      ((__force gfp_t)0x8000u)/* Return zeroed page on success */
#define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
#define __GFP_HARDWALL   ((__force gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */

#define __GFP_BITS_SHIFT 20     /* Room for 20 __GFP_FOO bits */
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))

/* if you forget to add the bitmask here, the kernel will crash, period */
#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
                        __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
                        __GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
                        __GFP_NOMEMALLOC|__GFP_HARDWALL)

/* This equals 0, but use constants in case they ever change */
#define GFP_NOWAIT      (GFP_ATOMIC & ~__GFP_HIGH)
/* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
#define GFP_ATOMIC      (__GFP_HIGH)
#define GFP_NOIO        (__GFP_WAIT)
#define GFP_NOFS        (__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL      (__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_USER        (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER    (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
                         __GFP_HIGHMEM)

/* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
   platforms, used as appropriate on others */

#define GFP_DMA         __GFP_DMA

/* 4GB DMA on some platforms */
#define GFP_DMA32       __GFP_DMA32
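/*
 * Illustrative sketch (not part of the original header, compiled out):
 * picking one of the composite masks above.  GFP_KERNEL sets __GFP_WAIT,
 * __GFP_IO and __GFP_FS, so it may sleep, start I/O and recurse into the
 * filesystem - process context only.  GFP_ATOMIC sets only __GFP_HIGH, so
 * it never sleeps but may dip into the emergency pools - the usual choice
 * in interrupt or spinlock context.  The helper name is made up for the
 * example.
 */
#if 0
static gfp_t example_pick_gfp(int can_sleep)
{
        /* example only: sleeping callers take GFP_KERNEL, atomic ones GFP_ATOMIC */
        return can_sleep ? GFP_KERNEL : GFP_ATOMIC;
}
#endif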

static inline int gfp_zone(gfp_t gfp)
{
        int zone = GFP_ZONEMASK & (__force int) gfp;
        BUG_ON(zone >= GFP_ZONETYPES);
        return zone;
}

/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages; the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */
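/*
 * Illustrative sketch (not part of the original header, compiled out):
 * the two namespaces side by side.  alloc_page() hands back a struct
 * page * that may live in highmem, while __get_free_page() returns a
 * kernel virtual address and so is not meant for __GFP_HIGHMEM
 * allocations.  The function name is invented for the example.
 */
#if 0
static void example_two_namespaces(void)
{
        struct page *page = alloc_page(GFP_HIGHUSER);     /* may be a highmem page */
        unsigned long addr = __get_free_page(GFP_KERNEL); /* mapped lowmem address */

        if (page)
                __free_page(page);      /* struct page * interface */
        if (addr)
                free_page(addr);        /* virtual address interface */
}
#endif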
/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */

#ifndef HAVE_ARCH_FREE_PAGE
/*
 * If arch_free_page returns non-zero then the generic free_page code can
 * immediately bail: the arch-specific function has done all the work.
 */
static inline int arch_free_page(struct page *page, int order) { return 0; }
#endif

extern struct page *
FASTCALL(__alloc_pages(gfp_t, unsigned int, struct zonelist *));

static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
                                                unsigned int order)
{
        if (unlikely(order >= MAX_ORDER))
                return NULL;

        /* Unknown node is current node */
        if (nid < 0)
                nid = numa_node_id();

        return __alloc_pages(gfp_mask, order,
                NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_mask));
}
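/*
 * Illustrative sketch (not part of the original header, compiled out):
 * allocating four contiguous pages (order 2) from a particular NUMA
 * node.  A negative nid falls back to the node we are running on, and
 * gfp_zone() selects the starting zone from the flags.  The function
 * name is invented for the example; the caller would release the block
 * with __free_pages(pages, 2).
 */
#if 0
static struct page *example_node_alloc(int nid)
{
        /* example only: returns NULL if order >= MAX_ORDER or the node is exhausted */
        return alloc_pages_node(nid, GFP_KERNEL, 2);
}
#endif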
#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
        if (unlikely(order >= MAX_ORDER))
                return NULL;

        return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_page_vma(gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr);
#else
#define alloc_pages(gfp_mask, order) \
                alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)

extern unsigned long FASTCALL(__get_free_pages(gfp_t gfp_mask, unsigned int order));
extern unsigned long FASTCALL(get_zeroed_page(gfp_t gfp_mask));

#define __get_free_page(gfp_mask) \
                __get_free_pages((gfp_mask),0)

#define __get_dma_pages(gfp_mask, order) \
                __get_free_pages((gfp_mask) | GFP_DMA,(order))

extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));
extern void FASTCALL(free_hot_page(struct page *page));
extern void FASTCALL(free_cold_page(struct page *page));

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr),0)
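/*
 * Illustrative sketch (not part of the original header, compiled out):
 * the virtual-address interface.  Memory from __get_free_pages() and
 * get_zeroed_page() is released with free_pages()/free_page() at the
 * same order, not with __free_page(), which expects a struct page *.
 * The function name is invented for the example.
 */
#if 0
static void example_get_and_free(void)
{
        unsigned long buf  = __get_free_pages(GFP_KERNEL, 1); /* two contiguous pages */
        unsigned long zero = get_zeroed_page(GFP_KERNEL);     /* one zeroed page */

        if (buf)
                free_pages(buf, 1);     /* order must match the allocation */
        if (zero)
                free_page(zero);
}
#endif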
void page_alloc_init(void);
#ifdef CONFIG_NUMA
void drain_node_pages(int node);
#else
static inline void drain_node_pages(int node) { };
#endif

#endif /* __LINUX_GFP_H */