source: trunk/packages/xen-3.1/xen-3.1/linux-2.6-xen-sparse/include/linux/mm.h @ 34

Last change on this file since 34 was 34, checked in by hartmans, 18 years ago

Add xen and xen-common

File size: 36.3 KB
#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/capability.h>

#ifdef __KERNEL__

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/debug_locks.h>

struct mempolicy;
struct anon_vma;

#ifndef CONFIG_DISCONTIGMEM          /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
#endif

extern unsigned long num_physpages;
extern void * high_memory;
extern unsigned long vmalloc_earlyreserve;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

/*
 * This struct defines a VM memory area. There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
        struct mm_struct * vm_mm;       /* The address space we belong to. */
        unsigned long vm_start;         /* Our start address within vm_mm. */
        unsigned long vm_end;           /* The first byte after our end address
                                           within vm_mm. */

        /* linked list of VM areas per task, sorted by address */
        struct vm_area_struct *vm_next;

        pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
        unsigned long vm_flags;         /* Flags, listed below. */

        struct rb_node vm_rb;

        /*
         * For areas with an address space and backing store,
         * linkage into the address_space->i_mmap prio tree, or
         * linkage to the list of like vmas hanging off its node, or
         * linkage of vma in the address_space->i_mmap_nonlinear list.
         */
        union {
                struct {
                        struct list_head list;
                        void *parent;   /* aligns with prio_tree_node parent */
                        struct vm_area_struct *head;
                } vm_set;

                struct raw_prio_tree_node prio_tree_node;
        } shared;

        /*
         * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
         * list, after a COW of one of the file pages.  A MAP_SHARED vma
         * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
         * or brk vma (with NULL file) can only be in an anon_vma list.
         */
        struct list_head anon_vma_node; /* Serialized by anon_vma->lock */
        struct anon_vma *anon_vma;      /* Serialized by page_table_lock */

        /* Function pointers to deal with this struct. */
        struct vm_operations_struct * vm_ops;

        /* Information about our backing store: */
        unsigned long vm_pgoff;         /* Offset (within vm_file) in PAGE_SIZE
                                           units, *not* PAGE_CACHE_SIZE */
        struct file * vm_file;          /* File we map to (can be NULL). */
        void * vm_private_data;         /* was vm_pte (shared mem) */
        unsigned long vm_truncate_count;/* truncate_count or restart_addr */

#ifndef CONFIG_MMU
        atomic_t vm_usage;              /* refcount (VMAs shared if !MMU) */
#endif
#ifdef CONFIG_NUMA
        struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
#endif
};
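
/*
 * Illustrative sketch, not part of the original header: a task's VMAs hang
 * off mm->mmap as an address-sorted singly linked list (vm_next above).  A
 * read-only walk is assumed to take mm->mmap_sem for reading, e.g.:
 *
 *        struct vm_area_struct *vma;
 *
 *        down_read(&mm->mmap_sem);
 *        for (vma = mm->mmap; vma; vma = vma->vm_next)
 *                printk("vma %lx-%lx flags %lx\n",
 *                       vma->vm_start, vma->vm_end, vma->vm_flags);
 *        up_read(&mm->mmap_sem);
 *
 * Here "mm" stands for a struct mm_struct the caller already holds.
 */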

/*
 * This struct defines the per-mm list of VMAs for uClinux. If CONFIG_MMU is
 * disabled, then there's a single shared list of VMAs maintained by the
 * system, and mm's subscribe to these individually
 */
struct vm_list_struct {
        struct vm_list_struct   *next;
        struct vm_area_struct   *vma;
};

#ifndef CONFIG_MMU
extern struct rb_root nommu_vma_tree;
extern struct rw_semaphore nommu_vma_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags..
 */
#define VM_READ         0x00000001      /* currently active flags */
#define VM_WRITE        0x00000002
#define VM_EXEC         0x00000004
#define VM_SHARED       0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD      0x00000010      /* limits for mprotect() etc */
#define VM_MAYWRITE     0x00000020
#define VM_MAYEXEC      0x00000040
#define VM_MAYSHARE     0x00000080

#define VM_GROWSDOWN    0x00000100      /* general info on the segment */
#define VM_GROWSUP      0x00000200
#define VM_PFNMAP       0x00000400      /* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE    0x00000800      /* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE   0x00001000
#define VM_LOCKED       0x00002000
#define VM_IO           0x00004000      /* Memory mapped I/O or similar */

                                        /* Used by sys_madvise() */
#define VM_SEQ_READ     0x00008000      /* App will access data sequentially */
#define VM_RAND_READ    0x00010000      /* App will not benefit from clustered reads */

#define VM_DONTCOPY     0x00020000      /* Do not copy this vma on fork */
#define VM_DONTEXPAND   0x00040000      /* Cannot expand with mremap() */
#define VM_RESERVED     0x00080000      /* Count as reserved_vm like IO */
#define VM_ACCOUNT      0x00100000      /* Is a VM accounted object */
#define VM_HUGETLB      0x00400000      /* Huge TLB Page VM */
#define VM_NONLINEAR    0x00800000      /* Is non-linear (remap_file_pages) */
#define VM_MAPPED_COPY  0x01000000      /* T if mapped copy of data (nommu mmap) */
#define VM_INSERTPAGE   0x02000000      /* The vma has had "vm_insert_page()" done on it */
#ifdef CONFIG_XEN
#define VM_FOREIGN      0x04000000      /* Has pages belonging to another VM */
#endif

#ifndef VM_STACK_DEFAULT_FLAGS          /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS  (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS  (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif

#define VM_READHINTMASK                 (VM_SEQ_READ | VM_RAND_READ)
#define VM_ClearReadHint(v)             (v)->vm_flags &= ~VM_READHINTMASK
#define VM_NormalReadHint(v)            (!((v)->vm_flags & VM_READHINTMASK))
#define VM_SequentialReadHint(v)        ((v)->vm_flags & VM_SEQ_READ)
#define VM_RandomReadHint(v)            ((v)->vm_flags & VM_RAND_READ)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];


/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
        void (*open)(struct vm_area_struct * area);
        void (*close)(struct vm_area_struct * area);
        struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type);
        int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);

        /* notification that a previously read-only page is about to become
         * writable; if an error is returned it will cause a SIGBUS */
        int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
        /* Area-specific function for clearing the PTE at @ptep. Returns the
         * original value of @ptep. */
        pte_t (*zap_pte)(struct vm_area_struct *vma,
                         unsigned long addr, pte_t *ptep, int is_fullmm);
#ifdef CONFIG_NUMA
        int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
        struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
                                        unsigned long addr);
        int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
                const nodemask_t *to, unsigned long flags);
#endif
};
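
/*
 * Illustrative sketch, not taken from this file: in this kernel generation a
 * minimal driver vm_operations_struct usually supplies only ->nopage.  The
 * helper my_lookup_page() below is hypothetical; NOPAGE_SIGBUS, VM_FAULT_MINOR
 * and get_page() are defined further down in this header.
 *
 *        static struct page *my_vm_nopage(struct vm_area_struct *area,
 *                                         unsigned long address, int *type)
 *        {
 *                struct page *page = my_lookup_page(area, address);
 *
 *                if (!page)
 *                        return NOPAGE_SIGBUS;
 *                get_page(page);
 *                if (type)
 *                        *type = VM_FAULT_MINOR;
 *                return page;
 *        }
 *
 *        static struct vm_operations_struct my_vm_ops = {
 *                .nopage = my_vm_nopage,
 *        };
 */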

struct mmu_gather;
struct inode;

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page.
 */
struct page {
        unsigned long flags;            /* Atomic flags, some possibly
                                         * updated asynchronously */
        atomic_t _count;                /* Usage count, see below. */
        atomic_t _mapcount;             /* Count of ptes mapped in mms,
                                         * to show when page is mapped
                                         * & limit reverse map searches.
                                         */
        union {
            struct {
                unsigned long private;          /* Mapping-private opaque data:
                                                 * usually used for buffer_heads
                                                 * if PagePrivate set; used for
                                                 * swp_entry_t if PageSwapCache;
                                                 * indicates order in the buddy
                                                 * system if PG_buddy is set.
                                                 */
                struct address_space *mapping;  /* If low bit clear, points to
                                                 * inode address_space, or NULL.
                                                 * If page mapped as anonymous
                                                 * memory, low bit is set, and
                                                 * it points to anon_vma object:
                                                 * see PAGE_MAPPING_ANON below.
                                                 */
            };
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
            spinlock_t ptl;
#endif
        };
        pgoff_t index;                  /* Our offset within mapping. */
        struct list_head lru;           /* Pageout list, eg. active_list
                                         * protected by zone->lru_lock !
                                         */
        /*
         * On machines where all RAM is mapped into kernel address space,
         * we can simply calculate the virtual address. On machines with
         * highmem some memory is mapped into kernel virtual memory
         * dynamically, so we need a place to store that address.
         * Note that this field could be 16 bits on x86 ... ;)
         *
         * Architectures with slow multiplication can define
         * WANT_PAGE_VIRTUAL in asm/page.h
         */
#if defined(WANT_PAGE_VIRTUAL)
        void *virtual;                  /* Kernel virtual address (NULL if
                                           not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
};

#define page_private(page)              ((page)->private)
#define set_page_private(page, v)       ((page)->private = (v))

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the logical refcount fell to zero (the page has
 * no users)
 */
static inline int put_page_testzero(struct page *page)
{
        BUG_ON(atomic_read(&page->_count) == 0);
        return atomic_dec_and_test(&page->_count);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 */
static inline int get_page_unless_zero(struct page *page)
{
        return atomic_inc_not_zero(&page->_count);
}

extern void FASTCALL(__page_cache_release(struct page *));

static inline int page_count(struct page *page)
{
        if (unlikely(PageCompound(page)))
                page = (struct page *)page_private(page);
        return atomic_read(&page->_count);
}

static inline void get_page(struct page *page)
{
        if (unlikely(PageCompound(page)))
                page = (struct page *)page_private(page);
        atomic_inc(&page->_count);
}
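
/*
 * Illustrative sketch (an assumption about typical usage, not from this
 * file): a caller that must keep a page stable across a blocking operation
 * pins it with get_page() and drops the pin with put_page() afterwards;
 * do_something_that_may_sleep() is hypothetical:
 *
 *        get_page(page);
 *        do_something_that_may_sleep(page);
 *        put_page(page);
 *
 * put_page(), declared below, roughly pairs with put_page_testzero() above
 * and hands the page back to the allocator once the last reference is gone.
 */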

/*
 * Setup the page count before being freed into the page allocator for
 * the first time (boot or memory hotplug)
 */
static inline void init_page_count(struct page *page)
{
        atomic_set(&page->_count, 1);
}

void put_page(struct page *page);
void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);

/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 *   page_count() == 0 means the page is free. page->lru is then used for
 *   freelist management in the buddy allocator.
 *   page_count() == 1 means the page is used for exactly one purpose
 *   (e.g. a private data page of one process).
 *
 * A page may be used for kmalloc() or anyone else who does a
 * __get_free_page(). In this case the page_count() is at least 1, and
 * all other fields are unused but should be 0 or NULL. The
 * management of this page is the responsibility of the one who uses
 * it.
 *
 * The other pages (we may call them "process pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A page may belong to an inode's memory mapping. In this case,
 * page->mapping is the pointer to the inode, and page->index is the
 * file offset of the page, in units of PAGE_CACHE_SIZE.
 *
 * A page contains an opaque `private' member, which belongs to the
 * page's address_space.  Usually, this is the address of a circular
 * list of the page's disk buffers.
 *
 * For pages belonging to inodes, the page_count() is the number of
 * attaches, plus 1 if `private' contains something, plus one for
 * the page cache itself.
 *
 * Instead of keeping dirty/clean pages in per address-space lists, we instead
 * now tag pages as dirty/under writeback in the radix tree.
 *
 * There is also a per-mapping radix tree mapping index to the page
 * in memory if present. The tree is rooted at mapping->root.
 *
 * All process pages can do I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written to disk,
 * - private pages which have been modified may need to be swapped out
 *   to swap space and (later) to be read back into memory.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */


/*
 * page->flags layout:
 *
 * There are three possibilities for how page->flags get
 * laid out.  The first is for the normal case, without
 * sparsemem.  The second is for sparsemem when there is
 * plenty of space for node and section.  The last is when
 * we have run out of space and have to fall back to an
 * alternate (slower) way of determining the node.
 *
 *        No sparsemem: |       NODE     | ZONE | ... | FLAGS |
 * with space for node: | SECTION | NODE | ZONE | ... | FLAGS |
 *   no space for node: | SECTION |     ZONE    | ... | FLAGS |
 */
#ifdef CONFIG_SPARSEMEM
#define SECTIONS_WIDTH          SECTIONS_SHIFT
#else
#define SECTIONS_WIDTH          0
#endif

#define ZONES_WIDTH             ZONES_SHIFT

#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= FLAGS_RESERVED
#define NODES_WIDTH             NODES_SHIFT
#else
#define NODES_WIDTH             0
#endif

/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
#define SECTIONS_PGOFF          ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF             (SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF             (NODES_PGOFF - ZONES_WIDTH)

/*
 * We are going to use the flags for the page-to-node mapping if it's in
 * there.  This includes the case where there is no node, so it is implicit.
 */
#define FLAGS_HAS_NODE          (NODES_WIDTH > 0 || NODES_SHIFT == 0)

#ifndef PFN_SECTION_SHIFT
#define PFN_SECTION_SHIFT 0
#endif

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away references to them.
 */
#define SECTIONS_PGSHIFT        (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT           (NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT           (ZONES_PGOFF * (ZONES_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to lookup the zone from a page. */
#if FLAGS_HAS_NODE
#define ZONETABLE_SHIFT         (NODES_SHIFT + ZONES_SHIFT)
#else
#define ZONETABLE_SHIFT         (SECTIONS_SHIFT + ZONES_SHIFT)
#endif
#define ZONETABLE_PGSHIFT       ZONES_PGSHIFT

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
#endif

#define ZONES_MASK              ((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK              ((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK           ((1UL << SECTIONS_WIDTH) - 1)
#define ZONETABLE_MASK          ((1UL << ZONETABLE_SHIFT) - 1)

static inline unsigned long page_zonenum(struct page *page)
{
        return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

struct zone;
extern struct zone *zone_table[];

static inline int page_zone_id(struct page *page)
{
        return (page->flags >> ZONETABLE_PGSHIFT) & ZONETABLE_MASK;
}
static inline struct zone *page_zone(struct page *page)
{
        return zone_table[page_zone_id(page)];
}

static inline unsigned long page_to_nid(struct page *page)
{
        if (FLAGS_HAS_NODE)
                return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
        else
                return page_zone(page)->zone_pgdat->node_id;
}
static inline unsigned long page_to_section(struct page *page)
{
        return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}

static inline void set_page_zone(struct page *page, unsigned long zone)
{
        page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
        page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}
static inline void set_page_node(struct page *page, unsigned long node)
{
        page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
        page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}
static inline void set_page_section(struct page *page, unsigned long section)
{
        page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
        page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline void set_page_links(struct page *page, unsigned long zone,
        unsigned long node, unsigned long pfn)
{
        set_page_zone(page, zone);
        set_page_node(page, node);
        set_page_section(page, pfn_to_section_nr(pfn));
}

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

static __always_inline void *lowmem_page_address(struct page *page)
{
        return __va(page_to_pfn(page) << PAGE_SHIFT);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page) ((page)->virtual)
#define set_page_address(page, address)                 \
        do {                                            \
                (page)->virtual = (address);            \
        } while(0)
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON       1

extern struct address_space swapper_space;
static inline struct address_space *page_mapping(struct page *page)
{
        struct address_space *mapping = page->mapping;

        if (unlikely(PageSwapCache(page)))
                mapping = &swapper_space;
        else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
                mapping = NULL;
        return mapping;
}

static inline int PageAnon(struct page *page)
{
        return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use ->private
 */
static inline pgoff_t page_index(struct page *page)
{
        if (unlikely(PageSwapCache(page)))
                return page_private(page);
        return page->index;
}

/*
 * The atomic page->_mapcount, like _count, starts from -1:
 * so that transitions both from it and to it can be tracked,
 * using atomic_inc_and_test and atomic_add_negative(-1).
 */
static inline void reset_page_mapcount(struct page *page)
{
        atomic_set(&(page)->_mapcount, -1);
}

static inline int page_mapcount(struct page *page)
{
        return atomic_read(&(page)->_mapcount) + 1;
}

/*
 * Return true if this page is mapped into pagetables.
 */
static inline int page_mapped(struct page *page)
{
        return atomic_read(&(page)->_mapcount) >= 0;
}

/*
 * Error return values for the *_nopage functions
 */
#define NOPAGE_SIGBUS   (NULL)
#define NOPAGE_OOM      ((struct page *) (-1))

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */
#define VM_FAULT_OOM    0x00
#define VM_FAULT_SIGBUS 0x01
#define VM_FAULT_MINOR  0x02
#define VM_FAULT_MAJOR  0x03

/*
 * Special case for get_user_pages.
 * Must be in a distinct bit from the above VM_FAULT_ flags.
 */
#define VM_FAULT_WRITE  0x10

#define offset_in_page(p)       ((unsigned long)(p) & ~PAGE_MASK)

extern void show_free_areas(void);

#ifdef CONFIG_SHMEM
struct page *shmem_nopage(struct vm_area_struct *vma,
                        unsigned long address, int *type);
int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new);
struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
                                        unsigned long addr);
int shmem_lock(struct file *file, int lock, struct user_struct *user);
#else
#define shmem_nopage filemap_nopage

static inline int shmem_lock(struct file *file, int lock,
                             struct user_struct *user)
{
        return 0;
}

static inline int shmem_set_policy(struct vm_area_struct *vma,
                                   struct mempolicy *new)
{
        return 0;
}

static inline struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
                                                 unsigned long addr)
{
        return NULL;
}
#endif
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
extern int shmem_mmap(struct file *file, struct vm_area_struct *vma);

int shmem_zero_setup(struct vm_area_struct *);

#ifndef CONFIG_MMU
extern unsigned long shmem_get_unmapped_area(struct file *file,
                                             unsigned long addr,
                                             unsigned long len,
                                             unsigned long pgoff,
                                             unsigned long flags);
#endif

static inline int can_do_mlock(void)
{
        if (capable(CAP_IPC_LOCK))
                return 1;
        if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
                return 1;
        return 0;
}
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
        struct vm_area_struct *nonlinear_vma;   /* Check page->index if set */
        struct address_space *check_mapping;    /* Check page->mapping if set */
        pgoff_t first_index;                    /* Lowest page->index to unmap */
        pgoff_t last_index;                     /* Highest page->index to unmap */
        spinlock_t *i_mmap_lock;                /* For unmap_mapping_range: */
        unsigned long truncate_count;           /* Compare vm_truncate_count */
};

struct page *vm_normal_page(struct vm_area_struct *, unsigned long, pte_t);
unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
                unsigned long size, struct zap_details *);
unsigned long unmap_vmas(struct mmu_gather **tlb,
                struct vm_area_struct *start_vma, unsigned long start_addr,
                unsigned long end_addr, unsigned long *nr_accounted,
                struct zap_details *);
void free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
                unsigned long end, unsigned long floor, unsigned long ceiling);
void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
                unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma);
int zeromap_page_range(struct vm_area_struct *vma, unsigned long from,
                        unsigned long size, pgprot_t prot);
void unmap_mapping_range(struct address_space *mapping,
                loff_t const holebegin, loff_t const holelen, int even_cows);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
                loff_t const holebegin, loff_t const holelen)
{
        unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern int vmtruncate(struct inode * inode, loff_t offset);
extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);

#ifdef CONFIG_MMU
extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma,
                        unsigned long address, int write_access);

static inline int handle_mm_fault(struct mm_struct *mm,
                        struct vm_area_struct *vma, unsigned long address,
                        int write_access)
{
        return __handle_mm_fault(mm, vma, address, write_access) &
                                (~VM_FAULT_WRITE);
}
#else
static inline int handle_mm_fault(struct mm_struct *mm,
                        struct vm_area_struct *vma, unsigned long address,
                        int write_access)
{
        /* should never happen if there's no MMU */
        BUG();
        return VM_FAULT_SIGBUS;
}
#endif

extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
void install_arg_page(struct vm_area_struct *, struct page *, unsigned long);

int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
                int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long);

int __set_page_dirty_buffers(struct page *page);
int __set_page_dirty_nobuffers(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
                                struct page *page);
int FASTCALL(set_page_dirty(struct page *page));
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);

extern unsigned long do_mremap(unsigned long addr,
                               unsigned long old_len, unsigned long new_len,
                               unsigned long flags, unsigned long new_addr);

/*
 * Prototype to add a shrinker callback for ageable caches.
 *
 * These functions are passed a count `nr_to_scan' and a gfpmask.  They should
 * scan `nr_to_scan' objects, attempting to free them.
 *
 * The callback must return the number of objects which remain in the cache.
 *
 * The callback will be passed nr_to_scan == 0 when the VM is querying the
 * cache size, so a fastpath for that case is appropriate.
 */
typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask);

/*
 * Add an aging callback.  The int is the number of 'seeks' it takes
 * to recreate one of the objects that these functions age.
 */

#define DEFAULT_SEEKS 2
struct shrinker;
extern struct shrinker *set_shrinker(int, shrinker_t);
extern void remove_shrinker(struct shrinker *shrinker);
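
/*
 * Illustrative sketch, not from this file: registering a cache shrinker with
 * the interface above.  my_cache_shrink(), my_cache_free_some() and
 * my_cache_count() are hypothetical names:
 *
 *        static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
 *        {
 *                if (nr_to_scan)
 *                        my_cache_free_some(nr_to_scan);
 *                return my_cache_count();
 *        }
 *
 *        struct shrinker *s = set_shrinker(DEFAULT_SEEKS, my_cache_shrink);
 *        ...
 *        remove_shrinker(s);
 *
 * Note how the nr_to_scan == 0 case, used by the VM purely to query the
 * cache size, falls straight through to the count.
 */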

extern pte_t *FASTCALL(get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl));

int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);

/*
 * The following ifdef needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
        return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
                NULL: pud_offset(pgd, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
        return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
                NULL: pmd_offset(pud, address);
}
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
/*
 * We tuck a spinlock to guard each pagetable page into its struct page,
 * at page->private, with BUILD_BUG_ON to make sure that this will not
 * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
 * When freeing, reset page->mapping so free_pages_check won't complain.
 */
#define __pte_lockptr(page)     &((page)->ptl)
#define pte_lock_init(_page)    do {                                    \
        spin_lock_init(__pte_lockptr(_page));                           \
} while (0)
#define pte_lock_deinit(page)   ((page)->mapping = NULL)
#define pte_lockptr(mm, pmd)    ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
#else
/*
 * We use mm->page_table_lock to guard all pagetable pages of the mm.
 */
#define pte_lock_init(page)     do {} while (0)
#define pte_lock_deinit(page)   do {} while (0)
#define pte_lockptr(mm, pmd)    ({(void)(pmd); &(mm)->page_table_lock;})
#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */

#define pte_offset_map_lock(mm, pmd, address, ptlp)     \
({                                                      \
        spinlock_t *__ptl = pte_lockptr(mm, pmd);       \
        pte_t *__pte = pte_offset_map(pmd, address);    \
        *(ptlp) = __ptl;                                \
        spin_lock(__ptl);                               \
        __pte;                                          \
})

#define pte_unmap_unlock(pte, ptl)      do {            \
        spin_unlock(ptl);                               \
        pte_unmap(pte);                                 \
} while (0)
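
/*
 * Illustrative sketch, assumed typical usage rather than text from this
 * file: the ptes under one pmd are examined by mapping and locking them with
 * pte_offset_map_lock() and releasing with pte_unmap_unlock(); the body,
 * my_handle_pte(), is hypothetical:
 *
 *        spinlock_t *ptl;
 *        pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *
 *        do {
 *                if (pte_present(*pte))
 *                        my_handle_pte(pte, addr);
 *        } while (pte++, addr += PAGE_SIZE, addr != end);
 *        pte_unmap_unlock(pte - 1, ptl);
 *
 * The same code works with either the split-ptlock or the
 * mm->page_table_lock variant selected above.
 */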

#define pte_alloc_map(mm, pmd, address)                 \
        ((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
                NULL: pte_offset_map(pmd, address))

#define pte_alloc_map_lock(mm, pmd, address, ptlp)      \
        ((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
                NULL: pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_alloc_kernel(pmd, address)                  \
        ((unlikely(!pmd_present(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
                NULL: pte_offset_kernel(pmd, address))

extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, pg_data_t *pgdat,
        unsigned long * zones_size, unsigned long zone_start_pfn,
        unsigned long *zholes_size);
extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long);
extern void setup_per_zone_pages_min(void);
extern void mem_init(void);
extern void show_mem(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);

#ifdef CONFIG_NUMA
extern void setup_per_cpu_pageset(void);
#else
static inline void setup_per_cpu_pageset(void) {}
#endif

/* prio_tree.c */
void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *);
struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
        struct prio_tree_iter *iter);

#define vma_prio_tree_foreach(vma, iter, root, begin, end)      \
        for (prio_tree_iter_init(iter, root, begin, end), vma = NULL;   \
                (vma = vma_prio_tree_next(vma, iter)); )

static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
                                        struct list_head *list)
{
        vma->shared.vm_set.parent = NULL;
        list_add_tail(&vma->shared.vm_set.list, list);
}

/* mmap.c */
extern int __vm_enough_memory(long pages, int cap_sys_admin);
extern void vma_adjust(struct vm_area_struct *vma, unsigned long start,
        unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
extern struct vm_area_struct *vma_merge(struct mm_struct *,
        struct vm_area_struct *prev, unsigned long addr, unsigned long end,
        unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
        struct mempolicy *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int split_vma(struct mm_struct *,
        struct vm_area_struct *, unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
        struct rb_node **, struct rb_node *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
        unsigned long addr, unsigned long len, pgoff_t pgoff);
extern void exit_mmap(struct mm_struct *);
extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long pgoff);

static inline unsigned long do_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long offset)
{
        unsigned long ret = -EINVAL;
        if ((offset + PAGE_ALIGN(len)) < offset)
                goto out;
        if (!(offset & ~PAGE_MASK))
                ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
out:
        return ret;
}

extern int do_munmap(struct mm_struct *, unsigned long, size_t);

extern unsigned long do_brk(unsigned long, unsigned long);

/* filemap.c */
extern unsigned long page_unuse(struct page *);
extern void truncate_inode_pages(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
                                       loff_t lstart, loff_t lend);

/* generic vm_area_ops exported for stackable file systems */
extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *);
extern int filemap_populate(struct vm_area_struct *, unsigned long,
                unsigned long, pgprot_t, unsigned long, int);

/* mm/page-writeback.c */
int write_one_page(struct page *page, int wait);

/* readahead.c */
#define VM_MAX_READAHEAD        128     /* kbytes */
#define VM_MIN_READAHEAD        16      /* kbytes (includes current page) */
#define VM_MAX_CACHE_HIT        256     /* max pages in a row in cache before
                                         * turning readahead off */

int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                        pgoff_t offset, unsigned long nr_to_read);
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
                        pgoff_t offset, unsigned long nr_to_read);
unsigned long page_cache_readahead(struct address_space *mapping,
                          struct file_ra_state *ra,
                          struct file *filp,
                          pgoff_t offset,
                          unsigned long size);
void handle_ra_miss(struct address_space *mapping,
                    struct file_ra_state *ra, pgoff_t offset);
unsigned long max_sane_readahead(unsigned long nr);

/* Do stack extension */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
#ifdef CONFIG_IA64
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#endif

/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
                                             struct vm_area_struct **pprev);

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none.  Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
        struct vm_area_struct * vma = find_vma(mm,start_addr);

        if (vma && end_addr <= vma->vm_start)
                vma = NULL;
        return vma;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
        return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}
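
/*
 * Illustrative sketch, an assumed usage note rather than original text:
 * find_vma() returns the first vma with addr < vm_end, so a non-NULL result
 * may still start above addr and has to be checked against vm_start:
 *
 *        down_read(&mm->mmap_sem);
 *        vma = find_vma(mm, addr);
 *        if (vma && vma->vm_start <= addr)
 *                npages = vma_pages(vma);
 *        up_read(&mm->mmap_sem);
 *
 * Only when vm_start <= addr does the returned vma actually cover addr.
 */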

struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
struct page *vmalloc_to_page(void *addr);
unsigned long vmalloc_to_pfn(void *addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
                        unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
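
/*
 * Illustrative sketch, not part of the original file: a character-device
 * mmap method exposing a physically contiguous buffer typically hands the
 * whole vma to remap_pfn_range().  my_buf_phys and my_buf_size are
 * hypothetical:
 *
 *        static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *        {
 *                unsigned long size = vma->vm_end - vma->vm_start;
 *
 *                if (size > my_buf_size)
 *                        return -EINVAL;
 *                return remap_pfn_range(vma, vma->vm_start,
 *                                       my_buf_phys >> PAGE_SHIFT,
 *                                       size, vma->vm_page_prot);
 *        }
 */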

struct page *follow_page(struct vm_area_struct *, unsigned long address,
                        unsigned int foll_flags);
#define FOLL_WRITE      0x01    /* check pte is writable */
#define FOLL_TOUCH      0x02    /* mark page accessed */
#define FOLL_GET        0x04    /* do get_page on page */
#define FOLL_ANON       0x08    /* give ZERO_PAGE if no pgtable */

#ifdef CONFIG_XEN
typedef int (*pte_fn_t)(pte_t *pte, struct page *pmd_page, unsigned long addr,
                        void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
                               unsigned long size, pte_fn_t fn, void *data);
#endif
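
/*
 * Illustrative sketch, an assumption about how the Xen hook above is used,
 * not text from this file: apply_to_page_range() calls the pte_fn_t callback
 * once per pte in [address, address + size), allocating page tables as
 * needed.  struct my_map_data and my_set_pte() are hypothetical:
 *
 *        struct my_map_data {
 *                struct mm_struct *mm;
 *                unsigned long pfn;
 *        };
 *
 *        static int my_set_pte(pte_t *pte, struct page *pmd_page,
 *                              unsigned long addr, void *data)
 *        {
 *                struct my_map_data *d = data;
 *
 *                set_pte_at(d->mm, addr, pte, pfn_pte(d->pfn++, PAGE_SHARED));
 *                return 0;
 *        }
 *
 *        err = apply_to_page_range(d.mm, start, len, my_set_pte, &d);
 */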

#ifdef CONFIG_PROC_FS
void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
#else
static inline void vm_stat_account(struct mm_struct *mm,
                        unsigned long flags, struct file *file, long pages)
{
}
#endif /* CONFIG_PROC_FS */

#ifndef CONFIG_DEBUG_PAGEALLOC
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (!PageHighMem(page) && !enable)
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);
}
#endif

extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
#ifdef  __HAVE_ARCH_GATE_AREA
int in_gate_area_no_task(unsigned long addr);
int in_gate_area(struct task_struct *task, unsigned long addr);
#else
int in_gate_area_no_task(unsigned long addr);
#define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);})
#endif  /* __HAVE_ARCH_GATE_AREA */

/* /proc/<pid>/oom_adj set to -17 protects from the oom-killer */
#define OOM_DISABLE -17

int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
                                        void __user *, size_t *, loff_t *);
unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                        unsigned long lru_pages);
void drop_pagecache(void);
void drop_slab(void);

#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
extern int randomize_va_space;
#endif

const char *arch_vma_name(struct vm_area_struct *vma);

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */