|   登录   |   注册   |   设为首页   |   加入收藏   

用户登录

close

用户名:

密码:

新用户注册

close

用户名:

密码:

密码确认:

电子邮箱:

关注内容:

个人主页:

帮助

close

龙宇网成立于2008年3月,网站进入整体运作于2010年10月1日。

在这里,我们把它做成了一个真正意义上的网站,完全以个人的信息为内容,以网友的需要为主导,全力搜罗各种信息,建立完善的网站功能,使网友在这里可以第一时间找到所需要的信息。

现在,经过三年的努力,网站的资料已经相当丰富,而网站得到了大家的喜爱和认可。

但,我们还是会继续努力下去,让网间的这份快乐继续持续下去,让这份闲暇时的日子,与快乐一并同行。

寻觅快乐,网住快乐,关注网络,是龙宇网的宣言与承诺。

linux page 页定义

标签: linux page
分类: 系统架构 发布时间: 2019-08-23 10:26:51 浏览次数: 36
内容提要: Linux内核在内存管理中处理的最小单位是物理页(physical page),而处理器的最小可寻址单位是字节(byte)或字(word)。大多数32位系统架构采用4K的页,64位架构则多为8K的页;对于4K的页,1G的内存就有262144个不同的页。

Linux内核在内存管理中处理的最小单位是物理页(physical page),而处理器的最小可寻址单位是字节(byte)或字(word)。大多数32位系统架构采用4K的页,64位架构则多为8K的页;对于4K的页,1G的内存就有262144个不同的页。

linux-3.10.0-327.el7/include/linux/mm_types.h

/*

* Each physical page in the system has a struct page associated with

* it to keep track of whatever it is we are using the page for at the

* moment. Note that we have no way to track which tasks are using

* a page, though if it is a pagecache page, rmap structures can tell us

* who is mapping it.

*

* The objects in struct page are organized in double word blocks in

* order to allows us to use atomic double word operations on portions

* of struct page. That is currently only used by slub but the arrangement

* allows the use of atomic double word operations on the flags/mapping

* and lru list pointers also.

*/

struct page {

    /* First double word block */

    unsigned long flags;        /* Atomic flags, some possibly

                     * updated asynchronously */

    struct address_space *mapping;  /* If low bit clear, points to

                     * inode address_space, or NULL.

                     * If page mapped as anonymous

                     * memory, low bit is set, and

                     * it points to anon_vma object:

                     * see PAGE_MAPPING_ANON below.

                     */

    /* Second double word */

    struct {

        union {

            pgoff_t index;      /* Our offset within mapping. */

            void *freelist;     /* slub/slob first free object */

            bool pfmemalloc;    /* If set by the page allocator,

                         * ALLOC_NO_WATERMARKS was set

                         * and the low watermark was not

                         * met implying that the system

                         * is under some pressure. The

                         * caller should try ensure

                         * this page is only used to

                         * free other pages.

                         */

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS

        pgtable_t pmd_huge_pte; /* protected by page->ptl */

#endif

        };

 

        union {

#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \

    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)

            /* Used for cmpxchg_double in slub */

            unsigned long counters;

#else

            /*

             * Keep _count separate from slub cmpxchg_double data.

             * As the rest of the double word is protected by

             * slab_lock but _count is not.

             */

            unsigned counters;

#endif

 

            struct {

 

                union {

                    /*

                     * Count of ptes mapped in

                     * mms, to show when page is

                     * mapped & limit reverse map

                     * searches.

                     *

                     * Used also for tail pages

                     * refcounting instead of

                     * _count. Tail pages cannot

                     * be mapped and keeping the

                     * tail page _count zero at

                     * all times guarantees

                     * get_page_unless_zero() will

                     * never succeed on tail

                     * pages.

                     */

                    atomic_t _mapcount;

 

                    struct { /* SLUB */

                        unsigned inuse:16;

                        unsigned objects:15;

                        unsigned frozen:1;

                    };

                    int units;  /* SLOB */

                };

                atomic_t _count;        /* Usage count, see below. */

            };

        };

    };

 

    /* Third double word block */

    union {

        struct list_head lru;   /* Pageout list, eg. active_list

                     * protected by zone->lru_lock !

                     */

        struct {        /* slub per cpu partial pages */

            struct page *next;  /* Next partial slab */

#ifdef CONFIG_64BIT

            int pages;  /* Nr of partial slabs left */

            int pobjects;   /* Approximate # of objects */

#else

            short int pages;

            short int pobjects;

#endif

        };

 

        struct list_head list;  /* slobs list of pages */

        struct slab *slab_page; /* slab fields */

    };

 

    /* Remainder is not double word aligned */

    union {

        unsigned long private;      /* Mapping-private opaque data:

                         * usually used for buffer_heads

                         * if PagePrivate set; used for

                         * swp_entry_t if PageSwapCache;

                         * indicates order in the buddy

                         * system if PG_buddy is set.

                         */

#if USE_SPLIT_PTE_PTLOCKS

#if BLOATED_SPINLOCKS

        spinlock_t *ptl;

#else

        spinlock_t ptl;

#endif

#endif

        struct kmem_cache *slab_cache;  /* SL[AU]B: Pointer to slab */

        struct page *first_page;    /* Compound tail pages */

    };

 

    /*

     * On machines where all RAM is mapped into kernel address space,

     * we can simply calculate the virtual address. On machines with

     * highmem some memory is mapped into kernel virtual memory

     * dynamically, so we need a place to store that address.

     * Note that this field could be 16 bits on x86 ... ;)

     *

     * Architectures with slow multiplication can define

     * WANT_PAGE_VIRTUAL in asm/page.h

     */

#if defined(WANT_PAGE_VIRTUAL)

    void *virtual;          /* Kernel virtual address (NULL if

                     not kmapped, ie. highmem) */

#endif /* WANT_PAGE_VIRTUAL */

#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS

    unsigned long debug_flags;  /* Use atomic bitops on this */

#endif

 

#ifdef CONFIG_KMEMCHECK

    /*

     * kmemcheck wants to track the status of each byte in a page; this

     * is a pointer to such a status block. NULL if not tracked.

     */

    void *shadow;

#endif

 

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS

    int _last_cpupid;

#endif

}

/*

* The struct page can be forced to be double word aligned so that atomic ops

* on double words work. The SLUB allocator can make use of such a feature.

*/

#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE

    __aligned(2 * sizeof(unsigned long))

#endif

;

 

/*
 * struct page_frag — describes a fragment of a page: the owning page
 * plus an (offset, size) byte range within it.
 *
 * Field width is picked by the #if below: when longs are wider than
 * 32 bits or pages can reach 64KB, 16-bit fields could not address
 * every byte of a page, so 32-bit fields are used instead.
 */
struct page_frag {
    struct page *page;  /* page the fragment belongs to */
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
    __u32 offset;       /* byte offset of the fragment within the page */
    __u32 size;         /* fragment length in bytes */
#else
    __u16 offset;       /* byte offset of the fragment within the page */
    __u16 size;         /* fragment length in bytes */
#endif
};

page 结构体中的 _count 表示这个页的引用计数(reference count),当值为 -1 的时候,表示没有人在使用这个页。
不过内核不会直接读取这个字段,而是通过 page_count() 函数来检查:返回 0 则表示该页空闲(page free)。

温馨提示:时间太晚了,该休息了,身体是革命的本钱。
15
20

分类: 系统架构   |   评论: 0   |   引用: 0   |   浏览次数: 36

相关文章