[PATCH] radix-tree pagecache for 2.5
From: Christoph Hellwig <hch@caldera.de>
Date: Tue Jan 29 2002 - 10:54:44 EST
I've ported my hacked-up version of Momchil Velikov's radix-tree pagecache
to 2.5.3-pre{5,6}.

The changes over the 2.4.17 version are:

o use a mempool for radix-tree nodes to avoid OOM situations
o removed add_to_page_cache_locked, it was unused in the 2.4.17 patch
o unified add_to_page_cache and add_to_page_cache_unique (see the sketch
  below)
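For illustration only (this helper is not part of the patch): with the
unification, a caller that used add_to_page_cache_unique() now just checks
the return value of add_to_page_cache(), which per the patch below is 0 on
success, -EEXIST if the index is already populated, and -ENOMEM if a
radix-tree node could not be allocated:

	/*
	 * Hypothetical caller, assuming the error conventions of the
	 * patch below.  On failure we still hold our own reference on
	 * the page, so drop it; on -EEXIST somebody else won the race
	 * and their page can be looked up instead.
	 */
	static struct page *add_or_lookup(struct page *page,
			struct address_space *mapping, unsigned long index)
	{
		int err = add_to_page_cache(page, mapping, index);

		if (!err)
			return page;	/* now visible in mapping->page_tree */
		page_cache_release(page);
		if (err == -EEXIST)
			return find_get_page(mapping, index);
		return NULL;		/* -ENOMEM */
	}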
It gives nice scalability improvements on big machines and drops the memory
usage on small ones (if you consider my 64MB Athlon small :)).
The patch is at

ftp://ftp.kernel.org/pub/linux/kernel/people/hch/patches/v2.5/2.5.3-pre5/linux-2.5.3-ratpagecache.patch.gz
ftp://ftp.kernel.org/pub/linux/kernel/people/hch/patches/v2.5/2.5.3-pre5/linux-2.5.3-ratpagecache.patch.bz2

or below.
	Christoph

-- 
Of course it doesn't work. We've performed a software upgrade.
diff -uNr -X dontdiff /datenklo/ref/linux-vger/drivers/block/rd.c linux-vger/drivers/block/rd.c
--- /datenklo/ref/linux-vger/drivers/block/rd.c	Sun Jan 13 00:05:09 2002
+++ linux-vger/drivers/block/rd.c	Tue Jan 29 03:05:40 2002
@@ -156,7 +156,6 @@
 	do {
 		int count;
-		struct page **hash;
 		struct page *page;
 		char *src, *dst;
 		int unlock = 0;
@@ -166,8 +165,7 @@
 			count = size;
 		size -= count;
 
-		hash = page_hash(mapping, index);
-		page = __find_get_page(mapping, index, hash);
+		page = find_get_page(mapping, index);
 		if (!page) {
 			page = grab_cache_page(mapping, index);
 			err = -ENOMEM;
diff -uNr -X dontdiff /datenklo/ref/linux-vger/fs/inode.c linux-vger/fs/inode.c
--- /datenklo/ref/linux-vger/fs/inode.c	Fri Jan 25 13:12:03 2002
+++ linux-vger/fs/inode.c	Tue Jan 29 03:05:40 2002
@@ -144,6 +144,7 @@
 		INIT_LIST_HEAD(&inode->i_devices);
 		sema_init(&inode->i_sem, 1);
 		sema_init(&inode->i_zombie, 1);
+		INIT_RAT_ROOT(&inode->i_data.page_tree, GFP_ATOMIC);
 		spin_lock_init(&inode->i_data.i_shared_lock);
 	}
diff -uNr -X dontdiff /datenklo/ref/linux-vger/include/linux/fs.h linux-vger/include/linux/fs.h
--- /datenklo/ref/linux-vger/include/linux/fs.h	Fri Jan 25 13:15:19 2002
+++ linux-vger/include/linux/fs.h	Tue Jan 29 03:05:40 2002
@@ -21,6 +21,7 @@
 #include <linux/cache.h>
 #include <linux/stddef.h>
 #include <linux/string.h>
+#include <linux/rat.h>
 #include <asm/atomic.h>
 #include <asm/bitops.h>
@@ -378,6 +379,7 @@
 }
 
 struct address_space {
+	struct rat_root		page_tree;	/* radix tree of all pages */
 	struct list_head	clean_pages;	/* list of clean pages */
 	struct list_head	dirty_pages;	/* list of dirty pages */
 	struct list_head	locked_pages;	/* list of locked pages */
diff -uNr -X dontdiff /datenklo/ref/linux-vger/include/linux/mm.h linux-vger/include/linux/mm.h
--- /datenklo/ref/linux-vger/include/linux/mm.h	Wed Jan 16 21:49:08 2002
+++ linux-vger/include/linux/mm.h	Tue Jan 29 03:05:40 2002
@@ -149,15 +149,12 @@
 	struct list_head list;		/* ->mapping has some page lists. */
 	struct address_space *mapping;	/* The inode (or ...) we belong to. */
 	unsigned long index;		/* Our offset within mapping. */
-	struct page *next_hash;		/* Next page sharing our hash bucket in
-					   the pagecache hash table. */
 	atomic_t count;			/* Usage count, see below. */
 	unsigned long flags;		/* atomic flags, some possibly
 					   updated asynchronously */
 	struct list_head lru;		/* Pageout list, eg. active_list;
 					   protected by pagemap_lru_lock !! */
 	wait_queue_head_t wait;		/* Page locked?  Stand in line... */
-	struct page **pprev_hash;	/* Complement to *next_hash. */
 	struct buffer_head * buffers;	/* Buffer maps us to a disk block. */
 	void *virtual;			/* Kernel virtual address (NULL if
 					   not kmapped, ie. highmem) */
@@ -225,9 +222,8 @@
 * using the page->list list_head. These fields are also used for
 * freelist management (when page->count==0).
 *
- * There is also a hash table mapping (mapping,index) to the page
- * in memory if present. The lists for this hash table use the fields
- * page->next_hash and page->pprev_hash.
+ * There is also a per-mapping radix tree mapping index to the page
+ * in memory if present. The tree is rooted at mapping->root.
 *
 * All process pages can do I/O:
 * - inode pages may need to be read from disk,
@@ -461,6 +457,24 @@
 	return 0;
 }
 
+static inline void add_page_to_inode_queue(struct address_space *mapping, struct page * page)
+{
+	struct list_head *head = &mapping->clean_pages;
+
+	mapping->nrpages++;
+	list_add(&page->list, head);
+	page->mapping = mapping;
+}
+
+static inline void remove_page_from_inode_queue(struct page * page)
+{
+	struct address_space *mapping = page->mapping;
+
+	mapping->nrpages--;
+	list_del(&page->list);
+	page->mapping = NULL;
+}
+
 struct zone_t;
 /* filemap.c */
 extern void remove_inode_page(struct page *);
diff -uNr -X dontdiff /datenklo/ref/linux-vger/include/linux/pagemap.h linux-vger/include/linux/pagemap.h
--- /datenklo/ref/linux-vger/include/linux/pagemap.h	Mon Nov 12 18:19:18 2001
+++ linux-vger/include/linux/pagemap.h	Tue Jan 29 03:18:57 2002
@@ -41,53 +41,22 @@
 */
 #define page_cache_entry(x)	virt_to_page(x)
 
-extern unsigned int page_hash_bits;
-#define PAGE_HASH_BITS (page_hash_bits)
-#define PAGE_HASH_SIZE (1 << PAGE_HASH_BITS)
-
-extern atomic_t page_cache_size; /* # of pages currently in the hash table */
-extern struct page **page_hash_table;
-
-extern void page_cache_init(unsigned long);
-
-/*
- * We use a power-of-two hash table to avoid a modulus,
- * and get a reasonable hash by knowing roughly how the
- * inode pointer and indexes are distributed (ie, we
- * roughly know which bits are "significant")
- *
- * For the time being it will work for struct address_space too (most of
- * them sitting inside the inodes). We might want to change it.
- */
-static inline unsigned long _page_hashfn(struct address_space * mapping, unsigned long index)
-{
-#define i (((unsigned long) mapping)/(sizeof(struct inode) & ~ (sizeof(struct inode) - 1)))
-#define s(x) ((x)+((x)>>PAGE_HASH_BITS))
-	return s(i+index) & (PAGE_HASH_SIZE-1);
-#undef i
-#undef s
-}
-
-#define page_hash(mapping,index) (page_hash_table+_page_hashfn(mapping,index))
-
-extern struct page * __find_get_page(struct address_space *mapping,
-				unsigned long index, struct page **hash);
-#define find_get_page(mapping, index) \
-		__find_get_page(mapping, index, page_hash(mapping, index))
-extern struct page * __find_lock_page (struct address_space * mapping,
-		unsigned long index, struct page **hash);
+extern atomic_t page_cache_size; /* # of pages currently in the page cache */
+
+extern struct page * find_get_page(struct address_space *mapping,
+				unsigned long index);
+extern struct page * find_lock_page(struct address_space *mapping,
+				unsigned long index);
+extern struct page * find_trylock_page(struct address_space *mapping,
+				unsigned long index);
+extern struct page * find_or_create_page(struct address_space *mapping,
+				unsigned long index, unsigned int gfp_mask);
+extern int add_to_page_cache(struct page *page, struct address_space *mapping,
+				unsigned long index);
 extern void FASTCALL(lock_page(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));
-#define find_lock_page(mapping, index) \
-		__find_lock_page(mapping, index, page_hash(mapping, index))
-extern struct page *find_trylock_page(struct address_space *, unsigned long);
-
-extern void add_to_page_cache(struct page * page, struct address_space * mapping, unsigned long index);
-extern void add_to_page_cache_locked(struct page * page, struct address_space * mapping, unsigned long index);
-extern int add_to_page_cache_unique(struct page * page, struct address_space *mapping, unsigned long index, struct page **hash);
 
 extern void ___wait_on_page(struct page *);
diff -uNr -X dontdiff /datenklo/ref/linux-vger/include/linux/rat.h linux-vger/include/linux/rat.h
--- /datenklo/ref/linux-vger/include/linux/rat.h	Thu Jan  1 01:00:00 1970
+++ linux-vger/include/linux/rat.h	Tue Jan 29 03:05:40 2002
@@ -0,0 +1,26 @@
+#ifndef _LINUX_RAT_H
+#define _LINUX_RAT_H
+
+struct rat_node;
+
+#define RAT_SLOT_RESERVED ((void *)~0UL)
+
+struct rat_root {
+	unsigned int	height;
+	int		gfp_mask;
+	struct rat_node	*rnode;
+};
+
+#define INIT_RAT_ROOT(root, mask) \
+do { \
+	(root)->height = 0; \
+	(root)->gfp_mask = (mask); \
+	(root)->rnode = NULL; \
+} while (0)
+
+extern int rat_reserve(struct rat_root *, unsigned long, void ***);
+extern int rat_insert(struct rat_root *, unsigned long, void *);
+extern void *rat_lookup(struct rat_root *, unsigned long);
+extern int rat_delete(struct rat_root *, unsigned long);
+
+#endif /* _LINUX_RAT_H */
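For illustration only (not part of the patch): a minimal user of the rat_*
interface above, assuming a caller that does its own locking and may sleep
(hence GFP_KERNEL rather than the GFP_ATOMIC the pagecache uses):

	static int rat_example(void *item, void *later_item)
	{
		struct rat_root root;
		void **slot;
		int err;

		INIT_RAT_ROOT(&root, GFP_KERNEL);

		err = rat_insert(&root, 42, item);	/* -ENOMEM/-EEXIST on failure */
		if (err)
			return err;
		if (rat_lookup(&root, 42) != item)	/* lookup returns the stored item */
			BUG();

		/*
		 * rat_reserve() allocates the intermediate nodes and hands
		 * the slot back (filled with RAT_SLOT_RESERVED) so the
		 * caller can publish the real item later - this is how
		 * move_to_swap_cache() below uses it.
		 */
		err = rat_reserve(&root, 43, &slot);
		if (!err)
			*slot = later_item;

		rat_delete(&root, 43);
		return rat_delete(&root, 42);		/* -ENOENT if absent */
	}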
diff -uNr -X dontdiff /datenklo/ref/linux-vger/include/linux/swap.h linux-vger/include/linux/swap.h
--- /datenklo/ref/linux-vger/include/linux/swap.h	Wed Jan 16 21:49:11 2002
+++ linux-vger/include/linux/swap.h	Tue Jan 29 03:05:40 2002
@@ -96,7 +96,7 @@
 struct task_struct;
 struct vm_area_struct;
 struct sysinfo;
-
+struct address_space;
 struct zone_t;
 
 /* linux/mm/swap.c */
@@ -126,6 +126,9 @@
 extern int add_to_swap_cache(struct page *, swp_entry_t);
 extern void __delete_from_swap_cache(struct page *page);
 extern void delete_from_swap_cache(struct page *page);
+extern int move_to_swap_cache(struct page *page, swp_entry_t entry);
+extern int move_from_swap_cache(struct page *page, unsigned long index,
+		struct address_space *mapping);
 extern void free_page_and_swap_cache(struct page *page);
 extern struct page * lookup_swap_cache(swp_entry_t);
 extern struct page * read_swap_cache_async(swp_entry_t);
diff -uNr -X dontdiff /datenklo/ref/linux-vger/init/main.c linux-vger/init/main.c
--- /datenklo/ref/linux-vger/init/main.c	Fri Jan 25 13:16:04 2002
+++ linux-vger/init/main.c	Tue Jan 29 03:05:40 2002
@@ -69,6 +69,7 @@
 extern void sbus_init(void);
 extern void sysctl_init(void);
 extern void signals_init(void);
+extern void ratcache_init(void) __init;
 
 extern void free_initmem(void);
@@ -377,7 +378,7 @@
 	proc_caches_init();
 	vfs_caches_init(mempages);
 	buffer_init(mempages);
-	page_cache_init(mempages);
+	ratcache_init();
 #if defined(CONFIG_ARCH_S390)
 	ccwcache_init();
 #endif
diff -uNr -X dontdiff /datenklo/ref/linux-vger/kernel/ksyms.c linux-vger/kernel/ksyms.c
--- /datenklo/ref/linux-vger/kernel/ksyms.c	Fri Jan 25 13:16:04 2002
+++ linux-vger/kernel/ksyms.c	Tue Jan 29 03:05:40 2002
@@ -218,8 +218,6 @@
 EXPORT_SYMBOL(generic_file_mmap);
 EXPORT_SYMBOL(generic_ro_fops);
 EXPORT_SYMBOL(generic_buffer_fdatasync);
-EXPORT_SYMBOL(page_hash_bits);
-EXPORT_SYMBOL(page_hash_table);
 EXPORT_SYMBOL(file_lock_list);
 EXPORT_SYMBOL(locks_init_lock);
 EXPORT_SYMBOL(locks_copy_lock);
@@ -254,8 +252,8 @@
 EXPORT_SYMBOL(__pollwait);
 EXPORT_SYMBOL(poll_freewait);
 EXPORT_SYMBOL(ROOT_DEV);
-EXPORT_SYMBOL(__find_get_page);
-EXPORT_SYMBOL(__find_lock_page);
+EXPORT_SYMBOL(find_get_page);
+EXPORT_SYMBOL(find_lock_page);
 EXPORT_SYMBOL(grab_cache_page);
 EXPORT_SYMBOL(grab_cache_page_nowait);
 EXPORT_SYMBOL(read_cache_page);
diff -uNr -X dontdiff /datenklo/ref/linux-vger/lib/Makefile linux-vger/lib/Makefile
--- /datenklo/ref/linux-vger/lib/Makefile	Wed Jan 16 21:49:17 2002
+++ linux-vger/lib/Makefile	Tue Jan 29 03:05:40 2002
@@ -8,9 +8,10 @@
 
 L_TARGET := lib.a
 
-export-objs := cmdline.o dec_and_lock.o rwsem-spinlock.o rwsem.o crc32.o
+export-objs := cmdline.o dec_and_lock.o rwsem-spinlock.o rwsem.o crc32.o rat.o
 
-obj-y := errno.o ctype.o string.o vsprintf.o brlock.o cmdline.o bust_spinlocks.o rbtree.o
+obj-y := errno.o ctype.o string.o vsprintf.o brlock.o cmdline.o \
+	 bust_spinlocks.o rbtree.o rat.o
 
 obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
diff -uNr -X dontdiff /datenklo/ref/linux-vger/lib/rat.c linux-vger/lib/rat.c
--- /datenklo/ref/linux-vger/lib/rat.c	Thu Jan  1 01:00:00 1970
+++ linux-vger/lib/rat.c	Tue Jan 29 03:05:40 2002
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2001 Momchil Velikov
+ * Portions Copyright (C) 2001 Christoph Hellwig
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mempool.h>
+#include <linux/rat.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+/*
+ * Radix tree node definition.
+ */
+#define RAT_MAP_SHIFT	7
+#define RAT_MAP_SIZE	(1UL << RAT_MAP_SHIFT)
+#define RAT_MAP_MASK	(RAT_MAP_SIZE-1)
+
+struct rat_node {
+	unsigned int	count;
+	void		*slots[RAT_MAP_SIZE];
+};
+
+struct rat_path {
+	struct rat_node *node, **slot;
+};
+
+#define RAT_INDEX_BITS	(8 /* CHAR_BIT */ * sizeof(unsigned long))
+
+/*
+ * Radix tree node cache.
+ */
+#define POOL_SIZE	32
+
+static kmem_cache_t *ratnode_cachep;
+static mempool_t *ratnode_pool;
+
+#define ratnode_alloc(root) \
+	mempool_alloc(ratnode_pool, (root)->gfp_mask)
+#define ratnode_free(node) \
+	mempool_free((node), ratnode_pool)
+
+/*
+ * Return the maximum key which can be stored into a
+ * radix tree with height HEIGHT.
+ */
+static inline unsigned long rat_maxindex(unsigned int height)
+{
+	unsigned int tmp = height * RAT_MAP_SHIFT;
+	unsigned long index = (~0UL >> (RAT_INDEX_BITS - tmp - 1)) >> 1;
+
+	if (tmp >= RAT_INDEX_BITS)
+		index = ~0UL;
+	return index;
+}
+
+/*
+ * Extend a radix tree so it can store key @index.
+ */
+static int rat_extend(struct rat_root *root, unsigned long index)
+{
+	struct rat_node *node;
+	unsigned int height;
+
+	/* Figure out what the height should be. */
+	height = root->height + 1;
+	while (index > rat_maxindex(height))
+		height++;
+
+	if (root->rnode) {
+		do {
+			if (!(node = ratnode_alloc(root)))
+				return -ENOMEM;
+
+			/* Increase the height. */
+			node->slots[0] = root->rnode;
+			if (root->rnode)
+				node->count = 1;
+			root->rnode = node;
+			root->height++;
+		} while (height > root->height);
+	} else
+		root->height = height;
+
+	return 0;
+}
+
+/**
+ *	rat_reserve    -    reserve space in a radix tree
+ *	@root:		radix tree root
+ *	@index:		index key
+ *	@pslot:		pointer to reserved slot
+ *
+ *	Reserve a slot in a radix tree for the key @index.
+ */
+int rat_reserve(struct rat_root *root, unsigned long index, void ***pslot)
+{
+	struct rat_node *node = NULL, *tmp, **slot;
+	unsigned int height, shift;
+	int error;
+
+	/* Make sure the tree is high enough. */
+	if (index > rat_maxindex(root->height)) {
+		error = rat_extend(root, index);
+		if (error)
+			return error;
+	}
+
+	slot = &root->rnode;
+	height = root->height;
+	shift = (height-1) * RAT_MAP_SHIFT;
+
+	while (height > 0) {
+		if (*slot == NULL) {
+			/* Have to add a child node. */
+			if (!(tmp = ratnode_alloc(root)))
+				return -ENOMEM;
+			*slot = tmp;
+			if (node)
+				node->count++;
+		}
+
+		/* Go a level down. */
+		node = *slot;
+		slot = (struct rat_node **)
+			(node->slots + ((index >> shift) & RAT_MAP_MASK));
+		shift -= RAT_MAP_SHIFT;
+		height--;
+	}
+
+	if (*slot != NULL)
+		return -EEXIST;
+	if (node)
+		node->count++;
+
+	*pslot = (void **)slot;
+	**pslot = RAT_SLOT_RESERVED;
+	return 0;
+}
+
+EXPORT_SYMBOL(rat_reserve);
+
+/**
+ *	rat_insert    -    insert into a radix tree
+ *	@root:		radix tree root
+ *	@index:		index key
+ *	@item:		item to insert
+ *
+ *	Insert an item into the radix tree at position @index.
+ */
+int rat_insert(struct rat_root *root, unsigned long index, void *item)
+{
+	void **slot;
+	int error;
+
+	error = rat_reserve(root, index, &slot);
+	if (!error)
+		*slot = item;
+	return error;
+}
+
+EXPORT_SYMBOL(rat_insert);
+
+/**
+ *	rat_lookup    -    perform lookup operation on a radix tree
+ *	@root:		radix tree root
+ *	@index:		index key
+ *
+ *	Lookup the item at the position @index in the radix tree @root.
+ */
+void *rat_lookup(struct rat_root *root, unsigned long index)
+{
+	unsigned int height, shift;
+	struct rat_node **slot;
+
+	height = root->height;
+	if (index > rat_maxindex(height))
+		return NULL;
+
+	shift = (height-1) * RAT_MAP_SHIFT;
+	slot = &root->rnode;
+
+	while (height > 0) {
+		if (*slot == NULL)
+			return NULL;
+
+		slot = (struct rat_node **)
+			((*slot)->slots + ((index >> shift) & RAT_MAP_MASK));
+		shift -= RAT_MAP_SHIFT;
+		height--;
+	}
+
+	return (void *) *slot;
+}
+
+EXPORT_SYMBOL(rat_lookup);
+
+/**
+ *	rat_delete    -    delete an item from a radix tree
+ *	@root:		radix tree root
+ *	@index:		index key
+ *
+ *	Remove the item at @index from the radix tree rooted at @root.
+ */
+int rat_delete(struct rat_root *root, unsigned long index)
+{
+	struct rat_path path[RAT_INDEX_BITS/RAT_MAP_SHIFT + 2], *pathp = path;
+	unsigned int height, shift;
+
+	height = root->height;
+	if (index > rat_maxindex(height))
+		return -ENOENT;
+
+	shift = (height-1) * RAT_MAP_SHIFT;
+	pathp->node = NULL;
+	pathp->slot = &root->rnode;
+
+	while (height > 0) {
+		if (*pathp->slot == NULL)
+			return -ENOENT;
+
+		pathp[1].node = *pathp[0].slot;
+		pathp[1].slot = (struct rat_node **)
+			(pathp[1].node->slots + ((index >> shift) & RAT_MAP_MASK));
+		pathp++;
+		shift -= RAT_MAP_SHIFT;
+		height--;
+	}
+
+	if (*pathp[0].slot == NULL)
+		return -ENOENT;
+
+	*pathp[0].slot = NULL;
+
+	while (pathp[0].node && --pathp[0].node->count == 0) {
+		pathp--;
+		*pathp[0].slot = NULL;
+		ratnode_free(pathp[1].node);
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(rat_delete);
+
+static void ratnode_ctor(void *node, kmem_cache_t *cachep, unsigned long flags)
+{
+	memset(node, 0, sizeof(struct rat_node));
+}
+
+static void *ratnode_pool_alloc(int gfp_mask, void *data)
+{
+	return kmem_cache_alloc(ratnode_cachep, gfp_mask);
+}
+
+static void ratnode_pool_free(void *node, void *data)
+{
+	kmem_cache_free(ratnode_cachep, node);
+}
+
+void __init ratcache_init(void)
+{
+	ratnode_cachep = kmem_cache_create("ratnode", sizeof(struct rat_node),
+			0, SLAB_HWCACHE_ALIGN, ratnode_ctor, NULL);
+	if (!ratnode_cachep)
+		panic("failed to create ratnode cache\n");
+	ratnode_pool = mempool_create(POOL_SIZE, ratnode_pool_alloc,
+			ratnode_pool_free, NULL);
+	if (!ratnode_pool)
+		panic("failed to create ratnode pool\n");
+}
diff -uNr -X dontdiff /datenklo/ref/linux-vger/mm/filemap.c linux-vger/mm/filemap.c
--- /datenklo/ref/linux-vger/mm/filemap.c	Fri Jan 25 13:16:04 2002
+++ linux-vger/mm/filemap.c	Tue Jan 29 03:32:41 2002
@@ -44,69 +44,16 @@
 */
 
 atomic_t page_cache_size = ATOMIC_INIT(0);
-unsigned int page_hash_bits;
-struct page **page_hash_table;
 
-spinlock_t pagecache_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 /*
- * NOTE: to avoid deadlocking you must never acquire the pagemap_lru_lock
- * with the pagecache_lock held.
- *
- * Ordering:
- *	swap_lock ->
- *		pagemap_lru_lock ->
- *			pagecache_lock
+ * The deadlock-free ordering of lock acquisition is:
+ *	pagemap_lru_lock ==> mapping_lock
 */
 spinlock_t pagemap_lru_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
 
 #define CLUSTER_PAGES		(1 << page_cluster)
 #define CLUSTER_OFFSET(x)	(((x) >> page_cluster) << page_cluster)
 
-static void FASTCALL(add_page_to_hash_queue(struct page * page, struct page **p));
-static void add_page_to_hash_queue(struct page * page, struct page **p)
-{
-	struct page *next = *p;
-
-	*p = page;
-	page->next_hash = next;
-	page->pprev_hash = p;
-	if (next)
-		next->pprev_hash = &page->next_hash;
-	if (page->buffers)
-		PAGE_BUG(page);
-	atomic_inc(&page_cache_size);
-}
-
-static inline void add_page_to_inode_queue(struct address_space *mapping, struct page * page)
-{
-	struct list_head *head = &mapping->clean_pages;
-
-	mapping->nrpages++;
-	list_add(&page->list, head);
-	page->mapping = mapping;
-}
-
-static inline void remove_page_from_inode_queue(struct page * page)
-{
-	struct address_space *mapping = page->mapping;
-
-	mapping->nrpages--;
-	list_del(&page->list);
-	page->mapping = NULL;
-}
-
-static inline void remove_page_from_hash_queue(struct page * page)
-{
-	struct page *next = page->next_hash;
-	struct page **pprev = page->pprev_hash;
-
-	if (next)
-		next->pprev_hash = pprev;
-	*pprev = next;
-	page->pprev_hash = NULL;
-	atomic_dec(&page_cache_size);
-}
-
 /*
 * Remove a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
@@ -115,18 +62,20 @@
 void __remove_inode_page(struct page *page)
 {
 	if (PageDirty(page)) BUG();
+	rat_delete(&page->mapping->page_tree, page->index);
 	remove_page_from_inode_queue(page);
-	remove_page_from_hash_queue(page);
+	atomic_dec(&page_cache_size);
 }
 
 void remove_inode_page(struct page *page)
 {
+	struct address_space *mapping = page->mapping;
+
 	if (!PageLocked(page))
 		PAGE_BUG(page);
 
-	spin_lock(&pagecache_lock);
+	spin_lock(&mapping->i_shared_lock);
 	__remove_inode_page(page);
-	spin_unlock(&pagecache_lock);
+	spin_unlock(&mapping->i_shared_lock);
 }
 
 static inline int sync_page(struct page *page)
@@ -147,10 +96,10 @@
 	struct address_space *mapping = page->mapping;
 
 	if (mapping) {
-		spin_lock(&pagecache_lock);
+		spin_lock(&mapping->i_shared_lock);
 		list_del(&page->list);
 		list_add(&page->list, &mapping->dirty_pages);
-		spin_unlock(&pagecache_lock);
+		spin_unlock(&mapping->i_shared_lock);
 
 		if (mapping->host)
 			mark_inode_dirty_pages(mapping->host);
@@ -170,11 +119,12 @@
 {
 	struct list_head *head, *curr;
 	struct page * page;
+	struct address_space *mapping = inode->i_mapping;
 
-	head = &inode->i_mapping->clean_pages;
+	head = &mapping->clean_pages;
 
 	spin_lock(&pagemap_lru_lock);
-	spin_lock(&pagecache_lock);
+	spin_lock(&mapping->i_shared_lock);
 	curr = head->next;
 
 	while (curr != head) {
@@ -205,7 +155,7 @@
 		continue;
 	}
 
-	spin_unlock(&pagecache_lock);
+	spin_unlock(&mapping->i_shared_lock);
 	spin_unlock(&pagemap_lru_lock);
 }
 
@@ -244,8 +194,9 @@
 	page_cache_release(page);
 }
 
-static int FASTCALL(truncate_list_pages(struct list_head *, unsigned long, unsigned *));
-static int truncate_list_pages(struct list_head *head, unsigned long start, unsigned *partial)
+static int FASTCALL(truncate_list_pages(struct address_space *, struct list_head *, unsigned long, unsigned *));
+static int truncate_list_pages(struct address_space *mapping,
+		struct list_head *head, unsigned long start, unsigned *partial)
 {
 	struct list_head *curr;
 	struct page * page;
@@ -274,7 +225,7 @@
 			/* Restart on this page */
 			list_add(head, curr);
 
-			spin_unlock(&pagecache_lock);
+			spin_unlock(&mapping->i_shared_lock);
 			unlocked = 1;
 
 			if (!failed) {
@@ -295,7 +246,7 @@
 				schedule();
 			}
 
-			spin_lock(&pagecache_lock);
+			spin_lock(&mapping->i_shared_lock);
 			goto restart;
 		}
 		curr = curr->prev;
@@ -319,24 +270,28 @@
 	unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
 	int unlocked;
 
-	spin_lock(&pagecache_lock);
+	spin_lock(&mapping->i_shared_lock);
 	do {
-		unlocked = truncate_list_pages(&mapping->clean_pages, start, &partial);
-		unlocked |= truncate_list_pages(&mapping->dirty_pages, start, &partial);
-		unlocked |= truncate_list_pages(&mapping->locked_pages, start, &partial);
+		unlocked = truncate_list_pages(mapping, &mapping->clean_pages,
+				start, &partial);
+		unlocked |= truncate_list_pages(mapping, &mapping->dirty_pages,
+				start, &partial);
+		unlocked |= truncate_list_pages(mapping, &mapping->locked_pages,
+				start, &partial);
 	} while (unlocked);
 	/* Traversed all three lists without dropping the lock */
-	spin_unlock(&pagecache_lock);
+	spin_unlock(&mapping->i_shared_lock);
 }
 
-static inline int invalidate_this_page2(struct page * page,
+static inline int invalidate_this_page2(struct address_space * mapping,
+		struct page * page,
 		struct list_head * curr,
 		struct list_head * head)
 {
 	int unlocked = 1;
 
 	/*
-	 * The page is locked and we hold the pagecache_lock as well
+	 * The page is locked and we hold the mapping lock as well
 	 * so both page_count(page) and page->buffers stays constant here.
 	 */
 	if (page_count(page) == 1 + !!page->buffers) {
@@ -345,7 +300,7 @@
 		list_add_tail(head, curr);
 
 		page_cache_get(page);
-		spin_unlock(&pagecache_lock);
+		spin_unlock(&mapping->i_shared_lock);
 		truncate_complete_page(page);
 	} else {
 		if (page->buffers) {
@@ -354,7 +309,7 @@
 			list_add_tail(head, curr);
 
 			page_cache_get(page);
-			spin_unlock(&pagecache_lock);
+			spin_unlock(&mapping->i_shared_lock);
 			block_invalidate_page(page);
 		} else
 			unlocked = 0;
@@ -366,8 +321,8 @@
 	return unlocked;
 }
 
-static int FASTCALL(invalidate_list_pages2(struct list_head *));
-static int invalidate_list_pages2(struct list_head *head)
+static int FASTCALL(invalidate_list_pages2(struct address_space *, struct list_head *));
+static int invalidate_list_pages2(struct address_space *mapping, struct list_head *head)
 {
 	struct list_head *curr;
 	struct page * page;
@@ -381,7 +336,7 @@
 		if (!TryLockPage(page)) {
 			int __unlocked;
 
-			__unlocked = invalidate_this_page2(page, curr, head);
+			__unlocked = invalidate_this_page2(mapping, page, curr, head);
 			UnlockPage(page);
 			unlocked |= __unlocked;
 			if (!__unlocked) {
@@ -394,7 +349,7 @@
 			list_add(head, curr);
 
 			page_cache_get(page);
-			spin_unlock(&pagecache_lock);
+			spin_unlock(&mapping->i_shared_lock);
 			unlocked = 1;
 			wait_on_page(page);
 		}
@@ -405,7 +360,7 @@
 			schedule();
 		}
 
-		spin_lock(&pagecache_lock);
+		spin_lock(&mapping->i_shared_lock);
 		goto restart;
 	}
 	return unlocked;
@@ -420,32 +375,13 @@
 {
 	int unlocked;
 
-	spin_lock(&pagecache_lock);
+	spin_lock(&mapping->i_shared_lock);
 	do {
-		unlocked = invalidate_list_pages2(&mapping->clean_pages);
-		unlocked |= invalidate_list_pages2(&mapping->dirty_pages);
-		unlocked |= invalidate_list_pages2(&mapping->locked_pages);
+		unlocked = invalidate_list_pages2(mapping, &mapping->clean_pages);
+		unlocked |= invalidate_list_pages2(mapping, &mapping->dirty_pages);
+		unlocked |= invalidate_list_pages2(mapping, &mapping->locked_pages);
 	} while (unlocked);
-	spin_unlock(&pagecache_lock);
-}
-
-static inline struct page * __find_page_nolock(struct address_space *mapping, unsigned long offset, struct page *page)
-{
-	goto inside;
-
-	for (;;) {
-		page = page->next_hash;
-inside:
-		if (!page)
-			goto not_found;
-		if (page->mapping != mapping)
-			continue;
-		if (page->index == offset)
-			break;
-	}
-
-not_found:
-	return page;
+	spin_unlock(&mapping->i_shared_lock);
 }
 
 /*
@@ -483,13 +419,14 @@
 	return error;
 }
 
-static int do_buffer_fdatasync(struct list_head *head, unsigned long start, unsigned long end, int (*fn)(struct page *))
+static int do_buffer_fdatasync(struct address_space *mapping,
+		struct list_head *head, unsigned long start, unsigned long end, int (*fn)(struct page *))
 {
 	struct list_head *curr;
 	struct page *page;
 	int retval = 0;
 
-	spin_lock(&pagecache_lock);
+	spin_lock(&mapping->i_shared_lock);
 	curr = head->next;
 	while (curr != head) {
 		page = list_entry(curr, struct page, list);
@@ -502,7 +439,7 @@
 			continue;
 
 		page_cache_get(page);
-		spin_unlock(&pagecache_lock);
+		spin_unlock(&mapping->i_shared_lock);
 		lock_page(page);
 
 		/* The buffers could have been free'd while we waited for the page lock */
@@ -510,11 +447,11 @@
 			retval |= fn(page);
 
 		UnlockPage(page);
-		spin_lock(&pagecache_lock);
+		spin_lock(&mapping->i_shared_lock);
 		curr = page->list.next;
 		page_cache_release(page);
 	}
-	spin_unlock(&pagecache_lock);
+	spin_unlock(&mapping->i_shared_lock);
 
 	return retval;
 }
@@ -525,17 +462,18 @@
 */
 int generic_buffer_fdatasync(struct inode *inode, unsigned long start_idx, unsigned long end_idx)
 {
+	struct address_space *mapping = inode->i_mapping;
 	int retval;
 
 	/* writeout dirty buffers on pages from both clean and dirty lists */
-	retval = do_buffer_fdatasync(&inode->i_mapping->dirty_pages, start_idx, end_idx, writeout_one_page);
-	retval |= do_buffer_fdatasync(&inode->i_mapping->clean_pages, start_idx, end_idx, writeout_one_page);
-	retval |= do_buffer_fdatasync(&inode->i_mapping->locked_pages, start_idx, end_idx, writeout_one_page);
+	retval = do_buffer_fdatasync(mapping, &mapping->dirty_pages, start_idx, end_idx, writeout_one_page);
+	retval |= do_buffer_fdatasync(mapping, &mapping->clean_pages, start_idx, end_idx, writeout_one_page);
+	retval |= do_buffer_fdatasync(mapping, &mapping->locked_pages, start_idx, end_idx, writeout_one_page);
 
 	/* now wait for locked buffers on pages from both clean and dirty lists */
-	retval |= do_buffer_fdatasync(&inode->i_mapping->dirty_pages, start_idx, end_idx, waitfor_one_page);
-	retval |= do_buffer_fdatasync(&inode->i_mapping->clean_pages, start_idx, end_idx, waitfor_one_page);
-	retval |= do_buffer_fdatasync(&inode->i_mapping->locked_pages, start_idx, end_idx, waitfor_one_page);
+	retval |= do_buffer_fdatasync(mapping, &mapping->dirty_pages, start_idx, end_idx, waitfor_one_page);
+	retval |= do_buffer_fdatasync(mapping, &mapping->clean_pages, start_idx, end_idx, waitfor_one_page);
+	retval |= do_buffer_fdatasync(mapping, &mapping->locked_pages, start_idx, end_idx, waitfor_one_page);
 
 	return retval;
 }
@@ -580,7 +518,7 @@
 {
 	int (*writepage)(struct page *) = mapping->a_ops->writepage;
 
-	spin_lock(&pagecache_lock);
+	spin_lock(&mapping->i_shared_lock);
 
 	while (!list_empty(&mapping->dirty_pages)) {
 		struct page *page = list_entry(mapping->dirty_pages.next, struct page, list);
@@ -592,7 +530,7 @@
 			continue;
 
 		page_cache_get(page);
-		spin_unlock(&pagecache_lock);
+		spin_unlock(&mapping->i_shared_lock);
 
 		lock_page(page);
@@ -603,9 +541,9 @@
 			UnlockPage(page);
 
 		page_cache_release(page);
-		spin_lock(&pagecache_lock);
+		spin_lock(&mapping->i_shared_lock);
 	}
-	spin_unlock(&pagecache_lock);
+	spin_unlock(&mapping->i_shared_lock);
 }
 
 /**
@@ -617,7 +555,7 @@
 */
 void filemap_fdatawait(struct address_space * mapping)
 {
-	spin_lock(&pagecache_lock);
+	spin_lock(&mapping->i_shared_lock);
 
 	while (!list_empty(&mapping->locked_pages)) {
 		struct page *page = list_entry(mapping->locked_pages.next, struct page, list);
@@ -629,83 +567,57 @@
 			continue;
 
 		page_cache_get(page);
-		spin_unlock(&pagecache_lock);
+		spin_unlock(&mapping->i_shared_lock);
 
 		___wait_on_page(page);
 		page_cache_release(page);
 
-		spin_lock(&pagecache_lock);
+		spin_lock(&mapping->i_shared_lock);
 	}
-	spin_unlock(&pagecache_lock);
-}
-
-/*
- * Add a page to the inode page cache.
- *
- * The caller must have locked the page and
- * set all the page flags correctly..
- */
-void add_to_page_cache_locked(struct page * page, struct address_space *mapping, unsigned long index)
-{
-	if (!PageLocked(page))
-		BUG();
-
-	page->index = index;
-	page_cache_get(page);
-	spin_lock(&pagecache_lock);
-	add_page_to_inode_queue(mapping, page);
-	add_page_to_hash_queue(page, page_hash(mapping, index));
-	spin_unlock(&pagecache_lock);
-
-	lru_cache_add(page);
+	spin_unlock(&mapping->i_shared_lock);
 }
 
 /*
 * This adds a page to the page cache, starting out as locked,
 * owned by us, but unreferenced, not uptodate and with no errors.
 */
-static inline void __add_to_page_cache(struct page * page,
-	struct address_space *mapping, unsigned long offset,
-	struct page **hash)
+static int __add_to_page_cache(struct page * page, struct address_space *mapping,
+		unsigned long offset)
 {
 	unsigned long flags;
+	int error;
 
-	flags = page->flags & ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_dirty | 1 << PG_referenced | 1 << PG_arch_1 | 1 << PG_checked);
-	page->flags = flags | (1 << PG_locked);
 	page_cache_get(page);
+	if ((error = rat_insert(&mapping->page_tree, offset, page)))
+		goto fail;
+	flags = page->flags & ~(1 << PG_uptodate | 1 << PG_error |
+				1 << PG_dirty | 1 << PG_referenced |
+				1 << PG_arch_1 | 1 << PG_checked);
+	page->flags = flags | (1 << PG_locked);
 	page->index = offset;
 	add_page_to_inode_queue(mapping, page);
-	add_page_to_hash_queue(page, hash);
-}
-
-void add_to_page_cache(struct page * page, struct address_space * mapping, unsigned long offset)
-{
-	spin_lock(&pagecache_lock);
-	__add_to_page_cache(page, mapping, offset, page_hash(mapping, offset));
-	spin_unlock(&pagecache_lock);
-	lru_cache_add(page);
+	atomic_inc(&page_cache_size);
+	return 0;
+fail:
+	page_cache_release(page);
+	return error;
 }
 
-int add_to_page_cache_unique(struct page * page,
-	struct address_space *mapping, unsigned long offset,
-	struct page **hash)
+int add_to_page_cache(struct page * page, struct address_space *mapping,
+		unsigned long offset)
 {
-	int err;
-	struct page *alias;
-
-	spin_lock(&pagecache_lock);
-	alias = __find_page_nolock(mapping, offset, *hash);
-
-	err = 1;
-	if (!alias) {
-		__add_to_page_cache(page,mapping,offset,hash);
-		err = 0;
-	}
-
-	spin_unlock(&pagecache_lock);
-	if (!err)
-		lru_cache_add(page);
-	return err;
+	int error;
+
+	spin_lock(&mapping->i_shared_lock);
+	if ((error = __add_to_page_cache(page, mapping, offset)))
+		goto fail;
+	spin_unlock(&mapping->i_shared_lock);
+	lru_cache_add(page);
+	return 0;
+fail:
+	spin_unlock(&mapping->i_shared_lock);
+	return error;
 }
 
 /*
@@ -716,12 +628,12 @@
 static int page_cache_read(struct file * file, unsigned long offset)
 {
 	struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
-	struct page **hash = page_hash(mapping, offset);
 	struct page *page;
+	int error;
 
-	spin_lock(&pagecache_lock);
-	page = __find_page_nolock(mapping, offset, *hash);
-	spin_unlock(&pagecache_lock);
+	spin_lock(&mapping->i_shared_lock);
+	page = rat_lookup(&mapping->page_tree, offset);
+	spin_unlock(&mapping->i_shared_lock);
 	if (page)
 		return 0;
@@ -729,11 +641,18 @@
 	if (!page)
 		return -ENOMEM;
 
-	if (!add_to_page_cache_unique(page, mapping, offset, hash)) {
-		int error = mapping->a_ops->readpage(file, page);
+	while ((error = add_to_page_cache(page, mapping, offset)) == -ENOMEM) {
+		/* Yield for kswapd, and try again */
+		__set_current_state(TASK_RUNNING);
+		yield();
+	}
+	if (!error) {
+		error = mapping->a_ops->readpage(file, page);
 		page_cache_release(page);
 		return error;
 	}
+
 	/*
 	 * We arrive here in the unlikely event that someone
 	 * raced with us and added our page to the cache first.
@@ -837,8 +756,7 @@
 * a rather lightweight function, finding and getting a reference to a
 * hashed page atomically.
 */
-struct page * __find_get_page(struct address_space *mapping,
-			      unsigned long offset, struct page **hash)
+struct page * find_get_page(struct address_space *mapping, unsigned long offset)
 {
 	struct page *page;
 
@@ -846,11 +764,11 @@
 	 * We scan the hash list read-only. Addition to and removal from
 	 * the hash-list needs a held write-lock.
 	 */
-	spin_lock(&pagecache_lock);
-	page = __find_page_nolock(mapping, offset, *hash);
+	spin_lock(&mapping->i_shared_lock);
+	page = rat_lookup(&mapping->page_tree, offset);
 	if (page)
 		page_cache_get(page);
-	spin_unlock(&pagecache_lock);
+	spin_unlock(&mapping->i_shared_lock);
 	return page;
 }
@@ -860,15 +778,14 @@
 struct page * find_trylock_page(struct address_space *mapping, unsigned long offset)
 {
 	struct page *page;
-	struct page **hash = page_hash(mapping, offset);
 
-	spin_lock(&pagecache_lock);
-	page = __find_page_nolock(mapping, offset, *hash);
+	spin_lock(&mapping->i_shared_lock);
+	page = rat_lookup(&mapping->page_tree, offset);
 	if (page) {
 		if (TryLockPage(page))
 			page = NULL;
 	}
-	spin_unlock(&pagecache_lock);
+	spin_unlock(&mapping->i_shared_lock);
 	return page;
 }
@@ -877,9 +794,9 @@
 * will return with it held (but it may be dropped
 * during blocking operations..
 */
-static struct page * FASTCALL(__find_lock_page_helper(struct address_space *, unsigned long, struct page *));
-static struct page * __find_lock_page_helper(struct address_space *mapping,
-					unsigned long offset, struct page *hash)
+static struct page * FASTCALL(find_lock_page_helper(struct address_space *, unsigned long));
+static struct page * find_lock_page_helper(struct address_space *mapping,
+					unsigned long offset)
 {
 	struct page *page;
 
@@ -888,13 +805,13 @@
 	 * the hash-list needs a held write-lock.
 	 */
repeat:
-	page = __find_page_nolock(mapping, offset, hash);
+	page = rat_lookup(&mapping->page_tree, offset);
 	if (page) {
 		page_cache_get(page);
 		if (TryLockPage(page)) {
-			spin_unlock(&pagecache_lock);
+			spin_unlock(&mapping->i_shared_lock);
 			lock_page(page);
-			spin_lock(&pagecache_lock);
+			spin_lock(&mapping->i_shared_lock);
 
 			/* Has the page been re-allocated while we slept? */
 			if (page->mapping != mapping || page->index != offset) {
@@ -911,39 +828,40 @@
 * Same as the above, but lock the page too, verifying that
 * it's still valid once we own it.
 */
-struct page * __find_lock_page (struct address_space *mapping,
-			unsigned long offset, struct page **hash)
+struct page * find_lock_page(struct address_space *mapping, unsigned long offset)
 {
 	struct page *page;
 
-	spin_lock(&pagecache_lock);
-	page = __find_lock_page_helper(mapping, offset, *hash);
-	spin_unlock(&pagecache_lock);
+	spin_lock(&mapping->i_shared_lock);
+	page = find_lock_page_helper(mapping, offset);
+	spin_unlock(&mapping->i_shared_lock);
 	return page;
 }
 
 /*
 * Same as above, but create the page if needed..
 */
-struct page * find_or_create_page(struct address_space *mapping, unsigned long index, unsigned int gfp_mask)
+struct page * find_or_create_page(struct address_space *mapping,
+		unsigned long index, unsigned int gfp_mask)
 {
 	struct page *page;
-	struct page **hash = page_hash(mapping, index);
 
-	spin_lock(&pagecache_lock);
-	page = __find_lock_page_helper(mapping, index, *hash);
-	spin_unlock(&pagecache_lock);
+	spin_lock(&mapping->i_shared_lock);
+	page = find_lock_page_helper(mapping, index);
+	spin_unlock(&mapping->i_shared_lock);
 	if (!page) {
 		struct page *newpage = alloc_page(gfp_mask);
 		if (newpage) {
-			spin_lock(&pagecache_lock);
-			page = __find_lock_page_helper(mapping, index, *hash);
+			spin_lock(&mapping->i_shared_lock);
+			page = find_lock_page_helper(mapping, index);
 			if (likely(!page)) {
-				page = newpage;
-				__add_to_page_cache(page, mapping, index, hash);
-				newpage = NULL;
+				if (!__add_to_page_cache(newpage, mapping, index)) {
+					page = newpage;
+					newpage = NULL;
+				}
 			}
-			spin_unlock(&pagecache_lock);
+			spin_unlock(&mapping->i_shared_lock);
 			if (newpage == NULL)
 				lru_cache_add(page);
 			else
@@ -970,10 +888,9 @@
 */
 struct page *grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
 {
-	struct page *page, **hash;
+	struct page *page;
 
-	hash = page_hash(mapping, index);
-	page = __find_get_page(mapping, index, hash);
+	page = find_get_page(mapping, index);
 
 	if ( page ) {
 		if ( !TryLockPage(page) ) {
@@ -998,7 +915,7 @@
 	if ( unlikely(!page) )
 		return NULL;	/* Failed to allocate a page */
 
-	if ( unlikely(add_to_page_cache_unique(page, mapping, index, hash)) ) {
+	if ( unlikely(add_to_page_cache(page, mapping, index)) ) {
 		/* Someone else grabbed the page already. */
 		page_cache_release(page);
 		return NULL;
@@ -1323,7 +1240,7 @@
 	}
 
 	for (;;) {
-		struct page *page, **hash;
+		struct page *page;
 		unsigned long end_index, nr, ret;
 
 		end_index = inode->i_size >> PAGE_CACHE_SHIFT;
@@ -1342,15 +1259,14 @@
 		/*
 		 * Try to find the data in the page cache..
 		 */
-		hash = page_hash(mapping, index);
-
-		spin_lock(&pagecache_lock);
-		page = __find_page_nolock(mapping, index, *hash);
+		spin_lock(&mapping->i_shared_lock);
+		page = rat_lookup(&mapping->page_tree, index);
 		if (!page)
 			goto no_cached_page;
found_page:
 		page_cache_get(page);
-		spin_unlock(&pagecache_lock);
+		spin_unlock(&mapping->i_shared_lock);
 
 		if (!Page_Uptodate(page))
 			goto page_not_up_to_date;
@@ -1444,7 +1360,7 @@
 		 * We get here with the page cache lock held.
 		 */
 		if (!cached_page) {
-			spin_unlock(&pagecache_lock);
+			spin_unlock(&mapping->i_shared_lock);
 			cached_page = page_cache_alloc(mapping);
 			if (!cached_page) {
 				desc->error = -ENOMEM;
@@ -1455,8 +1371,8 @@
 			 * Somebody may have added the page while we
 			 * dropped the page cache lock. Check for that.
 			 */
-			spin_lock(&pagecache_lock);
-			page = __find_page_nolock(mapping, index, *hash);
+			spin_lock(&mapping->i_shared_lock);
+			page = rat_lookup(&mapping->page_tree, index);
 			if (page)
 				goto found_page;
 		}
@@ -1464,9 +1380,13 @@
 		/*
 		 * Ok, add the new page to the hash-queues...
 		 */
+		if (__add_to_page_cache(cached_page, mapping, index) == -ENOMEM) {
+			spin_unlock(&mapping->i_shared_lock);
+			desc->error = -ENOMEM;
+			break;
+		}
 		page = cached_page;
-		__add_to_page_cache(page, mapping, index, hash);
-		spin_unlock(&pagecache_lock);
+		spin_unlock(&mapping->i_shared_lock);
 		lru_cache_add(page);
 		cached_page = NULL;
@@ -1866,7 +1786,7 @@
 	struct file *file = area->vm_file;
 	struct address_space *mapping = file->f_dentry->d_inode->i_mapping;
 	struct inode *inode = mapping->host;
-	struct page *page, **hash;
+	struct page *page;
 	unsigned long size, pgoff, endoff;
 
 	pgoff = ((address - area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;
@@ -1888,9 +1808,8 @@
 	/*
 	 * Do we have something in the page cache already?
 	 */
-	hash = page_hash(mapping, pgoff);
retry_find:
-	page = __find_get_page(mapping, pgoff, hash);
+	page = find_get_page(mapping, pgoff);
 	if (!page)
 		goto no_cached_page;
@@ -2575,13 +2494,13 @@
 {
 	unsigned char present = 0;
 	struct address_space * as = vma->vm_file->f_dentry->d_inode->i_mapping;
-	struct page * page, ** hash = page_hash(as, pgoff);
+	struct page * page;
 
-	spin_lock(&pagecache_lock);
-	page = __find_page_nolock(as, pgoff, *hash);
+	spin_lock(&as->i_shared_lock);
+	page = rat_lookup(&as->page_tree, pgoff);
 	if ((page) && (Page_Uptodate(page)))
 		present = 1;
-	spin_unlock(&pagecache_lock);
+	spin_unlock(&as->i_shared_lock);
 
 	return present;
 }
@@ -2724,20 +2643,24 @@
 		int (*filler)(void *,struct page*),
 		void *data)
 {
-	struct page **hash = page_hash(mapping, index);
 	struct page *page, *cached_page = NULL;
 	int err;
repeat:
-	page = __find_get_page(mapping, index, hash);
+	page = find_get_page(mapping, index);
 	if (!page) {
 		if (!cached_page) {
 			cached_page = page_cache_alloc(mapping);
 			if (!cached_page)
 				return ERR_PTR(-ENOMEM);
 		}
-		page = cached_page;
-		if (add_to_page_cache_unique(page, mapping, index, hash))
+		err = add_to_page_cache(cached_page, mapping, index);
+		if (err == -EEXIST)
 			goto repeat;
+		if (err < 0) {
+			page_cache_release(cached_page);
+			return ERR_PTR(err);
+		}
+		page = cached_page;
 		cached_page = NULL;
 		err = filler(data, page);
 		if (err < 0) {
@@ -2792,19 +2715,23 @@
 static inline struct page * __grab_cache_page(struct address_space *mapping,
 				unsigned long index, struct page **cached_page)
 {
-	struct page *page, **hash = page_hash(mapping, index);
+	int err;
+	struct page *page;
repeat:
-	page = __find_lock_page(mapping, index, hash);
+	page = find_lock_page(mapping, index);
 	if (!page) {
 		if (!*cached_page) {
 			*cached_page = page_cache_alloc(mapping);
 			if (!*cached_page)
 				return NULL;
 		}
-		page = *cached_page;
-		if (add_to_page_cache_unique(page, mapping, index, hash))
+		err = add_to_page_cache(*cached_page, mapping, index);
+		if (err == -EEXIST)
 			goto repeat;
-		*cached_page = NULL;
+		if (err == 0) {
+			page = *cached_page;
+			*cached_page = NULL;
+		}
 	}
 	return page;
 }
@@ -3064,30 +2991,3 @@
 	status = generic_osync_inode(inode, OSYNC_METADATA);
 	goto out_status;
 }
-
-void __init page_cache_init(unsigned long mempages)
-{
-	unsigned long htable_size, order;
-
-	htable_size = mempages;
-	htable_size *= sizeof(struct page *);
-	for (order = 0; (PAGE_SIZE << order) < htable_size; order++)
-		;
-
-	do {
-		unsigned long tmp = (PAGE_SIZE << order) / sizeof(struct page *);
-
-		page_hash_bits = 0;
-		while ((tmp >>= 1UL) != 0UL)
-			page_hash_bits++;
-
-		page_hash_table = (struct page **)
-			__get_free_pages(GFP_ATOMIC, order);
-	} while (page_hash_table == NULL && --order > 0);
-
-	printk("Page-cache hash table entries: %d (order: %ld, %ld bytes)\n",
-	       (1 << page_hash_bits), order, (PAGE_SIZE << order));
-	if (!page_hash_table)
-		panic("Failed to allocate page hash table\n");
-	memset((void *)page_hash_table, 0, PAGE_HASH_SIZE * sizeof(struct page *));
-}
diff -uNr -X dontdiff /datenklo/ref/linux-vger/mm/shmem.c linux-vger/mm/shmem.c
--- /datenklo/ref/linux-vger/mm/shmem.c	Fri Jan 25 13:16:06 2002
+++ linux-vger/mm/shmem.c	Tue Jan 29 03:20:18 2002
@@ -366,7 +366,7 @@
 	swp_entry_t *ptr;
 	unsigned long idx;
 	int offset;
-
+
 	idx = 0;
 	spin_lock(&info->lock);
 	offset = shmem_clear_swp(entry, info->i_direct, SHMEM_NR_DIRECT);
@@ -385,11 +385,8 @@
 	spin_unlock(&info->lock);
 	return 0;
found:
-	delete_from_swap_cache(page);
-	add_to_page_cache(page, info->vfs_inode.i_mapping, offset + idx);
-	SetPageDirty(page);
-	SetPageUptodate(page);
-	info->swapped--;
+	if (!move_from_swap_cache(page, offset + idx, info->vfs_inode.i_mapping))
+		info->swapped--;
 	spin_unlock(&info->lock);
 	return 1;
 }
@@ -426,6 +423,7 @@
 	struct address_space *mapping;
 	unsigned long index;
 	struct inode *inode;
+	int error;
 
 	if (!PageLocked(page))
 		BUG();
@@ -438,7 +436,6 @@
 	info = SHMEM_I(inode);
 	if (info->locked)
 		return fail_writepage(page);
-getswap:
 	swap = get_swap_page();
 	if (!swap.val)
 		return fail_writepage(page);
@@ -451,21 +448,12 @@
 	if (entry->val)
 		BUG();
 
-	/* Remove it from the page cache */
-	remove_inode_page(page);
-	page_cache_release(page);
-
-	/* Add it to the swap cache */
-	if (add_to_swap_cache(page, swap) != 0) {
-		/*
-		 * Raced with "speculative" read_swap_cache_async.
-		 * Add page back to page cache, unref swap, try again.
-		 */
-		add_to_page_cache_locked(page, mapping, index);
+	error = move_to_swap_cache(page, swap);
+	if (error) {
 		spin_unlock(&info->lock);
 		swap_free(swap);
-		goto getswap;
+		return fail_writepage(page);
 	}
 
 	*entry = swap;
 	info->swapped++;
@@ -520,8 +508,6 @@
 	shmem_recalc_inode(inode);
 	if (entry->val) {
-		unsigned long flags;
-
 		/* Look it up and read it in.. */
 		page = find_get_page(&swapper_space, entry->val);
 		if (!page) {
@@ -546,16 +532,15 @@
 			goto repeat;
 		}
 
-		/* We have to this with page locked to prevent racing */
+		/* We have to do this with page locked to prevent races */
 		if (TryLockPage(page))
 			goto wait_retry;
 
+		if (move_from_swap_cache(page, idx, mapping))
+			goto nomem_retry;
+
 		swap_free(*entry);
 		*entry = (swp_entry_t) {0};
-		delete_from_swap_cache(page);
-		flags = page->flags & ~((1 << PG_uptodate) | (1 << PG_error) | (1 << PG_referenced) | (1 << PG_arch_1));
-		page->flags = flags | (1 << PG_dirty);
-		add_to_page_cache_locked(page, mapping, idx);
 		info->swapped--;
 		spin_unlock(&info->lock);
 	} else {
@@ -579,7 +564,11 @@
 			return ERR_PTR(-ENOMEM);
 		clear_highpage(page);
 		inode->i_blocks += BLOCKS_PER_PAGE;
-		add_to_page_cache(page, mapping, idx);
+		while (add_to_page_cache(page, mapping, idx) == -ENOMEM) {
+			/* Yield for kswapd, and try again */
+			__set_current_state(TASK_RUNNING);
+			yield();
+		}
 	}
 
 	/* We have the page */
@@ -594,6 +583,16 @@
 	wait_on_page(page);
 	page_cache_release(page);
 	goto repeat;
+
+nomem_retry:
+	spin_unlock(&info->lock);
+	UnlockPage(page);
+	page_cache_release(page);
+
+	/* Yield for kswapd, and try again */
+	__set_current_state(TASK_RUNNING);
+	yield();
+	goto repeat;
 }
 
 static int shmem_getpage(struct inode * inode, unsigned long idx, struct page **ptr)
diff -uNr -X dontdiff /datenklo/ref/linux-vger/mm/swap_state.c linux-vger/mm/swap_state.c
--- /datenklo/ref/linux-vger/mm/swap_state.c	Mon Nov 12 18:19:54 2001
+++ linux-vger/mm/swap_state.c	Tue Jan 29 03:30:00 2002
@@ -14,6 +14,7 @@
 #include <linux/swap.h>
 #include <linux/swapctl.h>
 #include <linux/init.h>
+#include <linux/rat.h>
 #include <linux/pagemap.h>
 #include <linux/smp_lock.h>
@@ -37,11 +38,12 @@
 }
 
 struct address_space swapper_space = {
-	LIST_HEAD_INIT(swapper_space.clean_pages),
-	LIST_HEAD_INIT(swapper_space.dirty_pages),
-	LIST_HEAD_INIT(swapper_space.locked_pages),
-	0,				/* nrpages	*/
-	&swap_aops,
+	page_tree:	{ 0, GFP_ATOMIC, NULL },
+	clean_pages:	LIST_HEAD_INIT(swapper_space.clean_pages),
+	dirty_pages:	LIST_HEAD_INIT(swapper_space.dirty_pages),
+	locked_pages:	LIST_HEAD_INIT(swapper_space.locked_pages),
+	a_ops:		&swap_aops,
+	i_shared_lock:	SPIN_LOCK_UNLOCKED,
 };
 
 #ifdef SWAP_CACHE_INFO
@@ -69,17 +71,20 @@
 int add_to_swap_cache(struct page *page, swp_entry_t entry)
 {
+	int error;
+
 	if (page->mapping)
 		BUG();
 	if (!swap_duplicate(entry)) {
 		INC_CACHE_INFO(noent_race);
 		return -ENOENT;
 	}
-	if (add_to_page_cache_unique(page, &swapper_space, entry.val,
-			page_hash(&swapper_space, entry.val)) != 0) {
+	error = add_to_page_cache(page, &swapper_space, entry.val);
+	if (error) {
 		swap_free(entry);
 		INC_CACHE_INFO(exist_race);
-		return -EEXIST;
+		return error;
 	}
 	if (!PageLocked(page))
 		BUG();
@@ -121,14 +126,100 @@
 	entry.val = page->index;
 
-	spin_lock(&pagecache_lock);
+	spin_lock(&swapper_space.i_shared_lock);
 	__delete_from_swap_cache(page);
-	spin_unlock(&pagecache_lock);
+	spin_unlock(&swapper_space.i_shared_lock);
 
 	swap_free(entry);
 	page_cache_release(page);
 }
 
+int move_to_swap_cache(struct page *page, swp_entry_t entry)
+{
+	struct address_space *mapping = page->mapping;
+	void **pslot;
+	int err;
+
+	if (!mapping)
+		BUG();
+
+	if (!swap_duplicate(entry)) {
+		INC_CACHE_INFO(noent_race);
+		return -ENOENT;
+	}
+
+	spin_lock(&swapper_space.i_shared_lock);
+	spin_lock(&mapping->i_shared_lock);
+
+	err = rat_reserve(&swapper_space.page_tree, entry.val, &pslot);
+	if (!err) {
+		/* Remove it from the page cache */
+		__remove_inode_page(page);
+
+		/* Add it to the swap cache */
+		*pslot = page;
+		page->flags = ((page->flags & ~(1 << PG_uptodate | 1 << PG_error
+				| 1 << PG_dirty | 1 << PG_referenced
+				| 1 << PG_arch_1 | 1 << PG_checked))
+				| (1 << PG_locked));
+		page->index = entry.val;
+		add_page_to_inode_queue(&swapper_space, page);
+		atomic_inc(&page_cache_size);
+	}
+
+	spin_unlock(&mapping->i_shared_lock);
+	spin_unlock(&swapper_space.i_shared_lock);
+
+	if (!err) {
+		INC_CACHE_INFO(add_total);
+		return 0;
+	}
+
+	swap_free(entry);
+	if (err == -EEXIST)
+		INC_CACHE_INFO(exist_race);
+	return err;
+}
+
+int move_from_swap_cache(struct page *page, unsigned long index,
+		struct address_space *mapping)
+{
+	void **pslot;
+	int err;
+
+	if (!PageLocked(page))
+		BUG();
+
+	spin_lock(&swapper_space.i_shared_lock);
+	spin_lock(&mapping->i_shared_lock);
+
+	err = rat_reserve(&mapping->page_tree, index, &pslot);
+	if (!err) {
+		swp_entry_t entry;
+
+		block_flushpage(page, 0);
+		entry.val = page->index;
+		__delete_from_swap_cache(page);
+		swap_free(entry);
+
+		*pslot = page;
+		page->flags = ((page->flags & ~(1 << PG_uptodate | 1 << PG_error
+				| 1 << PG_referenced | 1 << PG_arch_1
+				| 1 << PG_checked)) | (1 << PG_dirty));
+		page->index = index;
+		add_page_to_inode_queue(mapping, page);
+		atomic_inc(&page_cache_size);
+	}
+
+	spin_unlock(&mapping->i_shared_lock);
+	spin_unlock(&swapper_space.i_shared_lock);
+
+	return err;
+}
+
 /*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page. Can not do a lock_page,
diff -uNr -X dontdiff /datenklo/ref/linux-vger/mm/swapfile.c linux-vger/mm/swapfile.c
--- /datenklo/ref/linux-vger/mm/swapfile.c	Fri Jan 25 13:16:06 2002
+++ linux-vger/mm/swapfile.c	Tue Jan 29 03:05:40 2002
@@ -239,10 +239,10 @@
 	/* Is the only swap cache user the cache itself? */
 	if (p->swap_map[SWP_OFFSET(entry)] == 1) {
 		/* Recheck the page count with the pagecache lock held.. */
-		spin_lock(&pagecache_lock);
+		spin_lock(&swapper_space.i_shared_lock);
 		if (page_count(page) - !!page->buffers == 2)
 			retval = 1;
-		spin_unlock(&pagecache_lock);
+		spin_unlock(&swapper_space.i_shared_lock);
 	}
 	swap_info_put(p);
 }
@@ -307,13 +307,13 @@
 		retval = 0;
 		if (p->swap_map[SWP_OFFSET(entry)] == 1) {
 			/* Recheck the page count with the pagecache lock held.. */
-			spin_lock(&pagecache_lock);
+			spin_lock(&swapper_space.i_shared_lock);
 			if (page_count(page) - !!page->buffers == 2) {
 				__delete_from_swap_cache(page);
 				SetPageDirty(page);
 				retval = 1;
 			}
-			spin_unlock(&pagecache_lock);
+			spin_unlock(&swapper_space.i_shared_lock);
 		}
 		swap_info_put(p);
diff -uNr -X dontdiff /datenklo/ref/linux-vger/mm/vmscan.c linux-vger/mm/vmscan.c
--- /datenklo/ref/linux-vger/mm/vmscan.c	Fri Jan 25 13:16:06 2002
+++ linux-vger/mm/vmscan.c	Tue Jan 29 03:12:59 2002
@@ -137,10 +137,16 @@
 		 * (adding to the page cache will clear the dirty
 		 * and uptodate bits, so we need to do it again)
 		 */
-		if (add_to_swap_cache(page, entry) == 0) {
+		switch (add_to_swap_cache(page, entry)) {
+		case 0:
 			SetPageUptodate(page);
 			set_page_dirty(page);
 			goto set_swap_pte;
+		case -ENOMEM:
+			swap_free(entry);
+			goto preserve;
+		default:
+			break;
 		}
 		/* Raced with "speculative" read_swap_cache_async */
 		swap_free(entry);
@@ -338,6 +344,7 @@
 static int shrink_cache(int nr_pages, zone_t * classzone, unsigned int gfp_mask, int priority)
 {
 	struct list_head * entry;
+	struct address_space *mapping;
 	int max_scan = nr_inactive_pages / priority;
 	int max_mapped = nr_pages << (9 - priority);
 
@@ -392,7 +399,9 @@
 			continue;
 		}
 
-		if (PageDirty(page) && is_page_cache_freeable(page) && page->mapping) {
+		mapping = page->mapping;
+
+		if (PageDirty(page) && is_page_cache_freeable(page) && mapping) {
 			/*
 			 * It is not critical here to write it only if
 			 * the page is unmapped beause any direct writer
@@ -403,7 +412,7 @@
 			 */
 			int (*writepage)(struct page *);
 
-			writepage = page->mapping->a_ops->writepage;
+			writepage = mapping->a_ops->writepage;
 			if ((gfp_mask & __GFP_FS) && writepage) {
 				ClearPageDirty(page);
 				SetPageLaunder(page);
@@ -430,7 +439,7 @@
 			page_cache_get(page);
 
 			if (try_to_release_page(page, gfp_mask)) {
-				if (!page->mapping) {
+				if (!mapping) {
 					/*
 					 * We must not allow an anon page
 					 * with no buffers to be visible on
@@ -467,13 +476,22 @@
 			}
 		}
 
-		spin_lock(&pagecache_lock);
+		/*
+		 * Page is locked, so mapping can't change under our
+		 * feet.
+		 */
+		if (!mapping) {
+			UnlockPage(page);
+			goto page_mapped;
+		}
+
+		spin_lock(&mapping->i_shared_lock);
 
 		/*
 		 * this is the non-racy check for busy page.
 		 */
-		if (!page->mapping || !is_page_cache_freeable(page)) {
-			spin_unlock(&pagecache_lock);
+		if (!is_page_cache_freeable(page)) {
+			spin_unlock(&mapping->i_shared_lock);
 			UnlockPage(page);
page_mapped:
 			if (--max_mapped >= 0)
@@ -493,7 +511,7 @@
 		 * the page is freeable* so not in use by anybody.
 		 */
 		if (PageDirty(page)) {
-			spin_unlock(&pagecache_lock);
+			spin_unlock(&mapping->i_shared_lock);
 			UnlockPage(page);
 			continue;
 		}
@@ -501,12 +519,12 @@
 		/* point of no return */
 		if (likely(!PageSwapCache(page))) {
 			__remove_inode_page(page);
-			spin_unlock(&pagecache_lock);
+			spin_unlock(&mapping->i_shared_lock);
 		} else {
 			swp_entry_t swap;
 			swap.val = page->index;
 			__delete_from_swap_cache(page);
-			spin_unlock(&pagecache_lock);
+			spin_unlock(&mapping->i_shared_lock);
 			swap_free(swap);
 		}