
Commit 09cc9fc

Dave Chinner authored and Al Viro committed
inode: move to per-sb LRU locks
With the inode LRUs moving to per-sb structures, there is no longer a need for a global inode_lru_lock. The locking can be made more fine-grained by moving to a per-sb LRU lock, isolating the LRU operations of different filesystems completely from each other.

Signed-off-by: Dave Chinner <[email protected]>
Signed-off-by: Al Viro <[email protected]>
1 parent 98b745c commit 09cc9fc
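
To make the intent of the change concrete, here is a minimal userspace C sketch, not the kernel code: the names lru_list, nr_unused and lru_add are invented for illustration. It shows the same pattern the patch applies, where each container carries its own lock so that operations on unrelated containers never contend on one shared global lock.

#include <pthread.h>
#include <stdio.h>

struct lru_list {
	pthread_mutex_t lock;	/* per-list lock, playing the role of sb->s_inode_lru_lock */
	int nr_unused;		/* playing the role of sb->s_nr_inodes_unused */
};

static void lru_add(struct lru_list *lru)
{
	/* Only users of this particular list contend on this lock. */
	pthread_mutex_lock(&lru->lock);
	lru->nr_unused++;
	pthread_mutex_unlock(&lru->lock);
}

int main(void)
{
	struct lru_list a, b;

	pthread_mutex_init(&a.lock, NULL);
	pthread_mutex_init(&b.lock, NULL);
	a.nr_unused = b.nr_unused = 0;

	lru_add(&a);	/* touches only a's lock */
	lru_add(&b);	/* touches only b's lock, no shared contention point */
	printf("a=%d b=%d\n", a.nr_unused, b.nr_unused);
	return 0;
}

In the kernel patch below, the container is the super_block and the per-container lock is s_inode_lru_lock.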

3 files changed: +16 −15 lines

fs/inode.c  (+13 −14)
@@ -33,7 +33,7 @@
  *
  * inode->i_lock protects:
  *   inode->i_state, inode->i_hash, __iget()
- * inode_lru_lock protects:
+ * inode->i_sb->s_inode_lru_lock protects:
  *   inode->i_sb->s_inode_lru, inode->i_lru
  * inode_sb_list_lock protects:
  *   sb->s_inodes, inode->i_sb_list
@@ -46,7 +46,7 @@
  *
  * inode_sb_list_lock
  *   inode->i_lock
- *     inode_lru_lock
+ *     inode->i_sb->s_inode_lru_lock
  *
  * inode_wb_list_lock
  *   inode->i_lock
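
The ordering documented in this hunk has inode->i_lock nesting outside inode->i_sb->s_inode_lru_lock. A hedged userspace sketch of that discipline follows; the types inode_like and lru_like and the helper name are made up, and pthread mutexes stand in for spinlocks.

#include <pthread.h>

struct lru_like {
	pthread_mutex_t lru_lock;	/* stands in for inode->i_sb->s_inode_lru_lock */
};

struct inode_like {
	pthread_mutex_t i_lock;		/* stands in for inode->i_lock */
	struct lru_like *lru;
};

static void update_inode_and_lru(struct inode_like *inode)
{
	pthread_mutex_lock(&inode->i_lock);		/* outer lock first */
	pthread_mutex_lock(&inode->lru->lru_lock);	/* inner LRU lock second */
	/* ... update per-inode state and the LRU together ... */
	pthread_mutex_unlock(&inode->lru->lru_lock);
	pthread_mutex_unlock(&inode->i_lock);
}

Any path that already holds the inner LRU lock and then wants the per-inode lock has to use a trylock instead, which is what the shrinker hunk further down does.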
@@ -64,8 +64,6 @@ static unsigned int i_hash_shift __read_mostly;
 static struct hlist_head *inode_hashtable __read_mostly;
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
 
-static DEFINE_SPINLOCK(inode_lru_lock);
-
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
 
@@ -342,24 +340,24 @@ EXPORT_SYMBOL(ihold);
 
 static void inode_lru_list_add(struct inode *inode)
 {
-	spin_lock(&inode_lru_lock);
+	spin_lock(&inode->i_sb->s_inode_lru_lock);
 	if (list_empty(&inode->i_lru)) {
 		list_add(&inode->i_lru, &inode->i_sb->s_inode_lru);
 		inode->i_sb->s_nr_inodes_unused++;
 		this_cpu_inc(nr_unused);
 	}
-	spin_unlock(&inode_lru_lock);
+	spin_unlock(&inode->i_sb->s_inode_lru_lock);
 }
 
 static void inode_lru_list_del(struct inode *inode)
 {
-	spin_lock(&inode_lru_lock);
+	spin_lock(&inode->i_sb->s_inode_lru_lock);
 	if (!list_empty(&inode->i_lru)) {
 		list_del_init(&inode->i_lru);
 		inode->i_sb->s_nr_inodes_unused--;
 		this_cpu_dec(nr_unused);
 	}
-	spin_unlock(&inode_lru_lock);
+	spin_unlock(&inode->i_sb->s_inode_lru_lock);
 }
 
 /**
@@ -615,7 +613,8 @@ static int can_unuse(struct inode *inode)
 
 /*
  * Scan `goal' inodes on the unused list for freeable ones. They are moved to a
- * temporary list and then are freed outside inode_lru_lock by dispose_list().
+ * temporary list and then are freed outside sb->s_inode_lru_lock by
+ * dispose_list().
  *
  * Any inodes which are pinned purely because of attached pagecache have their
  * pagecache removed. If the inode has metadata buffers attached to
@@ -635,7 +634,7 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan)
 	int nr_scanned;
 	unsigned long reap = 0;
 
-	spin_lock(&inode_lru_lock);
+	spin_lock(&sb->s_inode_lru_lock);
 	for (nr_scanned = *nr_to_scan; nr_scanned >= 0; nr_scanned--) {
 		struct inode *inode;
 
@@ -645,7 +644,7 @@
 		inode = list_entry(sb->s_inode_lru.prev, struct inode, i_lru);
 
 		/*
-		 * we are inverting the inode_lru_lock/inode->i_lock here,
+		 * we are inverting the sb->s_inode_lru_lock/inode->i_lock here,
 		 * so use a trylock. If we fail to get the lock, just move the
 		 * inode to the back of the list so we don't spin on it.
 		 */
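
The comment in this hunk describes the standard way out of a lock-order inversion: the shrinker already holds sb->s_inode_lru_lock, so it may only try for inode->i_lock and must back off rather than block. A rough userspace analogue follows; item_like and the helper name are hypothetical, and pthread_mutex_trylock stands in for spin_trylock.

#include <pthread.h>
#include <stdbool.h>

struct item_like {
	pthread_mutex_t lock;		/* stands in for inode->i_lock */
};

/* Caller already holds the list lock (the s_inode_lru_lock analogue). */
static bool try_lock_item_from_list_walk(struct item_like *item)
{
	if (pthread_mutex_trylock(&item->lock) != 0) {
		/* Lock order is inverted here, so never block: give up on
		 * this item instead of spinning and risking a deadlock. */
		return false;
	}
	/* ... item is locked, safe to examine and possibly reclaim ... */
	pthread_mutex_unlock(&item->lock);
	return true;
}

On failure the kernel code moves the inode to the back of the LRU so the scan does not keep spinning on the same busy inode.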
@@ -677,12 +676,12 @@
 		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
 			__iget(inode);
 			spin_unlock(&inode->i_lock);
-			spin_unlock(&inode_lru_lock);
+			spin_unlock(&sb->s_inode_lru_lock);
 			if (remove_inode_buffers(inode))
 				reap += invalidate_mapping_pages(&inode->i_data,
 								0, -1);
 			iput(inode);
-			spin_lock(&inode_lru_lock);
+			spin_lock(&sb->s_inode_lru_lock);
 
 			if (inode != list_entry(sb->s_inode_lru.next,
 						struct inode, i_lru))
@@ -707,7 +706,7 @@
 		__count_vm_events(KSWAPD_INODESTEAL, reap);
 	else
 		__count_vm_events(PGINODESTEAL, reap);
-	spin_unlock(&inode_lru_lock);
+	spin_unlock(&sb->s_inode_lru_lock);
 	*nr_to_scan = nr_scanned;
 
 	dispose_list(&freeable);

fs/super.c  (+1 −0)
@@ -78,6 +78,7 @@ static struct super_block *alloc_super(struct file_system_type *type)
 		INIT_LIST_HEAD(&s->s_inodes);
 		INIT_LIST_HEAD(&s->s_dentry_lru);
 		INIT_LIST_HEAD(&s->s_inode_lru);
+		spin_lock_init(&s->s_inode_lru_lock);
 		init_rwsem(&s->s_umount);
 		mutex_init(&s->s_lock);
 		lockdep_set_class(&s->s_umount, &type->s_umount_key);
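
The one-line addition above follows the usual rule for per-object locks: initialise the lock when the owning object is allocated, before anything else can reach it. A small userspace sketch of the same idea; sb_like and alloc_sb_like are invented names, and pthread_mutex_init plays the role of spin_lock_init.

#include <pthread.h>
#include <stdlib.h>

struct sb_like {
	pthread_mutex_t inode_lru_lock;	/* stands in for s_inode_lru_lock */
	/* ... other per-superblock state ... */
};

static struct sb_like *alloc_sb_like(void)
{
	struct sb_like *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;
	pthread_mutex_init(&s->inode_lru_lock, NULL);	/* ready before first use */
	return s;
}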

include/linux/fs.h  (+2 −1)
@@ -1397,7 +1397,8 @@ struct super_block {
 	struct list_head	s_dentry_lru;	/* unused dentry lru */
 	int			s_nr_dentry_unused;	/* # of dentry on lru */
 
-	/* inode_lru_lock protects s_inode_lru and s_nr_inodes_unused */
+	/* s_inode_lru_lock protects s_inode_lru and s_nr_inodes_unused */
+	spinlock_t		s_inode_lru_lock ____cacheline_aligned_in_smp;
 	struct list_head	s_inode_lru;	/* unused inode lru */
 	int			s_nr_inodes_unused;	/* # of inodes on lru */
 
