@@ -33,7 +33,7 @@
  *
  * inode->i_lock protects:
  *   inode->i_state, inode->i_hash, __iget()
- * inode_lru_lock protects:
+ * inode->i_sb->s_inode_lru_lock protects:
  *   inode->i_sb->s_inode_lru, inode->i_lru
  * inode_sb_list_lock protects:
  *   sb->s_inodes, inode->i_sb_list
@@ -46,7 +46,7 @@
  *
  * inode_sb_list_lock
  *   inode->i_lock
- *     inode_lru_lock
+ *     inode->i_sb->s_inode_lru_lock
  *
  * inode_wb_list_lock
  *   inode->i_lock
@@ -64,8 +64,6 @@ static unsigned int i_hash_shift __read_mostly;
 static struct hlist_head *inode_hashtable __read_mostly;
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);
 
-static DEFINE_SPINLOCK(inode_lru_lock);
-
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
 
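For illustration only, a minimal sketch of the lock nesting the comment above documents, with the per-sb LRU lock now the innermost of the three. The helper name is hypothetical, not code from this commit; it just shows the acquisition order a caller must respect to avoid deadlock:

/* Hypothetical helper, shown only to illustrate the documented order. */
static void lock_order_sketch(struct inode *inode)
{
	spin_lock(&inode_sb_list_lock);			/* outermost */
	spin_lock(&inode->i_lock);			/* then the per-inode lock */
	spin_lock(&inode->i_sb->s_inode_lru_lock);	/* innermost */

	/* ... touch i_state / i_lru here ... */

	spin_unlock(&inode->i_sb->s_inode_lru_lock);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_sb_list_lock);
}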
@@ -342,24 +340,24 @@ EXPORT_SYMBOL(ihold);
 
 static void inode_lru_list_add(struct inode *inode)
 {
-	spin_lock(&inode_lru_lock);
+	spin_lock(&inode->i_sb->s_inode_lru_lock);
 	if (list_empty(&inode->i_lru)) {
 		list_add(&inode->i_lru, &inode->i_sb->s_inode_lru);
 		inode->i_sb->s_nr_inodes_unused++;
 		this_cpu_inc(nr_unused);
 	}
-	spin_unlock(&inode_lru_lock);
+	spin_unlock(&inode->i_sb->s_inode_lru_lock);
 }
 
 static void inode_lru_list_del(struct inode *inode)
 {
-	spin_lock(&inode_lru_lock);
+	spin_lock(&inode->i_sb->s_inode_lru_lock);
 	if (!list_empty(&inode->i_lru)) {
 		list_del_init(&inode->i_lru);
 		inode->i_sb->s_nr_inodes_unused--;
 		this_cpu_dec(nr_unused);
 	}
-	spin_unlock(&inode_lru_lock);
+	spin_unlock(&inode->i_sb->s_inode_lru_lock);
 }
 
 /**
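A note on the pattern in the hunk above: inode_lru_list_add() uses list_empty(&inode->i_lru) as its "already on the LRU?" test, which only works because inode_lru_list_del() uses list_del_init() rather than plain list_del(). A minimal sketch of why, using only <linux/list.h> primitives (the names here are illustrative, not from this commit):

/* Illustrative only: why list_del_init() is required here. */
static LIST_HEAD(demo_lru);

static void demo(struct list_head *entry)
{
	list_add(entry, &demo_lru);
	list_del_init(entry);		/* entry points back at itself... */
	WARN_ON(!list_empty(entry));	/* ...so list_empty() is true again */
	/*
	 * A plain list_del() would leave entry->next/prev poisoned, so a
	 * later list_empty() check could not detect "not on the LRU".
	 */
}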
@@ -615,7 +613,8 @@ static int can_unuse(struct inode *inode)
 
 /*
  * Scan `goal' inodes on the unused list for freeable ones. They are moved to a
- * temporary list and then are freed outside inode_lru_lock by dispose_list().
+ * temporary list and then are freed outside sb->s_inode_lru_lock by
+ * dispose_list().
  *
  * Any inodes which are pinned purely because of attached pagecache have their
  * pagecache removed.  If the inode has metadata buffers attached to
@@ -635,7 +634,7 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan)
 	int nr_scanned;
 	unsigned long reap = 0;
 
-	spin_lock(&inode_lru_lock);
+	spin_lock(&sb->s_inode_lru_lock);
 	for (nr_scanned = *nr_to_scan; nr_scanned >= 0; nr_scanned--) {
 		struct inode *inode;
 
@@ -645,7 +644,7 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan)
 		inode = list_entry(sb->s_inode_lru.prev, struct inode, i_lru);
 
 		/*
-		 * we are inverting the inode_lru_lock/inode->i_lock here,
+		 * we are inverting the sb->s_inode_lru_lock/inode->i_lock here,
 		 * so use a trylock. If we fail to get the lock, just move the
 		 * inode to the back of the list so we don't spin on it.
 		 */
@@ -677,12 +676,12 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan)
 		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
 			__iget(inode);
 			spin_unlock(&inode->i_lock);
-			spin_unlock(&inode_lru_lock);
+			spin_unlock(&sb->s_inode_lru_lock);
 			if (remove_inode_buffers(inode))
 				reap += invalidate_mapping_pages(&inode->i_data,
 								0, -1);
 			iput(inode);
-			spin_lock(&inode_lru_lock);
+			spin_lock(&sb->s_inode_lru_lock);
 
 			if (inode != list_entry(sb->s_inode_lru.next,
 						struct inode, i_lru))
@@ -707,7 +706,7 @@ static void shrink_icache_sb(struct super_block *sb, int *nr_to_scan)
 		__count_vm_events(KSWAPD_INODESTEAL, reap);
 	else
 		__count_vm_events(PGINODESTEAL, reap);
-	spin_unlock(&inode_lru_lock);
+	spin_unlock(&sb->s_inode_lru_lock);
 	*nr_to_scan = nr_scanned;
 
 	dispose_list(&freeable);
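One pattern in shrink_icache_sb() above is worth spelling out: the LRU walk takes sb->s_inode_lru_lock first and inode->i_lock second, the inverse of the documented ordering, so it must use spin_trylock() and rotate a contended inode away rather than block. A condensed, illustrative sketch of that shape (not the full function; dispose/requeue logic elided):

/* Condensed, illustrative shape of the inverted-lock LRU walk. */
static void lru_walk_sketch(struct super_block *sb, int nr_to_scan)
{
	spin_lock(&sb->s_inode_lru_lock);
	while (nr_to_scan--) {
		struct inode *inode;

		if (list_empty(&sb->s_inode_lru))
			break;
		inode = list_entry(sb->s_inode_lru.prev, struct inode, i_lru);

		/* Inverted order (LRU lock -> i_lock): must not block here. */
		if (!spin_trylock(&inode->i_lock)) {
			/* Contended: rotate it to the head and keep scanning. */
			list_move(&inode->i_lru, &sb->s_inode_lru);
			continue;
		}
		/* ... dispose or requeue the inode under i_lock ... */
		spin_unlock(&inode->i_lock);
	}
	spin_unlock(&sb->s_inode_lru_lock);
}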