 import drgn.helpers.linux.list as drgn_list

 import sdb
+from sdb.commands.internal import p2
+from sdb.commands.linux import linked_lists
 from sdb.commands.linux.internal import slub_helpers as slub


-def list_for_each_spl_kmem_cache() -> Iterable[drgn.Object]:
+def for_each_spl_kmem_cache() -> Iterable[drgn.Object]:
     yield from drgn_list.list_for_each_entry(
         "spl_kmem_cache_t",
         sdb.get_object("spl_kmem_cache_list").address_of_(), "skc_list")


 def backed_by_linux_cache(cache: drgn.Object) -> bool:
-    assert cache.type_.type_name() == 'spl_kmem_cache_t *'
+    assert sdb.type_canonical_name(cache.type_) == 'struct spl_kmem_cache *'
     return int(cache.skc_linux_cache.value_()) != 0x0


 def slab_name(cache: drgn.Object) -> str:
-    assert cache.type_.type_name() == 'spl_kmem_cache_t *'
+    assert sdb.type_canonical_name(cache.type_) == 'struct spl_kmem_cache *'
     return str(cache.skc_name.string_().decode('utf-8'))


 def nr_slabs(cache: drgn.Object) -> int:
-    assert cache.type_.type_name() == 'spl_kmem_cache_t *'
+    assert sdb.type_canonical_name(cache.type_) == 'struct spl_kmem_cache *'
     return int(cache.skc_slab_total.value_())


 def slab_alloc(cache: drgn.Object) -> int:
-    assert cache.type_.type_name() == 'spl_kmem_cache_t *'
+    assert sdb.type_canonical_name(cache.type_) == 'struct spl_kmem_cache *'
     return int(cache.skc_slab_alloc.value_())


 def slab_size(cache: drgn.Object) -> int:
-    assert cache.type_.type_name() == 'spl_kmem_cache_t *'
+    assert sdb.type_canonical_name(cache.type_) == 'struct spl_kmem_cache *'
     return int(cache.skc_slab_size.value_())


 def slab_linux_cache_source(cache: drgn.Object) -> str:
-    assert cache.type_.type_name() == 'spl_kmem_cache_t *'
+    assert sdb.type_canonical_name(cache.type_) == 'struct spl_kmem_cache *'
     if not backed_by_linux_cache(cache):
         name = slab_name(cache)
         subsystem = "SPL"
@@ -67,46 +69,49 @@ def slab_linux_cache_source(cache: drgn.Object) -> str:
     return f"{name}[{subsystem:4}]"


-def slab_flags(cache: drgn.Object) -> str:
-    assert cache.type_.type_name() == 'spl_kmem_cache_t *'
+def for_each_slab_flag_in_cache(cache: drgn.Object) -> Iterable[str]:
+    assert sdb.type_canonical_name(cache.type_) == 'struct spl_kmem_cache *'
     flag = cache.skc_flags.value_()
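+    #
+    # skc_flags is a bitmask. The enumerators of 'enum kmc_bit' are named
+    # with a _BIT suffix and hold the bit position of each flag, so the
+    # suffix is stripped below to recover the human-readable flag name.
+    #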
-    flags_detected = []
     for enum_entry, enum_entry_bit in cache.prog_.type(
             'enum kmc_bit').enumerators:
         if flag & (1 << enum_entry_bit):
-            flags_detected.append(enum_entry.replace('_BIT', ''))
-    return '|'.join(flags_detected)
+            yield enum_entry.replace('_BIT', '')
+
+
+def slab_flags(cache: drgn.Object) -> str:
+    assert sdb.type_canonical_name(cache.type_) == 'struct spl_kmem_cache *'
+    return '|'.join(for_each_slab_flag_in_cache(cache))


 def object_size(cache: drgn.Object) -> int:
-    assert cache.type_.type_name() == 'spl_kmem_cache_t *'
+    assert sdb.type_canonical_name(cache.type_) == 'struct spl_kmem_cache *'
     return int(cache.skc_obj_size.value_())


 def nr_objects(cache: drgn.Object) -> int:
-    assert cache.type_.type_name() == 'spl_kmem_cache_t *'
+    assert sdb.type_canonical_name(cache.type_) == 'struct spl_kmem_cache *'
     if backed_by_linux_cache(cache):
         return int(cache.skc_obj_alloc.value_())
     return int(cache.skc_obj_total.value_())


 def obj_alloc(cache: drgn.Object) -> int:
-    assert cache.type_.type_name() == 'spl_kmem_cache_t *'
+    assert sdb.type_canonical_name(cache.type_) == 'struct spl_kmem_cache *'
     return int(cache.skc_obj_alloc.value_())


 def obj_inactive(cache: drgn.Object) -> int:
-    assert cache.type_.type_name() == 'spl_kmem_cache_t *'
+    assert sdb.type_canonical_name(cache.type_) == 'struct spl_kmem_cache *'
     return nr_objects(cache) - obj_alloc(cache)


 def objs_per_slab(cache: drgn.Object) -> int:
-    assert cache.type_.type_name() == 'spl_kmem_cache_t *'
+    assert sdb.type_canonical_name(cache.type_) == 'struct spl_kmem_cache *'
     return int(cache.skc_slab_objs.value_())


 def entry_size(cache: drgn.Object) -> int:
-    assert cache.type_.type_name() == 'spl_kmem_cache_t *'
+    assert sdb.type_canonical_name(cache.type_) == 'struct spl_kmem_cache *'
     if backed_by_linux_cache(cache):
         return slub.entry_size(cache.skc_linux_cache)
     ops = objs_per_slab(cache)
@@ -116,20 +121,85 @@ def entry_size(cache: drgn.Object) -> int:


 def active_memory(cache: drgn.Object) -> int:
-    assert cache.type_.type_name() == 'spl_kmem_cache_t *'
+    assert sdb.type_canonical_name(cache.type_) == 'struct spl_kmem_cache *'
     return obj_alloc(cache) * entry_size(cache)


 def total_memory(cache: drgn.Object) -> int:
-    assert cache.type_.type_name() == 'spl_kmem_cache_t *'
+    assert sdb.type_canonical_name(cache.type_) == 'struct spl_kmem_cache *'
     if backed_by_linux_cache(cache):
         return slub.total_memory(cache.skc_linux_cache)
     return slab_size(cache) * nr_slabs(cache)


 def util(cache: drgn.Object) -> int:
-    assert cache.type_.type_name() == 'spl_kmem_cache_t *'
+    assert sdb.type_canonical_name(cache.type_) == 'struct spl_kmem_cache *'
     total_mem = total_memory(cache)
     if total_mem == 0:
         return 0
     return int((active_memory(cache) / total_mem) * 100)
+
+
+def sko_from_obj(cache: drgn.Object, obj: drgn.Object) -> drgn.Object:
+    assert sdb.type_canonical_name(cache.type_) == 'struct spl_kmem_cache *'
+    cache_obj_align = cache.skc_obj_align.value_()
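+    #
+    # The spl_kmem_obj_t bookkeeping header of an object lives right after
+    # the object's data, with the data size rounded up to the cache's
+    # object alignment.
+    #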
+    return sdb.create_object(
+        'spl_kmem_obj_t *',
+        obj.value_() + p2.p2roundup(object_size(cache), cache_obj_align))
+
+
+def spl_aligned_obj_size(cache: drgn.Object) -> int:
+    assert sdb.type_canonical_name(cache.type_) == 'struct spl_kmem_cache *'
+    cache_obj_align = cache.skc_obj_align.value_()
+    spl_obj_type_size = sdb.type_canonicalize_size('spl_kmem_obj_t')
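+    #
+    # Each entry consists of the object data followed by its spl_kmem_obj_t
+    # header, both rounded up to the cache's object alignment.
+    #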
+    return p2.p2roundup(object_size(cache), cache_obj_align) + p2.p2roundup(
+        spl_obj_type_size, cache_obj_align)
+
+
+def spl_aligned_slab_size(cache: drgn.Object) -> int:
+    assert sdb.type_canonical_name(cache.type_) == 'struct spl_kmem_cache *'
+    cache_obj_align = cache.skc_obj_align.value_()
+    spl_slab_type_size = sdb.type_canonicalize_size('spl_kmem_slab_t')
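+    #
+    # The spl_kmem_slab_t header at the start of each slab is itself
+    # rounded up to the cache's object alignment.
+    #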
+    return p2.p2roundup(spl_slab_type_size, cache_obj_align)
+
+
+def for_each_onslab_object_in_slab(slab: drgn.Object) -> Iterable[drgn.Object]:
+    assert sdb.type_canonical_name(slab.type_) == 'struct spl_kmem_slab *'
+    cache = slab.sks_cache
+    sks_size = spl_aligned_slab_size(cache)
+    spl_obj_size = spl_aligned_obj_size(cache)
+
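+    #
+    # Objects are laid out back to back right after the aligned slab
+    # header, each occupying spl_obj_size bytes.
+    #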
+    for i in range(slab.sks_objs.value_()):
+        obj = sdb.create_object('void *',
+                                slab.value_() + sks_size + (i * spl_obj_size))
+        #
+        # If the sko_list of the object is empty, it means that
+        # this object is not part of the slab's internal free list
+        # and therefore it is allocated. NOTE: sko_list in the
+        # actual code is not a list, but a link on a list. Thus,
+        # the check below is not checking whether the "object
+        # list" is empty for this slab, but rather whether the
+        # link is part of any list.
+        #
+        sko = sko_from_obj(cache, obj)
+        assert sko.sko_magic.value_() == 0x20202020  # SKO_MAGIC
+        if linked_lists.is_list_empty(sko.sko_list):
+            yield obj
+
+
+def for_each_object_in_spl_cache(cache: drgn.Object) -> Iterable[drgn.Object]:
+    assert sdb.type_canonical_name(cache.type_) == 'struct spl_kmem_cache *'
+    #
+    # ZFSonLinux initially implemented OFFSLAB caches for certain cases
+    # that never showed up and thus have never been used in practice.
+    # Ensure here that we are not looking at such a cache.
+    #
+    if 'KMC_OFFSLAB' in list(for_each_slab_flag_in_cache(cache)):
+        raise sdb.CommandError("spl_caches",
+                               "KMC_OFFSLAB caches are not supported")
+
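+    #
+    # Walk both the fully- and the partially-allocated slab lists of the
+    # cache, yielding every object that is currently allocated.
+    #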
+    for slab_list in [cache.skc_complete_list, cache.skc_partial_list]:
+        for slab in drgn_list.list_for_each_entry("spl_kmem_slab_t",
+                                                  slab_list.address_of_(),
+                                                  "sks_list"):
+            yield from for_each_onslab_object_in_slab(slab)