@@ -103,38 +103,71 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 }
 EXPORT_SYMBOL(blkdev_issue_discard);
 
-static int __blkdev_issue_write_zeroes(struct block_device *bdev,
-		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
-		struct bio **biop, unsigned flags)
+static sector_t bio_write_zeroes_limit(struct block_device *bdev)
 {
-	struct bio *bio = *biop;
-	unsigned int max_sectors;
-
-	if (bdev_read_only(bdev))
-		return -EPERM;
-
-	/* Ensure that max_sectors doesn't overflow bi_size */
-	max_sectors = bdev_write_zeroes_sectors(bdev);
+	sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
 
-	if (max_sectors == 0)
-		return -EOPNOTSUPP;
+	return min(bdev_write_zeroes_sectors(bdev),
+		(UINT_MAX >> SECTOR_SHIFT) & ~bs_mask);
+}
 
+static void __blkdev_issue_write_zeroes(struct block_device *bdev,
+		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
+		struct bio **biop, unsigned flags)
+{
 	while (nr_sects) {
-		unsigned int len = min_t(sector_t, nr_sects, max_sectors);
+		unsigned int len = min_t(sector_t, nr_sects,
+				bio_write_zeroes_limit(bdev));
+		struct bio *bio;
+
+		if ((flags & BLKDEV_ZERO_KILLABLE) &&
+		    fatal_signal_pending(current))
+			break;
 
-		bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
+		bio = bio_alloc(bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
 		if (flags & BLKDEV_ZERO_NOUNMAP)
 			bio->bi_opf |= REQ_NOUNMAP;
 
 		bio->bi_iter.bi_size = len << SECTOR_SHIFT;
+		*biop = bio_chain_and_submit(*biop, bio);
+
 		nr_sects -= len;
 		sector += len;
 		cond_resched();
 	}
+}
 
-	*biop = bio;
-	return 0;
+static int blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp, unsigned flags)
+{
+	struct bio *bio = NULL;
+	struct blk_plug plug;
+	int ret = 0;
+
+	blk_start_plug(&plug);
+	__blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp, &bio, flags);
+	if (bio) {
+		if ((flags & BLKDEV_ZERO_KILLABLE) &&
+		    fatal_signal_pending(current)) {
+			bio_await_chain(bio);
+			blk_finish_plug(&plug);
+			return -EINTR;
+		}
+		ret = submit_bio_wait(bio);
+		bio_put(bio);
+	}
+	blk_finish_plug(&plug);
+
+	/*
+	 * For some devices there is no non-destructive way to verify whether
+	 * WRITE ZEROES is actually supported. These will clear the capability
+	 * on an I/O error, in which case we'll turn any error into
+	 * "not supported" here.
+	 */
+	if (ret && !bdev_write_zeroes_sectors(bdev))
+		return -EOPNOTSUPP;
+	return ret;
 }
 
 /*
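The new bio_write_zeroes_limit() helper caps each REQ_OP_WRITE_ZEROES bio at the smaller of the device's advertised limit and the largest sector count whose byte size still fits in the 32-bit bi_size field, rounded down to a logical-block boundary. A minimal standalone sketch of that arithmetic, assuming the same SECTOR_SHIFT convention; the function name and the printed example are illustrative, not part of the commit:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

/* Mirrors the clamp in bio_write_zeroes_limit(): bound the per-bio sector
 * count so len << SECTOR_SHIFT cannot overflow a 32-bit byte count, then
 * round down to a logical-block boundary via ~bs_mask. */
static uint64_t write_zeroes_limit(uint64_t dev_max_sectors,
				   unsigned int logical_block_size)
{
	uint64_t bs_mask = (logical_block_size >> SECTOR_SHIFT) - 1;
	uint64_t cap = (UINT32_MAX >> SECTOR_SHIFT) & ~bs_mask;

	return dev_max_sectors < cap ? dev_max_sectors : cap;
}

int main(void)
{
	/* With 4096-byte logical blocks, UINT32_MAX >> 9 = 8388607 sectors
	 * rounds down to 8388600, i.e. just under 4 GiB per bio. */
	printf("%llu\n", (unsigned long long)
	       write_zeroes_limit(UINT64_MAX, 4096));
	return 0;
}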
@@ -150,35 +183,63 @@ static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
 	return min(pages, (sector_t)BIO_MAX_VECS);
 }
 
-static int __blkdev_issue_zero_pages(struct block_device *bdev,
+static void __blkdev_issue_zero_pages(struct block_device *bdev,
 		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
-		struct bio **biop)
+		struct bio **biop, unsigned int flags)
 {
-	struct bio *bio = *biop;
-	int bi_size = 0;
-	unsigned int sz;
-
-	if (bdev_read_only(bdev))
-		return -EPERM;
+	while (nr_sects) {
+		unsigned int nr_vecs = __blkdev_sectors_to_bio_pages(nr_sects);
+		struct bio *bio;
 
-	while (nr_sects != 0) {
-		bio = blk_next_bio(bio, bdev, __blkdev_sectors_to_bio_pages(nr_sects),
-				   REQ_OP_WRITE, gfp_mask);
+		bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
 
-		while (nr_sects != 0) {
-			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
-			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
-			nr_sects -= bi_size >> 9;
-			sector += bi_size >> 9;
-			if (bi_size < sz)
+		if ((flags & BLKDEV_ZERO_KILLABLE) &&
+		    fatal_signal_pending(current))
+			break;
+
+		do {
+			unsigned int len, added;
+
+			len = min_t(sector_t,
+				    PAGE_SIZE, nr_sects << SECTOR_SHIFT);
+			added = bio_add_page(bio, ZERO_PAGE(0), len, 0);
+			if (added < len)
 				break;
-		}
+			nr_sects -= added >> SECTOR_SHIFT;
+			sector += added >> SECTOR_SHIFT;
+		} while (nr_sects);
+
+		*biop = bio_chain_and_submit(*biop, bio);
 		cond_resched();
 	}
+}
 
-	*biop = bio;
-	return 0;
+static int blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector,
+		sector_t nr_sects, gfp_t gfp, unsigned flags)
+{
+	struct bio *bio = NULL;
+	struct blk_plug plug;
+	int ret = 0;
+
+	if (flags & BLKDEV_ZERO_NOFALLBACK)
+		return -EOPNOTSUPP;
+
+	blk_start_plug(&plug);
+	__blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp, &bio, flags);
+	if (bio) {
+		if ((flags & BLKDEV_ZERO_KILLABLE) &&
+		    fatal_signal_pending(current)) {
+			bio_await_chain(bio);
+			blk_finish_plug(&plug);
+			return -EINTR;
+		}
+		ret = submit_bio_wait(bio);
+		bio_put(bio);
+	}
+	blk_finish_plug(&plug);
+
+	return ret;
+}
 
 /**
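The fallback path now allocates each bio with exactly the number of zero-page vectors it intends to add, via __blkdev_sectors_to_bio_pages(), which rounds the remaining sector count up to whole pages and clamps it to BIO_MAX_VECS. A standalone sketch of that sizing, assuming 4 KiB pages and the BIO_MAX_VECS value of 256 used by kernels of this vintage; the names below are illustrative:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT	9
#define PAGE_SIZE	4096ULL
#define BIO_MAX_VECS	256	/* kernel value around the time of this commit */

/* Mirrors __blkdev_sectors_to_bio_pages(): round the remaining range up
 * to whole pages, then clamp to the per-bio segment limit. */
static unsigned int sectors_to_bio_pages(uint64_t nr_sects)
{
	uint64_t secs_per_page = PAGE_SIZE >> SECTOR_SHIFT;
	uint64_t pages = (nr_sects + secs_per_page - 1) / secs_per_page;

	return pages < BIO_MAX_VECS ? (unsigned int)pages : BIO_MAX_VECS;
}

int main(void)
{
	/* Zeroing 1 GiB (2097152 sectors) needs 262144 zero pages, so each
	 * bio is capped at 256 vectors (1 MiB) and the outer loop chains
	 * roughly 1024 bios. */
	printf("%u\n", sectors_to_bio_pages(2097152ULL));
	return 0;
}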
@@ -204,20 +265,19 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
 		unsigned flags)
 {
-	int ret;
-	sector_t bs_mask;
-
-	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
-	if ((sector | nr_sects) & bs_mask)
-		return -EINVAL;
-
-	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
-			biop, flags);
-	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
-		return ret;
+	if (bdev_read_only(bdev))
+		return -EPERM;
 
-	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
-					 biop);
+	if (bdev_write_zeroes_sectors(bdev)) {
+		__blkdev_issue_write_zeroes(bdev, sector, nr_sects,
+				gfp_mask, biop, flags);
+	} else {
+		if (flags & BLKDEV_ZERO_NOFALLBACK)
+			return -EOPNOTSUPP;
+		__blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
+				biop, flags);
+	}
+	return 0;
 }
 EXPORT_SYMBOL(__blkdev_issue_zeroout);
 
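After this change __blkdev_issue_zeroout() only picks a strategy and chains bios onto *biop; plugging and completion are owned by the caller. A hedged caller sketch following the same pattern the commit itself uses in blkdev_issue_zero_pages(); the wrapper function is hypothetical, kernel context is assumed, and -EOPNOTSUPP fallback handling is omitted:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical helper (not in the commit): zero a sector range on bdev,
 * batching the chained bios under one plug and waiting for the chain. */
static int example_zeroout_range(struct block_device *bdev, sector_t sector,
				 sector_t nr_sects)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	/* Chains one bio per device-limit chunk onto 'bio'. */
	ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				     &bio, 0);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);	/* waits for the whole chain */
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}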
@@ -237,51 +297,21 @@ EXPORT_SYMBOL(__blkdev_issue_zeroout);
 int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
 {
-	int ret = 0;
-	sector_t bs_mask;
-	struct bio *bio;
-	struct blk_plug plug;
-	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);
+	int ret;
 
-	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
-	if ((sector | nr_sects) & bs_mask)
+	if ((sector | nr_sects) & ((bdev_logical_block_size(bdev) >> 9) - 1))
 		return -EINVAL;
+	if (bdev_read_only(bdev))
+		return -EPERM;
 
-retry:
-	bio = NULL;
-	blk_start_plug(&plug);
-	if (try_write_zeroes) {
-		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
-						  gfp_mask, &bio, flags);
-	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
-		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
-						gfp_mask, &bio);
-	} else {
-		/* No zeroing offload support */
-		ret = -EOPNOTSUPP;
-	}
-	if (ret == 0 && bio) {
-		ret = submit_bio_wait(bio);
-		bio_put(bio);
-	}
-	blk_finish_plug(&plug);
-	if (ret && try_write_zeroes) {
-		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
-			try_write_zeroes = false;
-			goto retry;
-		}
-		if (!bdev_write_zeroes_sectors(bdev)) {
-			/*
-			 * Zeroing offload support was indicated, but the
-			 * device reported ILLEGAL REQUEST (for some devices
-			 * there is no non-destructive way to verify whether
-			 * WRITE ZEROES is actually supported).
-			 */
-			ret = -EOPNOTSUPP;
-		}
+	if (bdev_write_zeroes_sectors(bdev)) {
+		ret = blkdev_issue_write_zeroes(bdev, sector, nr_sects,
+				gfp_mask, flags);
+		if (ret != -EOPNOTSUPP)
+			return ret;
 	}
 
-	return ret;
+	return blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, flags);
 }
 EXPORT_SYMBOL(blkdev_issue_zeroout);
 
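The rewritten blkdev_issue_zeroout() validates alignment and writability once, tries the WRITE ZEROES offload, and falls back to zero pages only when the offload reports -EOPNOTSUPP. A hedged example of a typical call; the byte-offset wrapper below is hypothetical:

#include <linux/blkdev.h>

/* Hypothetical wrapper: zero 'len' bytes at byte offset 'pos', keeping
 * the blocks allocated (no unmap) and allowing the zero-page fallback.
 * Both values must be logical-block aligned or -EINVAL is returned. */
static int example_zero_bytes(struct block_device *bdev, loff_t pos,
			      loff_t len)
{
	return blkdev_issue_zeroout(bdev, pos >> SECTOR_SHIFT,
				    len >> SECTOR_SHIFT, GFP_KERNEL,
				    BLKDEV_ZERO_NOUNMAP);
}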