 use crate::cmp;
 use crate::fmt;
 use crate::io::{
-    self, BufRead, Initializer, IoSliceMut, Read, Seek, SeekFrom, SizeHint, DEFAULT_BUF_SIZE,
+    self, BufRead, IoSliceMut, Read, ReadBuf, Seek, SeekFrom, SizeHint, DEFAULT_BUF_SIZE,
 };
+use crate::mem::MaybeUninit;
 
 /// The `BufReader<R>` struct adds buffering to any reader.
 ///
@@ -47,9 +48,10 @@ use crate::io::{
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct BufReader<R> {
     inner: R,
-    buf: Box<[u8]>,
+    buf: Box<[MaybeUninit<u8>]>,
     pos: usize,
     cap: usize,
+    init: usize,
 }
 
 impl<R: Read> BufReader<R> {
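The new `init` field records how many leading bytes of `buf` have been written at least once, which is what lets the buffer stay uninitialized until it is actually filled. Roughly, the invariant is pos <= cap <= init <= buf.len(). A minimal sketch of how that invariant justifies handing out `&[u8]` (illustrative only, not the std code; a stable raw-pointer stand-in is used instead of the unstable `MaybeUninit::slice_assume_init_ref`):

    use std::mem::MaybeUninit;

    // Sketch only (not the std code): pos <= cap <= init <= buf.len(). Bytes below
    // `init` have been written at least once, so viewing pos..cap as &[u8] is sound.
    fn buffered(buf: &[MaybeUninit<u8>], pos: usize, cap: usize, init: usize) -> &[u8] {
        assert!(pos <= cap && cap <= init && init <= buf.len());
        // Stable stand-in for MaybeUninit::slice_assume_init_ref(&buf[pos..cap]).
        unsafe { std::slice::from_raw_parts(buf.as_ptr().cast::<u8>().add(pos), cap - pos) }
    }

    fn main() {
        let buf = [MaybeUninit::new(1u8), MaybeUninit::new(2), MaybeUninit::uninit()];
        // pos = 0, cap = 2, init = 2: the last slot was never written and is never exposed.
        assert_eq!(buffered(&buf, 0, 2, 2), &[1, 2]);
    }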
@@ -91,11 +93,8 @@ impl<R: Read> BufReader<R> {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn with_capacity(capacity: usize, inner: R) -> BufReader<R> {
-        unsafe {
-            let mut buf = Box::new_uninit_slice(capacity).assume_init();
-            inner.initializer().initialize(&mut buf);
-            BufReader { inner, buf, pos: 0, cap: 0 }
-        }
+        let buf = Box::new_uninit_slice(capacity);
+        BufReader { inner, buf, pos: 0, cap: 0, init: 0 }
     }
 }
 
@@ -171,7 +170,8 @@ impl<R> BufReader<R> {
     /// ```
     #[stable(feature = "bufreader_buffer", since = "1.37.0")]
     pub fn buffer(&self) -> &[u8] {
-        &self.buf[self.pos..self.cap]
+        // SAFETY: self.cap is always <= self.init, so self.buf[self.pos..self.cap] is always init
+        unsafe { MaybeUninit::slice_assume_init_ref(&self.buf[self.pos..self.cap]) }
     }
 
     /// Returns the number of bytes the internal buffer can hold at once.
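Publicly, `buffer()` still just exposes the unread `pos..cap` window; what changes is only how that window is produced from the now possibly-uninitialized backing storage. A small stable-Rust check of the accessor's observable behaviour:

    use std::io::{BufRead, BufReader};

    fn main() -> std::io::Result<()> {
        let mut reader = BufReader::with_capacity(16, &b"some bytes"[..]);
        assert!(reader.buffer().is_empty()); // nothing buffered yet, nothing exposed
        reader.fill_buf()?;
        assert_eq!(reader.buffer(), b"some bytes"); // exactly the pos..cap window
        Ok(())
    }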
@@ -271,6 +271,25 @@ impl<R: Read> Read for BufReader<R> {
         Ok(nread)
     }
 
+    fn read_buf(&mut self, buf: &mut ReadBuf<'_>) -> io::Result<()> {
+        // If we don't have any buffered data and we're doing a massive read
+        // (larger than our internal buffer), bypass our internal buffer
+        // entirely.
+        if self.pos == self.cap && buf.remaining() >= self.buf.len() {
+            self.discard_buffer();
+            return self.inner.read_buf(buf);
+        }
+
+        let prev = buf.filled_len();
+
+        let mut rem = self.fill_buf()?;
+        rem.read_buf(buf)?;
+
+        self.consume(buf.filled_len() - prev); // slice impl of read_buf known to never unfill buf
+
+        Ok(())
+    }
+
     // Small read_exacts from a BufReader are extremely common when used with a deserializer.
     // The default implementation calls read in a loop, which results in surprisingly poor code
     // generation for the common path where the buffer has enough bytes to fill the passed-in
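The added `read_buf` override lets callers read into storage that is never zeroed, while `ReadBuf` tracks how much of it has been filled and initialized. A rough caller-side sketch, assuming a nightly toolchain with the `read_buf` feature as the API stood when this change landed (it has since been reworked into `BorrowedBuf`):

    #![feature(read_buf)]
    use std::io::{BufReader, Read, ReadBuf};
    use std::mem::MaybeUninit;

    fn main() -> std::io::Result<()> {
        let mut reader = BufReader::new(&b"hello world"[..]);
        // Caller-provided storage that is never zeroed up front.
        let mut storage = [MaybeUninit::<u8>::uninit(); 32];
        let mut buf = ReadBuf::uninit(&mut storage);
        reader.read_buf(&mut buf)?;
        // Only the filled portion may be inspected.
        assert_eq!(buf.filled(), b"hello world");
        Ok(())
    }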
@@ -303,16 +322,11 @@ impl<R: Read> Read for BufReader<R> {
         self.inner.is_read_vectored()
     }
 
-    // we can't skip unconditionally because of the large buffer case in read.
-    unsafe fn initializer(&self) -> Initializer {
-        self.inner.initializer()
-    }
-
     // The inner reader might have an optimized `read_to_end`. Drain our buffer and then
     // delegate to the inner implementation.
     fn read_to_end(&mut self, buf: &mut Vec<u8>) -> io::Result<usize> {
         let nread = self.cap - self.pos;
-        buf.extend_from_slice(&self.buf[self.pos..self.cap]);
+        buf.extend_from_slice(&self.buffer());
         self.discard_buffer();
         Ok(nread + self.inner.read_to_end(buf)?)
     }
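The `read_to_end` change only swaps the manual slice for the new `buffer()` accessor; the observable contract is unchanged: already-buffered bytes are drained first, then the inner reader is read to completion. A quick stable-Rust illustration of that contract:

    use std::io::{BufRead, BufReader, Read};

    fn main() -> std::io::Result<()> {
        let mut reader = BufReader::with_capacity(4, &b"abcdef"[..]);
        reader.fill_buf()?; // buffers "abcd"
        let mut out = Vec::new();
        reader.read_to_end(&mut out)?; // drains "abcd", then reads "ef" from the inner reader
        assert_eq!(out, b"abcdef");
        Ok(())
    }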
@@ -363,10 +377,23 @@ impl<R: Read> BufRead for BufReader<R> {
         // to tell the compiler that the pos..cap slice is always valid.
         if self.pos >= self.cap {
             debug_assert!(self.pos == self.cap);
-            self.cap = self.inner.read(&mut self.buf)?;
+
+            let mut readbuf = ReadBuf::uninit(&mut self.buf);
+
+            // SAFETY: `self.init` is either 0 or set to `readbuf.initialized_len()`
+            // from the last time this function was called
+            unsafe {
+                readbuf.assume_init(self.init);
+            }
+
+            self.inner.read_buf(&mut readbuf)?;
+
+            self.cap = readbuf.filled_len();
+            self.init = readbuf.initialized_len();
+
             self.pos = 0;
         }
-        Ok(&self.buf[self.pos..self.cap])
+        Ok(self.buffer())
     }
 
     fn consume(&mut self, amt: usize) {
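With `fill_buf` now going through `ReadBuf`, the `init` field remembers how much of the buffer was initialized by earlier refills, so later refills never have to treat already-written bytes as fresh. The public `BufRead` behaviour is unchanged, which a small stable-Rust check makes concrete:

    use std::io::{BufRead, BufReader};

    fn main() -> std::io::Result<()> {
        let mut reader = BufReader::with_capacity(8, &b"hello world"[..]);
        let chunk = reader.fill_buf()?; // first refill: reads up to the 8-byte capacity
        assert_eq!(chunk, b"hello wo");
        let n = chunk.len();
        reader.consume(n);
        assert_eq!(reader.fill_buf()?, b"rld"); // second refill reuses the already-initialized buffer
        Ok(())
    }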