@@ -19,6 +19,7 @@ pub(crate) use item::FnParseMode;
 pub use pat::{CommaRecoveryMode, RecoverColon, RecoverComma};
 pub use path::PathStyle;
 
+use core::fmt;
 use rustc_ast::ptr::P;
 use rustc_ast::token::{self, Delimiter, Token, TokenKind};
 use rustc_ast::tokenstream::{AttributesData, DelimSpacing, DelimSpan, Spacing};
@@ -60,7 +61,7 @@ mod mut_visit {
 }
 
 bitflags::bitflags! {
-    #[derive(Clone, Copy)]
+    #[derive(Clone, Copy, Debug)]
     struct Restrictions: u8 {
         const STMT_EXPR = 1 << 0;
         const NO_STRUCT_LITERAL = 1 << 1;
@@ -86,7 +87,7 @@ enum BlockMode {
 
 /// Whether or not we should force collection of tokens for an AST node,
 /// regardless of whether or not it has attributes
-#[derive(Clone, Copy, PartialEq)]
+#[derive(Clone, Copy, Debug, PartialEq)]
 pub enum ForceCollect {
     Yes,
     No,
@@ -134,7 +135,7 @@ macro_rules! maybe_recover_from_interpolated_ty_qpath {
     };
 }
 
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, Debug)]
 pub enum Recovery {
     Allowed,
     Forbidden,
@@ -184,7 +185,7 @@ pub struct Parser<'a> {
     capture_state: CaptureState,
     /// This allows us to recover when the user forget to add braces around
     /// multiple statements in the closure body.
-    pub current_closure: Option<ClosureSpans>,
+    current_closure: Option<ClosureSpans>,
     /// Whether the parser is allowed to do recovery.
     /// This is disabled when parsing macro arguments, see #103534
     pub recovery: Recovery,
@@ -196,7 +197,7 @@ pub struct Parser<'a> {
 rustc_data_structures::static_assert_size!(Parser<'_>, 264);
 
 /// Stores span information about a closure.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 pub struct ClosureSpans {
     pub whole_closure: Span,
     pub closing_pipe: Span,
@@ -225,15 +226,15 @@ pub type ReplaceRange = (Range<u32>, Vec<(FlatToken, Spacing)>);
 /// Controls how we capture tokens. Capturing can be expensive,
 /// so we try to avoid performing capturing in cases where
 /// we will never need an `AttrTokenStream`.
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, Debug)]
 pub enum Capturing {
     /// We aren't performing any capturing - this is the default mode.
     No,
     /// We are capturing tokens
     Yes,
 }
 
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 struct CaptureState {
     capturing: Capturing,
     replace_ranges: Vec<ReplaceRange>,
@@ -244,7 +245,7 @@ struct CaptureState {
 /// we (a) lex tokens into a nice tree structure (`TokenStream`), and then (b)
 /// use this type to emit them as a linear sequence. But a linear sequence is
 /// what the parser expects, for the most part.
-#[derive(Clone)]
+#[derive(Clone, Debug)]
 struct TokenCursor {
     // Cursor for the current (innermost) token stream. The delimiters for this
     // token stream are found in `self.stack.last()`; when that is `None` then
@@ -349,6 +350,7 @@ enum TokenExpectType {
 }
 
 /// A sequence separator.
+#[derive(Debug)]
 struct SeqSep {
     /// The separator token.
     sep: Option<TokenKind>,
@@ -366,6 +368,7 @@ impl SeqSep {
     }
 }
 
+#[derive(Debug)]
 pub enum FollowedByType {
     Yes,
     No,
@@ -390,7 +393,7 @@ pub enum Trailing {
     Yes,
 }
 
-#[derive(Clone, Copy, PartialEq, Eq)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
 pub enum TokenDescription {
     ReservedIdentifier,
     Keyword,
@@ -1548,6 +1551,47 @@ impl<'a> Parser<'a> {
         })
     }
 
+    // debug view of the parser's token stream, up to `{lookahead}` tokens
+    pub fn debug_lookahead(&self, lookahead: usize) -> impl fmt::Debug + '_ {
+        struct DebugParser<'dbg> {
+            parser: &'dbg Parser<'dbg>,
+            lookahead: usize,
+        }
+
+        impl fmt::Debug for DebugParser<'_> {
+            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+                let Self { parser, lookahead } = self;
+                let mut dbg_fmt = f.debug_struct("Parser"); // or at least, one view of
+
+                // we don't need N spans, but we want at least one, so print all of prev_token
+                dbg_fmt.field("prev_token", &parser.prev_token);
+                // make it easier to peek farther ahead by taking TokenKinds only until EOF
+                let tokens = (0..*lookahead)
+                    .map(|i| parser.look_ahead(i, |tok| tok.kind.clone()))
+                    .scan(parser.prev_token == TokenKind::Eof, |eof, tok| {
+                        let current = (!*eof).then_some(tok.clone()); // include a trailing EOF token
+                        *eof |= &tok == &TokenKind::Eof;
+                        current
+                    });
+                dbg_fmt.field_with("tokens", |field| field.debug_list().entries(tokens).finish());
+                dbg_fmt.field("approx_token_stream_pos", &parser.num_bump_calls);
+
+                // some fields are interesting for certain values, as they relate to macro parsing
+                if let Some(subparser) = parser.subparser_name {
+                    dbg_fmt.field("subparser_name", &subparser);
+                }
+                if let Recovery::Forbidden = parser.recovery {
+                    dbg_fmt.field("recovery", &parser.recovery);
+                }
+
+                // imply there's "more to know" than this view
+                dbg_fmt.finish_non_exhaustive()
+            }
+        }
+
+        DebugParser { parser: self, lookahead }
+    }
+
     pub fn clear_expected_tokens(&mut self) {
         self.expected_tokens.clear();
     }
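
For context, a minimal usage sketch (not part of the diff above; the call site and the `debug!` macro from `tracing` are assumptions for illustration) showing how the returned `impl Debug` value might be logged from inside a parser method:

    // Hypothetical call site inside some `impl<'a> Parser<'a>` method:
    // logs something like `Parser { prev_token: ..., tokens: [...], approx_token_stream_pos: ..., .. }`.
    debug!("parser state: {:?}", self.debug_lookahead(4));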