Try to make uses of Span more expressive

VonTum committed Jan 31, 2024
1 parent 81269a9 commit 03c459b
Showing 6 changed files with 114 additions and 91 deletions.
45 changes: 33 additions & 12 deletions src/ast.rs
@@ -1,26 +1,53 @@


use crate::{tokenizer::{TokenTypeIdx, get_token_type_name}, linker::FileUUID, flattening::FlattenedModule, arena_alloc::{UUIDMarker, UUID, FlatAlloc}, instantiation::InstantiationList, value::Value, errors::ErrorCollector};
use crate::{tokenizer::{get_token_type_name, TokenTypeIdx, TokenizeResult}, linker::FileUUID, flattening::FlattenedModule, arena_alloc::{UUIDMarker, UUID, FlatAlloc}, instantiation::InstantiationList, value::Value, errors::ErrorCollector};
use core::ops::Range;
use std::{fmt::Display, iter::zip};

// Token span. Indices are INCLUSIVE
#[derive(Clone,Copy,Debug,PartialEq,Eq)]
pub struct Span(pub usize, pub usize);


#[derive(Debug,Clone,Copy,PartialEq,Eq,Hash)]
pub struct DeclIDMarker;
impl UUIDMarker for DeclIDMarker {const DISPLAY_NAME : &'static str = "decl_";}
pub type DeclID = UUID<DeclIDMarker>;


// Token span. Indices are INCLUSIVE
#[derive(Clone,Copy,Debug,PartialEq,Eq)]
pub struct Span(pub usize, pub usize);

impl Span {
pub fn to_range<T : Clone>(&self, tokens : &[Range<T>]) -> Range<T> {
let min = tokens[self.0].start.clone();
let max = tokens[self.1].end.clone();
min..max
}
pub fn new_overarching(left : Span, right : Span) -> Span {
assert!(left.0 <= right.0);
assert!(left.1 <= right.1);
Span(left.0, right.1)
}
pub fn new_single_token(tok_idx : usize) -> Span {
Span(tok_idx, tok_idx)
}
pub fn new_extend_to_include_token(left : Span, tok_idx : usize) -> Span {
Span::new_overarching(left, Span::new_single_token(tok_idx))
}
pub fn dont_include_last_token(self) -> Span {
self
}
pub fn only_last_token(self) -> Span {
Span(self.1, self.1)
}
pub fn new_extend_before(tok_idx : usize, right : Span) -> Span {
Span::new_overarching(Span::new_single_token(tok_idx), right)
}
pub fn new_across_tokens(start_tok : usize, end_tok : usize) -> Span {
assert!(start_tok <= end_tok);
Span(start_tok, end_tok)
}
pub fn whole_file_span(tokens : &TokenizeResult) -> Span {
Span(0, tokens.token_types.len())
}
#[track_caller]
pub fn assert_is_single_token(&self) -> usize {
assert!(self.1 == self.0, "Span is not singleton! {}..{}", self.0, self.1);
@@ -47,12 +74,6 @@ pub enum IdentifierType {
Generative
}

impl From<usize> for Span {
fn from(v : usize) -> Span {
Span(v, v)
}
}

#[derive(Debug, Clone, Copy)]
pub enum LocalOrGlobal {
Local(DeclID),
@@ -101,7 +122,7 @@ pub enum Expression {

impl Expression {
pub fn new_binop(left : SpanExpression, op : Operator, op_pos : usize/*Operator token */, right : SpanExpression) -> SpanExpression {
let span = Span(left.1.0, right.1.1);
let span = Span::new_overarching(left.1, right.1);
(Expression::BinOp(Box::new((left, op, op_pos, right))), span)
}
}
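
A minimal usage sketch (not part of the commit; the function name and token indices are assumptions chosen for illustration) of the Span constructors added above, showing how they replace raw tuple construction of the inclusive token span:

// Sketch only: exercising the new inclusive-token Span helpers.
fn span_helpers_sketch() {
    let name = Span::new_single_token(3);              // just token 3
    let args = Span::new_across_tokens(5, 9);          // tokens 5..=9
    let call = Span::new_overarching(name, args);      // instead of Span(name.0, args.1)
    assert_eq!(call, Span(3, 9));
    assert_eq!(call.only_last_token(), Span(9, 9));    // e.g. the closing bracket
    assert_eq!(Span::new_extend_to_include_token(call, 10), Span(3, 10));
    assert_eq!(Span::new_extend_before(2, call), Span(2, 9));
}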
20 changes: 10 additions & 10 deletions src/flattening.rs
@@ -156,7 +156,7 @@ impl Instantiation {
}
}

pub fn get_location_if_in_this_module(&self) -> Option<Span> {
pub fn get_location_of_module_part(&self) -> Option<Span> {
match self {
Instantiation::SubModule(sm) => sm.is_declared_in_this_module.then_some(sm.typ_span),
Instantiation::Declaration(decl) => decl.is_declared_in_this_module.then_some(decl.typ_span),
@@ -274,7 +274,7 @@ impl<'inst, 'l, 'm> FlatteningContext<'inst, 'l, 'm> {
}))
}
// Returns the module, full interface, and the output range for the function call syntax
fn desugar_func_call(&mut self, func_and_args : &[SpanExpression], closing_bracket_pos : usize) -> Option<(&Module, InterfacePorts<FlatID>)> {
fn desugar_func_call(&mut self, func_and_args : &[SpanExpression], func_call_span : Span) -> Option<(&Module, InterfacePorts<FlatID>)> {
let (name_expr, name_expr_span) = &func_and_args[0]; // Function name is always there
let func_instantiation_id = match name_expr {
Expression::Named(LocalOrGlobal::Local(l)) => {
@@ -305,13 +305,13 @@ impl<'inst, 'l, 'm> FlatteningContext<'inst, 'l, 'm> {
let module_info = vec![error_info(md.link_info.span, md.link_info.file, "Interface defined here")];
if arg_count > expected_arg_count {
// Too many args, complain about excess args at the end
let excess_args_span = Span(args[expected_arg_count].1.0, closing_bracket_pos - 1);
let excess_args_span = Span::new_overarching(args[expected_arg_count].1, func_call_span).dont_include_last_token();
self.errors.error_with_info(excess_args_span, format!("Excess argument. Function takes {expected_arg_count} args, but {arg_count} were passed."), module_info);
// Shorten args to still get proper type checking for smaller arg array
args = &args[..expected_arg_count];
} else {
// Too few args, mention missing argument names
self.errors.error_with_info(Span::from(closing_bracket_pos), format!("Too few arguments. Function takes {expected_arg_count} args, but {arg_count} were passed."), module_info);
self.errors.error_with_info(func_call_span.only_last_token(), format!("Too few arguments. Function takes {expected_arg_count} args, but {arg_count} were passed."), module_info);
}
}

@@ -357,7 +357,7 @@ impl<'inst, 'l, 'm> FlatteningContext<'inst, 'l, 'm> {
WireSource::ArrayAccess{arr, arr_idx}
}
Expression::FuncCall(func_and_args) => {
if let Some((md, interface_wires)) = self.desugar_func_call(func_and_args, expr_span.1) {
if let Some((md, interface_wires)) = self.desugar_func_call(func_and_args, *expr_span) {
let output_range = interface_wires.func_call_syntax_outputs();

if output_range.len() != 1 {
@@ -425,7 +425,7 @@ impl<'inst, 'l, 'm> FlatteningContext<'inst, 'l, 'm> {
let _wire_id = self.flatten_declaration::<true>(*decl_id, false);
}
Statement::Assign{to, expr : (Expression::FuncCall(func_and_args), func_span), eq_sign_position} => {
let Some((md, interface)) = self.desugar_func_call(&func_and_args, func_span.1) else {continue};
let Some((md, interface)) = self.desugar_func_call(&func_and_args, *func_span) else {continue};
let output_range = interface.func_call_syntax_outputs();
let outputs = &interface.ports[output_range];

@@ -435,10 +435,10 @@ impl<'inst, 'l, 'm> FlatteningContext<'inst, 'l, 'm> {
if num_targets != num_func_outputs {
let info = vec![error_info(md.link_info.span, md.link_info.file, "Module Defined here")];
if num_targets > num_func_outputs {
let excess_results_span = Span(to[num_func_outputs].expr.1.0, to.last().unwrap().expr.1.1);
let excess_results_span = Span::new_overarching(to[num_func_outputs].expr.1, to.last().unwrap().expr.1);
self.errors.error_with_info(excess_results_span, format!("Excess output targets. Function returns {num_func_outputs} results, but {num_targets} targets were given."), info);
} else {
let too_few_targets_pos = if let Some(eq) = eq_sign_position {Span::from(*eq)} else {func_name_span};
let too_few_targets_pos = if let Some(eq) = eq_sign_position {Span::new_single_token(*eq)} else {func_name_span};
self.errors.error_with_info(too_few_targets_pos, format!("Too few output targets. Function returns {num_func_outputs} results, but {num_targets} targets were given."), info);
}
}
@@ -510,7 +510,7 @@ impl<'inst, 'l, 'm> FlatteningContext<'inst, 'l, 'm> {
==== Typechecking ====
*/
fn make_declared_here(&self, decl : &Declaration) -> ErrorInfo {
error_info(Span(decl.typ_span.0, decl.name_token), self.errors.file, "Declared here")
error_info(Span::new_extend_to_include_token(decl.typ_span, decl.name_token), self.errors.file, "Declared here")
}

fn typecheck_wire_is_of_type(&self, wire : &WireInstance, expected : &Type, context : &str) {
@@ -780,7 +780,7 @@ impl<'inst, 'l, 'm> FlatteningContext<'inst, 'l, 'm> {
if !is_instance_used_map[id] {
if let Instantiation::Declaration(decl) = inst {
if decl.is_declared_in_this_module {
self.errors.warn_basic(Span::from(decl.name_token), "Unused Variable: This variable does not affect the output ports of this module");
self.errors.warn_basic(Span::new_single_token(decl.name_token), "Unused Variable: This variable does not affect the output ports of this module");
}
}
}
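
For orientation, a sketch (not from the commit; the function name and token indices are assumptions) of how the error spans above fall out of the new helpers once the whole call span is available:

// Sketch only: how the flattening error locations are built from a call span.
fn error_span_sketch() {
    // A call `foo(a, b)` occupying tokens 10..=15, with `)` at token 15.
    let func_call_span = Span::new_across_tokens(10, 15);
    // "Too few arguments" is now reported on the closing bracket alone:
    assert_eq!(func_call_span.only_last_token(), Span(15, 15));
    // "Declared here" covers the declared type plus the name token,
    // e.g. a type at tokens 20..=21 with the name at token 22:
    let typ_span = Span::new_across_tokens(20, 21);
    assert_eq!(Span::new_extend_to_include_token(typ_span, 22), Span(20, 22));
}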
27 changes: 15 additions & 12 deletions src/instantiation/latency_algorithm.rs
@@ -1,6 +1,5 @@
use std::iter::zip;


#[derive(Debug)]
pub enum LatencyCountingError {
PositiveNetLatencyCycle{nodes_involved : Vec<usize>},
@@ -65,17 +64,20 @@ fn count_latency(part_of_path : &mut [bool], absolute_latency : &mut [i64], fano
}

fn solve_latencies(fanins : &[Vec<FanInOut>], fanouts : &[Vec<FanInOut>], inputs : &[usize], outputs : &[usize]) -> Result<Vec<i64>, LatencyCountingError> {
let mut part_of_path : Vec<bool> = fanouts.iter().map(|_| false).collect();
assert!(fanins.len() == fanouts.len());

let mut part_of_path : Vec<bool> = vec![false; fanouts.len()];

// Forwards are all performed in the same block. This block is then also used as the output latencies
let mut absolute_latencies_forward : Vec<i64> = fanouts.iter().map(|_| i64::MIN).collect();
let mut absolute_latencies_forward : Vec<i64> = vec![i64::MIN; fanouts.len()];
let mut absolute_latencies_backward_combined : Vec<i64> = vec![i64::MAX; fanouts.len()];

// To find input latencies based on output latencies, we use a separate block to go backwards.
// These are done one at a time, such that we can find conflicting latencies.
let mut absolute_latencies_backward_temporary : Vec<i64> = fanouts.iter().map(|_| i64::MIN).collect();
let mut absolute_latencies_backward_temporary : Vec<i64> = vec![i64::MIN; fanouts.len()];

let mut output_was_covered : Vec<bool> = outputs.iter().map(|_id| false).collect();
let mut input_node_assignments : Vec<i64> = inputs.iter().map(|_id| i64::MIN).collect();
let mut output_was_covered : Vec<bool> = vec![false; outputs.len()];
let mut input_node_assignments : Vec<i64> = vec![i64::MIN; inputs.len()];

input_node_assignments[0] = 0; // Provide a seed to start the algorithm

@@ -126,6 +128,13 @@ fn solve_latencies(fanins : &[Vec<FanInOut>], fanouts : &[Vec<FanInOut>], inputs
}
}
}

// Add backwards latencies to combined list
for (from, to) in zip(absolute_latencies_backward_temporary.iter(), absolute_latencies_backward_combined.iter_mut()) {
if *from != i64::MIN && -*from < *to {
*to = -*from;
}
}
}
}
}
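
A standalone sketch of the merge step added in the hunk above; the i64::MIN sentinel and the negation of backward latencies come from the diff, while the free function, its name, and the example values are assumptions:

// Sketch only: fold one backward pass into the combined backward latencies.
fn merge_backward_pass(temporary: &[i64], combined: &mut [i64]) {
    // Nodes the backward pass never reached keep the i64::MIN sentinel and are skipped;
    // reached nodes contribute their negated latency, and the smallest value wins.
    for (from, to) in temporary.iter().zip(combined.iter_mut()) {
        if *from != i64::MIN && -*from < *to {
            *to = -*from;
        }
    }
}

// e.g. merging a backward pass of [-3, i64::MIN, -5] into a fresh combined
// list of [i64::MAX; 3] leaves [3, i64::MAX, 5].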
@@ -223,8 +232,6 @@ mod tests {
let should_be_err = solve_latencies_infer_ports(&graph);

assert!(matches!(should_be_err, Err(LatencyCountingError::ConflictingPortLatency{bad_ports:_})))

//assert!(latencies_equal(&found_latencies, &correct_latencies));
}

#[test]
@@ -242,8 +249,6 @@ mod tests {
let should_be_err = solve_latencies_infer_ports(&graph);

assert!(matches!(should_be_err, Err(LatencyCountingError::DisjointNodes{nodes_not_reached: _})))

//assert!(latencies_equal(&found_latencies, &correct_latencies));
}

#[test]
@@ -259,8 +264,6 @@ mod tests {
let should_be_err = solve_latencies_infer_ports(&graph);

assert!(matches!(should_be_err, Err(LatencyCountingError::PositiveNetLatencyCycle{nodes_involved: _})))

//assert!(latencies_equal(&found_latencies, &correct_latencies));
}
}

2 changes: 1 addition & 1 deletion src/instantiation/mod.rs
@@ -482,7 +482,7 @@ impl<'fl, 'l> InstantiationContext<'fl, 'l> {
if start_val > end_val {
let start_flat = &self.flattened.instantiations[stm.start].extract_wire();
let end_flat = &self.flattened.instantiations[stm.end].extract_wire();
self.errors.error_basic(Span(start_flat.span.0, end_flat.span.1), format!("for loop range end is before begin: {start_val}:{end_val}"));
self.errors.error_basic(Span::new_overarching(start_flat.span, end_flat.span), format!("for loop range end is before begin: {start_val}:{end_val}"));
return None;
}

(Diffs for the remaining 2 changed files were not loaded in this view.)
