diff --git a/graph/src/env/store.rs b/graph/src/env/store.rs index 3ecf92e0388..8197d07b6bc 100644 --- a/graph/src/env/store.rs +++ b/graph/src/env/store.rs @@ -129,14 +129,6 @@ pub struct EnvVarsStore { pub use_brin_for_all_query_types: bool, /// Temporary env var to disable certain lookups in the chain store pub disable_block_cache_for_lookup: bool, - /// Temporary env var to fall back to the old broken way of determining - /// the time of the last rollup from the POI table instead of the new - /// way that fixes - /// https://github.com/graphprotocol/graph-node/issues/5530 Remove this - /// and all code that is dead as a consequence once this has been vetted - /// sufficiently, probably after 2024-12-01 - /// Defaults to `false`, i.e. using the new fixed behavior - pub last_rollup_from_poi: bool, /// Safety switch to increase the number of columns used when /// calculating the chunk size in `InsertQuery::chunk_size`. This can be /// used to work around Postgres errors complaining 'number of @@ -197,7 +189,6 @@ impl TryFrom<InnerStore> for EnvVarsStore { create_gin_indexes: x.create_gin_indexes, use_brin_for_all_query_types: x.use_brin_for_all_query_types, disable_block_cache_for_lookup: x.disable_block_cache_for_lookup, - last_rollup_from_poi: x.last_rollup_from_poi, insert_extra_cols: x.insert_extra_cols, fdw_fetch_size: x.fdw_fetch_size, }; @@ -276,8 +267,6 @@ pub struct InnerStore { use_brin_for_all_query_types: bool, #[envconfig(from = "GRAPH_STORE_DISABLE_BLOCK_CACHE_FOR_LOOKUP", default = "false")] disable_block_cache_for_lookup: bool, - #[envconfig(from = "GRAPH_STORE_LAST_ROLLUP_FROM_POI", default = "false")] - last_rollup_from_poi: bool, #[envconfig(from = "GRAPH_STORE_INSERT_EXTRA_COLS", default = "0")] insert_extra_cols: usize, #[envconfig(from = "GRAPH_STORE_FDW_FETCH_SIZE", default = "1000")] diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index e07b4659436..92de85f316e 100644 ---
a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -904,20 +904,12 @@ impl DeploymentStore { .await } - pub(crate) fn block_time( - &self, - site: Arc<Site>, - block: BlockNumber, - ) -> Result<Option<BlockTime>, StoreError> { + pub(crate) fn block_time(&self, site: Arc<Site>) -> Result<Option<BlockTime>, StoreError> { let store = self.cheap_clone(); let mut conn = self.get_conn()?; let layout = store.layout(&mut conn, site.cheap_clone())?; - if ENV_VARS.store.last_rollup_from_poi { - layout.block_time(&mut conn, block) - } else { - layout.last_rollup(&mut conn) - } + layout.last_rollup(&mut conn) } pub(crate) async fn get_proof_of_indexing( diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs index d148060efc2..fb181b7e74d 100644 --- a/store/postgres/src/relational.rs +++ b/store/postgres/src/relational.rs @@ -32,7 +32,6 @@ use graph::blockchain::block_stream::{EntityOperationKind, EntitySourceOperation use graph::blockchain::BlockTime; use graph::cheap_clone::CheapClone; use graph::components::store::write::{RowGroup, WriteChunk}; -use graph::components::subgraph::PoICausalityRegion; use graph::constraint_violation; use graph::data::graphql::TypeExt as _; use graph::data::query::Trace; @@ -69,7 +68,7 @@ use crate::{ }, }; use graph::components::store::{AttributeNames, DerivedEntityQuery}; -use graph::data::store::{Id, IdList, IdType, BYTES_SCALAR}; +use graph::data::store::{IdList, IdType, BYTES_SCALAR}; use graph::data::subgraph::schema::POI_TABLE; use graph::prelude::{ anyhow, info, BlockNumber, DeploymentHash, Entity, EntityOperation, Logger, @@ -1113,32 +1112,6 @@ impl Layout { Ok(Arc::new(layout)) } - pub(crate) fn block_time( - &self, - conn: &mut PgConnection, - block: BlockNumber, - ) -> Result<Option<BlockTime>, StoreError> { - let block_time_name = self.input_schema.poi_block_time(); - let poi_type = self.input_schema.poi_type(); - let id = Id::String(Word::from(PoICausalityRegion::from_network( - &self.site.network, - ))); - let key = poi_type.key(id);
- - let block_time = self - .find(conn, &key, block)? - .and_then(|entity| { - entity.get(&block_time_name).map(|value| { - value - .as_int8() - .ok_or_else(|| constraint_violation!("block_time must have type Int8")) - }) - }) - .transpose()? - .map(|value| BlockTime::since_epoch(value, 0)); - Ok(block_time) - } - /// Find the time of the last rollup for the subgraph. We do this by /// looking for the maximum timestamp in any aggregation table and /// adding a little bit more than the corresponding interval to it. This diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 26e559bcbc9..3d85042d07c 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -95,8 +95,8 @@ impl LastRollup { let kind = match (has_aggregations, block) { (false, _) => LastRollup::NotNeeded, (true, None) => LastRollup::Unknown, - (true, Some(block)) => { - let block_time = store.block_time(site, block)?; + (true, Some(_)) => { + let block_time = store.block_time(site)?; block_time .map(|b| LastRollup::Some(b)) .unwrap_or(LastRollup::Unknown) @@ -240,9 +240,7 @@ impl SyncStore { firehose_cursor, )?; - let block_time = self - .writable - .block_time(self.site.cheap_clone(), block_ptr_to.number)?; + let block_time = self.writable.block_time(self.site.cheap_clone())?; self.last_rollup.set(block_time) }) }