Fix pagination code
fschutt committed Feb 22, 2025
1 parent ff3b115 commit bc8f458
Showing 1 changed file with 91 additions and 107 deletions.

azul-core/src/pagination.rs
The change rewrites the whole file (163 lines before, 147 lines after); the resulting pagination.rs:
use crate::id_tree::{NodeHierarchy, NodeDataContainer, NodeId};
use crate::dom::{Node, NodeData};
use crate::styled_dom::NodeHierarchyItem;
use crate::ui_solver::PositionedRectangle;
use alloc::collections::BTreeMap;

/// The per-page output: a partial node hierarchy plus the subset of
/// `PositionedRectangle`s that appear on that page. Both arrays map 1:1 by index.
pub struct PaginatedPage {
    /// A newly-built array of `NodeHierarchyItem`, one entry per retained node,
    /// in the same relative order as `rects`.
    pub hierarchy: NodeDataContainer<NodeHierarchyItem>,

    /// The bounding boxes, parallel to `hierarchy`.
    pub rects: NodeDataContainer<PositionedRectangle>,

    /// Maps "original NodeId" -> "this page's NodeId", so you can look up
    /// which nodes ended up in this page.
    pub old_to_new_id_map: BTreeMap<NodeId, NodeId>,
}

/// Break a single large LayoutResult into multiple "pages" by y-coordinate.
pub fn paginate_layout_result<'a>(
    node_hierarchy: &crate::id_tree::NodeDataContainerRef<'a, NodeHierarchyItem>,
    rects: &crate::id_tree::NodeDataContainerRef<'a, PositionedRectangle>,
    page_height: f32,
) -> Vec<PaginatedPage>
{
    let mut pages = Vec::new();

    // 1) Compute the total height of the layout from the root bounding box (NodeId::ZERO).
    //    If your actual root node is different, adjust accordingly; this is a minimal example.
    let total_height = rects[NodeId::ZERO].size.height;
    let num_pages = (total_height / page_height).ceil() as usize;
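    // For example (hypothetical numbers): a 2480px-tall layout split into 1000px pages
    // gives ceil(2.48) = 3 pages, the last of which is only partially filled.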

    // We traverse the tree from the root once per page, building a partial hierarchy.
    // This is a naive approach that visits the entire tree once per page; if performance
    // becomes an issue, a single pass could partition everything instead.
    for page_idx in 0..num_pages {
        let page_start = page_idx as f32 * page_height;
        let page_end = page_start + page_height;

        // We'll build arrays for the partial result:
        let mut page_node_hierarchy = Vec::<NodeHierarchyItem>::new();
        let mut page_rects_array = Vec::<PositionedRectangle>::new();

        // Map from "old NodeId" to "new NodeId" in these arrays
        let mut old_to_new_id_map = BTreeMap::<NodeId, NodeId>::new();

        // traversal queue, seeded with the root node
        let mut queue = Vec::new();
        queue.push(NodeId::ZERO);
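        // Note: `Vec::pop` takes from the back, so this is technically a depth-first
        // traversal rather than a true BFS; the set of nodes retained on the page is
        // the same either way.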

        while let Some(cur_id) = queue.pop() {
            let r = &rects[cur_id];
            let node_top = r.position.get_static_offset().y;
            let node_bottom = node_top + r.size.height;

            // If the node lies completely above or below this page's y-range, skip it
            if node_bottom < page_start || node_top > page_end {
                continue;
            }
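            // Example (hypothetical values): with page_start = 1000.0 and page_end = 2000.0,
            // a node spanning y = 950..1050 straddles the page boundary, so it is kept on
            // this page as well as on the previous one.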

            // This node belongs on this page. If we haven't already assigned
            // a "new" ID for it, do so now.
            let new_id = match old_to_new_id_map.get(&cur_id) {
                Some(&already_there) => already_there,
                None => {
                    let new_idx = page_node_hierarchy.len();

                    // push placeholders; the real hierarchy pointers are fixed up after the traversal
                    page_node_hierarchy.push(NodeHierarchyItem {
                        parent: 0,
                        previous_sibling: 0,
                        next_sibling: 0,
                        last_child: 0,
                    });
                    page_rects_array.push(PositionedRectangle::default());

                    let new_id = NodeId::new(new_idx);
                    old_to_new_id_map.insert(cur_id, new_id);
                    new_id
                }
            };
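            // The placeholders pushed above keep `page_node_hierarchy` and `page_rects_array`
            // index-aligned with the freshly assigned `new_id`, so both can be written by index.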

            // Fill out the partial node data. The parent/sibling pointers are fixed up
            // after the traversal; for now we only store the bounding box.
            let mut new_rect = r.clone();

            // Optionally rebase the `y` so that each page starts at zero:
            let offset_amount = page_start;
            new_rect.position.translate_vertical(-offset_amount);
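            // E.g. (hypothetical values): a node at y = 1250.0 on the second 1000px page
            // (page_start = 1000.0) ends up at y = 250.0 in that page's coordinate space.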

            page_rects_array[new_id.index()] = new_rect;

            // Push the children onto the queue. Whether a child is kept is decided
            // when it is popped, not here.
            let old_node = &node_hierarchy[cur_id];
            if let Some(first_child) = old_node.first_child_id(cur_id) {
                // traverse all of its siblings as well
                let mut c = first_child;
                loop {
                    queue.push(c);
                    let sibling_node = &node_hierarchy[c];
                    if let Some(next_sib) = sibling_node.next_sibling_id() {
                        c = next_sib;
                    } else {
                        break;
                    }
                }
            }
        } // while BFS

        // 2) Fix up the parent/sibling pointers in `page_node_hierarchy`.
        //    A pointer is only kept if the node it refers to also made it onto this page,
        //    i.e. its old ID is present in `old_to_new_id_map`.
        for (old_id, &new_id) in &old_to_new_id_map {
            let old_item = &node_hierarchy[*old_id];

            let parent = old_item.parent_id();
            let new_parent = parent.and_then(|pid| old_to_new_id_map.get(&pid)).copied();

            let prev = old_item.previous_sibling_id();
            let new_prev = prev.and_then(|pid| old_to_new_id_map.get(&pid)).copied();

            let next = old_item.next_sibling_id();
            let new_next = next.and_then(|pid| old_to_new_id_map.get(&pid)).copied();

            let last_child = old_item.last_child_id();
            let new_last_child = last_child.and_then(|pid| old_to_new_id_map.get(&pid)).copied();

            // NodeHierarchyItem stores indices offset by one, with 0 meaning "no node"
            page_node_hierarchy[new_id.index()] = NodeHierarchyItem {
                parent: new_parent.map(|nid| nid.index() + 1).unwrap_or(0),
                previous_sibling: new_prev.map(|nid| nid.index() + 1).unwrap_or(0),
                next_sibling: new_next.map(|nid| nid.index() + 1).unwrap_or(0),
                last_child: new_last_child.map(|nid| nid.index() + 1).unwrap_or(0),
            };
        }
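        // E.g. a parent that ended up at index 2 of `page_node_hierarchy` is stored as
        // `parent: 3`; a node whose parent fell outside this page keeps `parent: 0` (none).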

        pages.push(PaginatedPage {
            hierarchy: NodeDataContainer::new(page_node_hierarchy),
            rects: NodeDataContainer::new(page_rects_array),
            old_to_new_id_map,
        });
    } // for each page

    pages
}
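A minimal usage sketch (hypothetical call site: `node_hierarchy_ref` and `rects_ref` stand in for the `NodeDataContainerRef` views produced by the layout pass, and 842.0 for an A4 page height in points):

    let pages = paginate_layout_result(&node_hierarchy_ref, &rects_ref, 842.0);

    for (page_idx, page) in pages.iter().enumerate() {
        // Which nodes of the original tree appear on this page, and at which page-local index?
        for (old_id, new_id) in page.old_to_new_id_map.iter() {
            println!("page {}: original node {} -> page-local node {}",
                page_idx, old_id.index(), new_id.index());
        }
    }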
