This commit is contained in:
Alejandro Alonso 2026-04-07 13:07:47 +02:00
parent 84207b7b9d
commit b4ebb21ada
2 changed files with 235 additions and 45 deletions

View File

@ -373,12 +373,12 @@ pub fn get_cache_size(viewbox: Viewbox, scale: f32) -> skia::ISize {
impl RenderState {
fn schedule_base_cache_refresh_from_full_render(&mut self, src_scale_bits: u32) {
let base_bits = self.base_zoom_placeholder_scale_bits();
if src_scale_bits == base_bits {
let dst_bits = self.fast_anchor_scale_bits();
if src_scale_bits == dst_bits {
self.base_cache_refresh_pending = false;
self.base_cache_refresh_queue.clear();
self.base_cache_refresh_src_scale_bits = 0;
println!("base-cache(100%) refresh skipped (already at 100%)");
println!("base-cache(anchor) refresh skipped (already at anchor)");
return;
}
// Schedule only tiles we actually have at src scale (interest area).
@ -435,18 +435,28 @@ impl RenderState {
println!("base-cache(100%) refresh done (queue already empty)");
return;
}
let base_bits = self.base_zoom_placeholder_scale_bits();
let dst_bits = self.fast_anchor_scale_bits();
let src_bits = self.base_cache_refresh_src_scale_bits;
let dst_scale = f32::from_bits(dst_bits);
if !dst_scale.is_finite() || dst_scale <= 0.0 {
return;
}
let dst_interest_rect = tiles::get_tiles_for_viewbox_with_interest(
self.viewbox,
VIEWPORT_INTEREST_AREA_THRESHOLD,
dst_scale,
);
for _ in 0..max_tiles {
let Some(tile) = self.base_cache_refresh_queue.pop() else { break };
let Some(img) = self.surfaces.cached_tile_image(tile, src_bits) else { continue };
self.surfaces.reproject_cached_tile_into_scale(
self.surfaces.reproject_cached_tile_into_scale_limited(
&self.tile_viewbox,
&img,
tile,
src_bits,
base_bits,
dst_bits,
self.background_color,
&dst_interest_rect,
);
}
if self.base_cache_refresh_queue.is_empty() {
@ -514,16 +524,43 @@ impl RenderState {
})
}
/// Device scale when workspace zoom is 100% (`viewbox.zoom == 1`): `1.0 * dpr`.
fn base_zoom_placeholder_scale_bits(&self) -> u32 {
(1.0_f32 * self.options.dpr()).to_bits()
// Fast-zoom anchor levels (workspace zoom, without DPR).
// These are chosen to cap cross-zoom propagation work and avoid cache explosions
// when zooming very far out (e.g. < 10%).
const FAST_ZOOM_ANCHORS: [f32; 3] = [1.0, 0.25, 0.0625];

/// Snap a workspace `zoom` to the fast-mode anchor level that should back it:
/// 100% for zooms >= 25%, 25% for zooms >= 6.25%, and 6.25% below that.
fn fast_anchor_zoom(&self, zoom: f32) -> f32 {
    let [full, quarter, sixteenth] = Self::FAST_ZOOM_ANCHORS;
    if zoom >= quarter {
        // 100% anchor covers everything from 25% upward.
        full
    } else if zoom >= sixteenth {
        // 25% anchor covers [6.25%, 25%).
        quarter
    } else {
        // 6.25% anchor for extreme zoom-outs.
        sixteenth
    }
}
/// Device scale bits (includes DPR) for the anchor cache used by fast-mode.
fn fast_anchor_scale_bits(&self) -> u32 {
    let anchor_zoom = self.fast_anchor_zoom(self.viewbox.zoom());
    let device_scale = anchor_zoom * self.options.dpr();
    device_scale.to_bits()
}
/// Convert a workspace `zoom` into device-scale bits (zoom * DPR, as raw f32 bits),
/// matching the keying scheme used by the tile texture cache.
fn scale_bits_for_zoom(&self, zoom: f32) -> u32 {
    let device_scale = zoom * self.options.dpr();
    device_scale.to_bits()
}
/// Device-scale bits for every fast-zoom anchor, in declaration order
/// (100%, 25%, 6.25%). Used to mark those cache scales as protected.
fn anchor_scale_bits_list(&self) -> [u32; 3] {
    Self::FAST_ZOOM_ANCHORS.map(|zoom| self.scale_bits_for_zoom(zoom))
}
/// Scale bits used to look up tile textures in the cache.
/// In `fast_mode`, always use the 100% zoom cache; otherwise use current scale.
/// In `fast_mode`, use an anchor cache (100%/25%/6.25%); otherwise use current scale.
fn tile_texture_cache_lookup_scale_bits(&self) -> u32 {
if self.options.is_fast_mode() {
self.base_zoom_placeholder_scale_bits()
self.fast_anchor_scale_bits()
} else {
self.get_scale().to_bits()
}
@ -568,16 +605,16 @@ impl RenderState {
self.shape_last_extrect_by_scale.insert(key, new_extrect);
}
// Additionally invalidate the 100% zoom cache, but only for tiles that we already had.
// This is the fast_mode source cache; we want it refreshed after edits, without
// creating work for tiles that were never cached at 100%.
let base_bits = self.base_zoom_placeholder_scale_bits();
let base_scale = f32::from_bits(base_bits);
if base_scale.is_finite() && base_scale > 0.0 {
let new_extrect = shape.extrect(tree, base_scale);
// Additionally invalidate the current fast-mode anchor cache (100% / 25% / 6.25%),
// but only for tiles that we already had. This keeps the interaction cache correct
// without creating new tiles at the anchor level.
let anchor_bits = self.fast_anchor_scale_bits();
let anchor_scale = f32::from_bits(anchor_bits);
if anchor_scale.is_finite() && anchor_scale > 0.0 {
let new_extrect = shape.extrect(tree, anchor_scale);
let key = ShapeScaleKey {
shape_id: shape.id,
scale_bits: base_bits,
scale_bits: anchor_bits,
};
let rect = if let Some(old) = self.shape_last_extrect_by_scale.get(&key).copied() {
Self::rect_union(old, new_extrect)
@ -585,13 +622,16 @@ impl RenderState {
new_extrect
};
let tile_size = tiles::get_tile_size(base_scale);
let tile_size = tiles::get_tile_size(anchor_scale);
let TileRect(sx, sy, ex, ey) = tiles::get_tiles_for_rect(rect, tile_size);
for x in sx..=ex {
for y in sy..=ey {
let tile = tiles::Tile::from(x, y);
if self.surfaces.has_cached_tile_surface_stale_ok(tile, base_bits) {
self.surfaces.remove_cached_tile_surface(tile, base_bits);
if self
.surfaces
.has_cached_tile_surface_stale_ok(tile, anchor_bits)
{
self.surfaces.remove_cached_tile_surface(tile, anchor_bits);
}
}
}
@ -867,21 +907,30 @@ impl RenderState {
scale_bits,
);
// Bootstrap / keep 100% cache warm: whenever we finish a full-quality tile render at
// a zoom != 100%, reproject that tile into the corresponding 100% tiles.
// This lets fast_mode rely on 100% even if the file opened at 60%, etc.
// Bootstrap / keep fast-mode anchor cache warm: whenever we finish a full-quality tile
// render, reproject that tile into the currently selected anchor (100% / 25% / 6.25%),
// limited to the anchor interest rect to avoid cache explosions at extreme zoom-outs.
if !self.options.is_fast_mode() {
if let Some(img) = rendered_tile_image.as_ref() {
let base_bits = self.base_zoom_placeholder_scale_bits();
if base_bits != scale_bits {
self.surfaces.reproject_cached_tile_into_scale(
let anchor_bits = self.fast_anchor_scale_bits();
if anchor_bits != scale_bits {
let anchor_scale = f32::from_bits(anchor_bits);
if anchor_scale.is_finite() && anchor_scale > 0.0 {
let anchor_interest_rect = tiles::get_tiles_for_viewbox_with_interest(
self.viewbox,
VIEWPORT_INTEREST_AREA_THRESHOLD,
anchor_scale,
);
self.surfaces.reproject_cached_tile_into_scale_limited(
&self.tile_viewbox,
img,
tile,
scale_bits,
base_bits,
anchor_bits,
self.background_color,
&anchor_interest_rect,
);
}
}
}
}
@ -1643,6 +1692,15 @@ impl RenderState {
);
} else {
let target_world_rect = tiles::get_tile_rect(tile, current_scale);
let anchor_bits = self.fast_anchor_scale_bits();
let forced_anchor = self
.options
.is_fast_mode()
.then_some(anchor_bits)
.filter(|bits| {
self.surfaces
.world_rect_has_any_tile_at_scale_bits(target_world_rect, *bits)
});
let _ = self.surfaces.draw_tile_fallback_cross_zoom(
&self.tile_viewbox,
rect,
@ -1650,9 +1708,7 @@ impl RenderState {
target_world_rect,
current_scale,
current_scale_bits,
self.options
.is_fast_mode()
.then_some(self.base_zoom_placeholder_scale_bits()),
forced_anchor,
true,
);
}
@ -1710,6 +1766,8 @@ impl RenderState {
let scale = self.get_scale();
self.tile_viewbox.update(self.viewbox, scale);
self.surfaces
.set_protected_tile_cache_scales(self.anchor_scale_bits_list());
self.focus_mode.reset();
performance::begin_measure!("render");
@ -2814,6 +2872,13 @@ impl RenderState {
// never show empty tiles while the exact tile is being regenerated.
let tile_rect = self.get_current_tile_bounds()?;
let target_world_rect = tiles::get_tile_rect(current_tile, scale);
let anchor_bits = self.fast_anchor_scale_bits();
let forced_anchor = fast_mode
.then_some(anchor_bits)
.filter(|bits| {
self.surfaces
.world_rect_has_any_tile_at_scale_bits(target_world_rect, *bits)
});
let _blits = self.surfaces.draw_tile_fallback_cross_zoom(
&self.tile_viewbox,
tile_rect,
@ -2821,7 +2886,7 @@ impl RenderState {
target_world_rect,
scale,
lookup_bits,
fast_mode.then_some(self.base_zoom_placeholder_scale_bits()),
forced_anchor,
true,
);
performance::begin_measure!("render_shape_tree::uncached");

View File

@ -4,7 +4,7 @@ use crate::shapes::Shape;
use skia_safe::{self as skia, IRect, Paint, RRect, Rect};
use super::{gpu_state::GpuState, tiles::Tile, tiles::TileViewbox, tiles::TILE_SIZE};
use super::{gpu_state::GpuState, tiles::Tile, tiles::TileRect, tiles::TileViewbox, tiles::TILE_SIZE};
use base64::{engine::general_purpose, Engine as _};
use std::collections::{HashMap, HashSet};
@ -701,6 +701,90 @@ impl Surfaces {
}
}
/// Like `reproject_cached_tile_into_scale` but only writes destination tiles that are within
/// `dst_tile_limit` (typically the destination interest rect for the current viewport).
///
/// Splats the cached source tile image (`src_tile` rendered at `src_scale_bits`) onto every
/// destination tile at `dst_scale_bits` that it overlaps, compositing over any stale cached
/// content (or `background` where none exists), and stores each result back into the cache.
pub fn reproject_cached_tile_into_scale_limited(
&mut self,
tile_viewbox: &TileViewbox,
src_image: &skia::Image,
src_tile: Tile,
src_scale_bits: u32,
dst_scale_bits: u32,
background: skia::Color,
dst_tile_limit: &TileRect,
) {
// Scales travel as raw f32 bit patterns (they double as cache keys);
// decode and reject non-finite or non-positive scales up front.
let src_scale = f32::from_bits(src_scale_bits);
let dst_scale = f32::from_bits(dst_scale_bits);
if !src_scale.is_finite()
|| src_scale <= 0.0
|| !dst_scale.is_finite()
|| dst_scale <= 0.0
{
return;
}
// World-space footprint of the source tile, and the range of destination
// tiles (at the destination scale's tile size) that footprint covers.
let src_world_rect = super::tiles::get_tile_rect(src_tile, src_scale);
let dst_tile_size_world = super::tiles::get_tile_size(dst_scale);
let super::tiles::TileRect(sx, sy, ex, ey) =
super::tiles::get_tiles_for_rect(src_world_rect, dst_tile_size_world);
for x in sx..=ex {
for y in sy..=ey {
let dst_tile = Tile::from(x, y);
// Honor the caller-provided limit (typically the viewport interest rect)
// so reprojection never creates cache tiles far outside the visible area.
if !dst_tile_limit.contains(&dst_tile) {
continue;
}
let dst_world_rect = super::tiles::get_tile_rect(dst_tile, dst_scale);
// Tiles at the rect's edges may not actually intersect the source; skip them.
let Some(overlap_world) =
Self::rect_intersection(src_world_rect, dst_world_rect)
else {
continue;
};
// Map the world-space overlap into source-image pixel coordinates
// (offset from the source tile's origin, scaled by the source scale).
let src_px_l = (overlap_world.left() - src_world_rect.left()) * src_scale;
let src_px_t = (overlap_world.top() - src_world_rect.top()) * src_scale;
let src_px_r = (overlap_world.right() - src_world_rect.left()) * src_scale;
let src_px_b = (overlap_world.bottom() - src_world_rect.top()) * src_scale;
let src_rect = Rect::from_ltrb(src_px_l, src_px_t, src_px_r, src_px_b);
// Same mapping for the destination tile's pixel coordinates.
let dst_px_l = (overlap_world.left() - dst_world_rect.left()) * dst_scale;
let dst_px_t = (overlap_world.top() - dst_world_rect.top()) * dst_scale;
let dst_px_r = (overlap_world.right() - dst_world_rect.left()) * dst_scale;
let dst_px_b = (overlap_world.bottom() - dst_world_rect.top()) * dst_scale;
let dst_rect = Rect::from_ltrb(dst_px_l, dst_px_t, dst_px_r, dst_px_b);
// Compose into a fresh TILE_SIZE x TILE_SIZE surface. If surface
// allocation fails (e.g. GPU resources exhausted), abort the whole
// reprojection rather than continuing with partial results.
let mut tile_surface = match self
.current
.new_surface_with_dimensions((TILE_SIZE as i32, TILE_SIZE as i32))
{
Some(s) => s,
None => return,
};
tile_surface.canvas().clear(background);
// Preserve previously cached (possibly stale) content for the parts of
// the destination tile this source tile does not cover.
if let Some(existing) = self.tiles.get_stale(dst_tile, dst_scale_bits) {
tile_surface.canvas().draw_image_rect(
existing,
None,
Rect::from_xywh(0.0, 0.0, TILE_SIZE, TILE_SIZE),
&skia::Paint::default(),
);
}
// Blit the overlapping region of the source image on top.
// SrcRectConstraint::Fast allows sampling slightly outside src_rect for speed.
tile_surface.canvas().draw_image_rect(
src_image,
Some((&src_rect, skia::canvas::SrcRectConstraint::Fast)),
dst_rect,
&skia::Paint::default(),
);
// Snapshot and store the composed tile under the destination scale key.
let new_img = tile_surface.image_snapshot();
self.tiles.add(tile_viewbox, &dst_tile, dst_scale_bits, new_img);
}
}
}
/// Draws the current tile directly to the target and cache surfaces without
/// creating a snapshot. This avoids GPU stalls from ReadPixels but doesn't
/// populate the tile texture cache (suitable for one-shot renders like tests).
@ -875,6 +959,10 @@ impl Surfaces {
self.tiles.gc();
}
/// Mark these scale-bit keys as protected in the tile texture cache:
/// tiles at protected scales are only evicted after all other candidates
/// (see `TileTextureCache::free_tiles`).
pub fn set_protected_tile_cache_scales(&mut self, scale_bits: [u32; 3]) {
self.tiles.set_protected_scales(scale_bits);
}
pub fn resize_export_surface(&mut self, scale: f32, rect: skia::Rect) {
let target_w = (scale * rect.width()).ceil() as i32;
let target_h = (scale * rect.height()).ceil() as i32;
@ -921,6 +1009,7 @@ pub struct TileTextureCache {
grid: HashMap<TileCacheKey, skia::Image>,
removed: HashSet<TileCacheKey>,
scales: HashSet<u32>,
protected_scales: HashSet<u32>,
}
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
@ -935,9 +1024,15 @@ impl TileTextureCache {
grid: HashMap::default(),
removed: HashSet::default(),
scales: HashSet::default(),
protected_scales: HashSet::default(),
}
}
/// Replace the set of protected scale-bit keys with `scales`.
/// Tiles at protected scales are preferred survivors during eviction.
pub fn set_protected_scales(&mut self, scales: impl IntoIterator<Item = u32>) {
    self.protected_scales = scales.into_iter().collect();
}
pub fn has(&self, tile: Tile, scale_bits: u32) -> bool {
let key = TileCacheKey { tile, scale_bits };
self.grid.contains_key(&key) && !self.removed.contains(&key)
@ -959,20 +1054,50 @@ impl TileTextureCache {
}
fn free_tiles(&mut self, tile_viewbox: &TileViewbox) {
println!("free_tiles");
let marked: Vec<_> = self
.grid
.iter_mut()
.filter_map(|(key, _)| {
// Trace cache distribution by scale when evicting tiles.
// Helpful to diagnose cache explosions at extreme zoom levels.
let mut counts_by_scale: HashMap<u32, usize> = HashMap::new();
for key in self.grid.keys() {
*counts_by_scale.entry(key.scale_bits).or_insert(0) += 1;
}
// Ensure protected scales show up in the trace even if currently empty.
for bits in self.protected_scales.iter().copied() {
counts_by_scale.entry(bits).or_insert(0);
}
let mut scales: Vec<u32> = counts_by_scale.keys().copied().collect();
scales.sort_unstable();
println!(
"free_tiles: total_tiles={} scales={} dist={}",
self.grid.len(),
scales.len(),
scales
.iter()
.map(|bits| format!("{:.4}:{:?}", f32::from_bits(*bits), counts_by_scale[bits]))
.collect::<Vec<_>>()
.join(", ")
);
// Prefer evicting non-protected (non-anchor) tiles first.
let mut marked: Vec<TileCacheKey> = Vec::new();
for pass in [false, true] {
if marked.len() >= TEXTURES_BATCH_DELETE {
break;
}
for (key, _) in self.grid.iter() {
if marked.len() >= TEXTURES_BATCH_DELETE {
break;
}
// First pass: only non-protected scales. Second pass: allow protected.
if !pass && self.protected_scales.contains(&key.scale_bits) {
continue;
}
// Approximate visibility check: uses tile coords only.
if !tile_viewbox.is_visible(&key.tile) {
Some(*key)
} else {
None
marked.push(*key);
}
})
.take(TEXTURES_BATCH_DELETE)
.collect();
}
}
for key in marked.iter() {
self.grid.remove(key);