// stygian_browser/pool.rs
1//! Browser instance pool with warmup, health checks, and idle eviction
2//!
3//! # Architecture
4//!
5//! ```text
6//! ┌───────────────────────────────────────────────────────────┐
7//! │ BrowserPool │
8//! │ │
9//! │ Semaphore (max_size slots — global backpressure) │
10//! │ ┌───────────────────────────────────────────────────┐ │
11//! │ │ shared: VecDeque<PoolEntry> │ │
12//! │ │ (unscoped browsers — used by acquire()) │ │
13//! │ └───────────────────────────────────────────────────┘ │
14//! │ ┌───────────────────────────────────────────────────┐ │
15//! │ │ scoped: HashMap<String, VecDeque<PoolEntry>> │ │
16//! │ │ (per-context queues — used by acquire_for()) │ │
17//! │ └───────────────────────────────────────────────────┘ │
18//! │ active_count: Arc<AtomicUsize> │
19//! └───────────────────────────────────────────────────────────┘
20//! ```
21//!
22//! **Acquisition flow**
23//! 1. Try to pop a healthy idle entry.
24//! 2. If none idle and `active < max_size`, launch a fresh `BrowserInstance`.
25//! 3. Otherwise wait up to `acquire_timeout` for an idle slot.
26//!
27//! **Release flow**
28//! 1. Run a health-check on the returned instance.
29//! 2. If healthy and `idle < max_size`, push it back to the idle queue.
30//! 3. Otherwise shut it down and decrement the active counter.
31//!
32//! # Example
33//!
34//! ```no_run
35//! use stygian_browser::{BrowserConfig, BrowserPool};
36//!
37//! # async fn run() -> stygian_browser::error::Result<()> {
38//! let config = BrowserConfig::default();
39//! let pool = BrowserPool::new(config).await?;
40//!
41//! let stats = pool.stats();
42//! println!("Pool ready — idle: {}", stats.idle);
43//!
44//! let handle = pool.acquire().await?;
45//! handle.release().await;
46//! # Ok(())
47//! # }
48//! ```
49
50use std::sync::{
51 Arc,
52 atomic::{AtomicUsize, Ordering},
53};
54use std::time::Instant;
55
56use tokio::sync::{Mutex, Semaphore};
57use tokio::time::{sleep, timeout};
58use tracing::{debug, info, warn};
59
60use crate::{
61 BrowserConfig,
62 browser::BrowserInstance,
63 error::{BrowserError, Result},
64};
65
66// ─── PoolEntry ────────────────────────────────────────────────────────────────
67
/// A pooled browser together with the bookkeeping needed for reuse and
/// idle eviction.
struct PoolEntry {
    /// The warm Chrome instance held idle for the next acquisition.
    instance: BrowserInstance,
    /// When the entry was last returned to the pool — compared against
    /// `idle_timeout` by the eviction loop.
    last_used: Instant,
    /// RAII proxy lease — held for the entire Chrome process lifetime.
    /// `mark_success()` is called on clean disposal; simply dropping it
    /// records a circuit-breaker failure in the proxy pool (if any).
    proxy_lease: Option<Box<dyn crate::proxy::ProxyLease>>,
}
76
77// ─── PoolInner ────────────────────────────────────────────────────────────────
78
/// Mutex-protected idle queues (see the module docs for the layout diagram).
struct PoolInner {
    /// Unscoped idle browsers, served by `acquire()`.
    shared: std::collections::VecDeque<PoolEntry>,
    /// Per-context idle queues, served by `acquire_for()`; keyed by context id.
    scoped: std::collections::HashMap<String, std::collections::VecDeque<PoolEntry>>,
}
83
84// ─── BrowserPool ──────────────────────────────────────────────────────────────
85
/// Thread-safe pool of reusable [`BrowserInstance`]s.
///
/// Maintains a warm set of idle browsers ready for immediate acquisition
/// (`<100ms`), and lazily launches new instances when demand spikes.
///
/// # Example
///
/// ```no_run
/// use stygian_browser::{BrowserConfig, BrowserPool};
///
/// # async fn run() -> stygian_browser::error::Result<()> {
/// let pool = BrowserPool::new(BrowserConfig::default()).await?;
/// let handle = pool.acquire().await?;
/// handle.release().await;
/// # Ok(())
/// # }
/// ```
pub struct BrowserPool {
    /// Launch configuration shared by every browser this pool creates.
    config: Arc<BrowserConfig>,
    /// Capacity gate: one permit per launchable browser slot (`max_size` total).
    semaphore: Arc<Semaphore>,
    /// Idle queues (shared + per-context), guarded by an async mutex.
    inner: Arc<Mutex<PoolInner>>,
    /// Browsers currently managed by the pool (idle + handed out).
    active_count: Arc<AtomicUsize>,
    /// Hard cap on concurrent browser instances.
    max_size: usize,
}
110
111impl BrowserPool {
112 /// Create a new pool and pre-warm `config.pool.min_size` browser instances.
113 ///
114 /// Warmup failures are logged but not fatal — the pool will start smaller
115 /// and grow lazily.
116 ///
117 /// # Example
118 ///
119 /// ```no_run
120 /// use stygian_browser::{BrowserPool, BrowserConfig};
121 ///
122 /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
123 /// let pool = BrowserPool::new(BrowserConfig::default()).await?;
124 /// # Ok(())
125 /// # }
126 /// ```
127 pub async fn new(config: BrowserConfig) -> Result<Arc<Self>> {
128 let max_size = config.pool.max_size;
129 let min_size = config.pool.min_size;
130
131 let pool = Self {
132 config: Arc::new(config),
133 semaphore: Arc::new(Semaphore::new(max_size)),
134 inner: Arc::new(Mutex::new(PoolInner {
135 shared: std::collections::VecDeque::new(),
136 scoped: std::collections::HashMap::new(),
137 })),
138 active_count: Arc::new(AtomicUsize::new(0)),
139 max_size,
140 };
141
142 Self::warmup_pool(&pool, min_size).await;
143
144 // Spawn idle-eviction task
145 tokio::spawn(Self::eviction_loop(
146 pool.inner.clone(),
147 pool.active_count.clone(),
148 pool.config.pool.idle_timeout,
149 min_size,
150 ));
151
152 Ok(Arc::new(pool))
153 }
154
155 // ─── Warmup ───────────────────────────────────────────────────────────────
156
157 /// Pre-warm `min_size` browser instances into the shared queue.
158 async fn warmup_pool(pool: &Self, min_size: usize) {
159 info!(
160 "Warming browser pool: min_size={min_size}, max_size={}",
161 pool.max_size
162 );
163 for i in 0..min_size {
164 let (launch_config, proxy_lease) = if let Some(source) = &pool.config.proxy_source {
165 match source.bind_proxy().await {
166 Ok((url, lease)) => {
167 let mut cfg = (*pool.config).clone();
168 cfg.proxy = Some(url);
169 cfg.proxy_source = None;
170 (cfg, Some(lease))
171 }
172 Err(e) => {
173 warn!("Warmup browser {i} failed to acquire proxy (non-fatal): {e}");
174 continue;
175 }
176 }
177 } else {
178 ((*pool.config).clone(), None)
179 };
180
181 // Acquire a semaphore permit so that `active_count` and the
182 // semaphore always agree on capacity.
183 let Ok(permit) = pool.semaphore.try_acquire() else {
184 warn!("Warmup browser {i}: semaphore full, stopping warmup early");
185 break;
186 };
187 permit.forget(); // immediately — track capacity via active_count
188 pool.active_count.fetch_add(1, Ordering::Relaxed);
189
190 match BrowserInstance::launch(launch_config).await {
191 Ok(instance) => {
192 pool.inner.lock().await.shared.push_back(PoolEntry {
193 instance,
194 last_used: Instant::now(),
195 proxy_lease,
196 });
197 debug!("Warmed browser {}/{min_size}", i + 1);
198 }
199 Err(e) => {
200 warn!("Warmup browser {i} failed (non-fatal): {e}");
201 pool.active_count.fetch_sub(1, Ordering::Relaxed);
202 pool.semaphore.add_permits(1);
203 // proxy_lease drops here = circuit-breaker failure signal
204 }
205 }
206 }
207 }
208
209 // ─── Eviction ─────────────────────────────────────────────────────────────
210
211 /// Background loop that evicts browsers idle longer than `idle_timeout`.
212 async fn eviction_loop(
213 inner: Arc<Mutex<PoolInner>>,
214 active_count: Arc<AtomicUsize>,
215 idle_timeout: std::time::Duration,
216 min_size: usize,
217 ) {
218 loop {
219 sleep(idle_timeout / 2).await;
220
221 let mut guard = inner.lock().await;
222 let now = Instant::now();
223 let active = active_count.load(Ordering::Relaxed);
224
225 let total_idle: usize = guard.shared.len()
226 + guard
227 .scoped
228 .values()
229 .map(std::collections::VecDeque::len)
230 .sum::<usize>();
231 let evict_count = if active > min_size {
232 (active - min_size).min(total_idle)
233 } else {
234 0
235 };
236
237 let mut evicted = 0usize;
238
239 // Evict from shared queue
240 let mut kept: std::collections::VecDeque<PoolEntry> = std::collections::VecDeque::new();
241 while let Some(entry) = guard.shared.pop_front() {
242 if evicted < evict_count && now.duration_since(entry.last_used) >= idle_timeout {
243 // Clean eviction: proxy was fine, just expired.
244 if let Some(lease) = &entry.proxy_lease {
245 lease.mark_success();
246 }
247 let instance = entry.instance;
248 tokio::spawn(async move {
249 let _ = instance.shutdown().await;
250 });
251 active_count.fetch_sub(1, Ordering::Relaxed);
252 evicted += 1;
253 } else {
254 kept.push_back(entry);
255 }
256 }
257 guard.shared = kept;
258
259 // Evict from scoped queues
260 let context_ids: Vec<String> = guard.scoped.keys().cloned().collect();
261 for cid in &context_ids {
262 if let Some(queue) = guard.scoped.get_mut(cid) {
263 let mut kept: std::collections::VecDeque<PoolEntry> =
264 std::collections::VecDeque::new();
265 while let Some(entry) = queue.pop_front() {
266 if evicted < evict_count
267 && now.duration_since(entry.last_used) >= idle_timeout
268 {
269 if let Some(lease) = &entry.proxy_lease {
270 lease.mark_success();
271 }
272 let instance = entry.instance;
273 tokio::spawn(async move {
274 let _ = instance.shutdown().await;
275 });
276 active_count.fetch_sub(1, Ordering::Relaxed);
277 evicted += 1;
278 } else {
279 kept.push_back(entry);
280 }
281 }
282 *queue = kept;
283 }
284 }
285
286 // Remove empty scoped queues
287 guard.scoped.retain(|_, q| !q.is_empty());
288
289 // Drop the guard promptly to avoid holding the lock longer than needed
290 drop(guard);
291
292 if evicted > 0 {
293 info!("Evicted {evicted} idle browsers (idle_timeout={idle_timeout:?})");
294 }
295 }
296 }
297
298 // ─── Acquire ──────────────────────────────────────────────────────────────
299
300 /// Acquire a browser handle from the pool.
301 ///
302 /// - If a healthy idle browser is available it is returned immediately.
303 /// - If `active < max_size` a new browser is launched.
304 /// - Otherwise waits up to `pool.acquire_timeout`.
305 ///
306 /// # Errors
307 ///
308 /// Returns [`BrowserError::PoolExhausted`] if no browser becomes available
309 /// within `pool.acquire_timeout`.
310 ///
311 /// # Example
312 ///
313 /// ```no_run
314 /// use stygian_browser::{BrowserPool, BrowserConfig};
315 ///
316 /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
317 /// let pool = BrowserPool::new(BrowserConfig::default()).await?;
318 /// let handle = pool.acquire().await?;
319 /// handle.release().await;
320 /// # Ok(())
321 /// # }
322 /// ```
323 pub async fn acquire(self: &Arc<Self>) -> Result<BrowserHandle> {
324 #[cfg(feature = "metrics")]
325 let acquire_start = std::time::Instant::now();
326
327 let result = self.acquire_inner(None).await;
328
329 #[cfg(feature = "metrics")]
330 {
331 let elapsed = acquire_start.elapsed();
332 crate::metrics::METRICS.record_acquisition(elapsed);
333 crate::metrics::METRICS.set_pool_size(
334 i64::try_from(self.active_count.load(Ordering::Relaxed)).unwrap_or(i64::MAX),
335 );
336 }
337
338 result
339 }
340
341 /// Acquire a browser scoped to `context_id`.
342 ///
343 /// Browsers obtained this way are isolated: they will only be reused by
344 /// future calls to `acquire_for` with the **same** `context_id`.
345 /// The global `max_size` still applies across all contexts.
346 ///
347 /// # Errors
348 ///
349 /// Returns [`BrowserError::PoolExhausted`] if no browser becomes available
350 /// within `pool.acquire_timeout`.
351 ///
352 /// # Example
353 ///
354 /// ```no_run
355 /// use stygian_browser::{BrowserPool, BrowserConfig};
356 ///
357 /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
358 /// let pool = BrowserPool::new(BrowserConfig::default()).await?;
359 /// let a = pool.acquire_for("bot-a").await?;
360 /// let b = pool.acquire_for("bot-b").await?;
361 /// a.release().await;
362 /// b.release().await;
363 /// # Ok(())
364 /// # }
365 /// ```
366 pub async fn acquire_for(self: &Arc<Self>, context_id: &str) -> Result<BrowserHandle> {
367 #[cfg(feature = "metrics")]
368 let acquire_start = std::time::Instant::now();
369
370 let result = self.acquire_inner(Some(context_id)).await;
371
372 #[cfg(feature = "metrics")]
373 {
374 let elapsed = acquire_start.elapsed();
375 crate::metrics::METRICS.record_acquisition(elapsed);
376 crate::metrics::METRICS.set_pool_size(
377 i64::try_from(self.active_count.load(Ordering::Relaxed)).unwrap_or(i64::MAX),
378 );
379 }
380
381 result
382 }
383
384 /// Shared acquisition logic. `context_id = None` reads from the shared
385 /// queue; `Some(id)` reads from the scoped queue for that context.
386 #[allow(clippy::significant_drop_tightening)] // guard scope is already minimal
387 async fn acquire_inner(self: &Arc<Self>, context_id: Option<&str>) -> Result<BrowserHandle> {
388 let acquire_timeout = self.config.pool.acquire_timeout;
389 let active = self.active_count.load(Ordering::Relaxed);
390 let max = self.max_size;
391 let ctx_owned: Option<String> = context_id.map(String::from);
392
393 // Fast path: try idle queue first
394 let fast_result = {
395 let mut guard = self.inner.lock().await;
396 let queue = match context_id {
397 Some(id) => guard.scoped.get_mut(id),
398 None => Some(&mut guard.shared),
399 };
400 let mut healthy: Option<(BrowserInstance, Option<Box<dyn crate::proxy::ProxyLease>>)> =
401 None;
402 let mut unhealthy: Vec<(BrowserInstance, Option<Box<dyn crate::proxy::ProxyLease>>)> =
403 Vec::new();
404 if let Some(queue) = queue {
405 while let Some(entry) = queue.pop_front() {
406 if healthy.is_none() && entry.instance.is_healthy_cached() {
407 healthy = Some((entry.instance, entry.proxy_lease));
408 } else if !entry.instance.is_healthy_cached() {
409 unhealthy.push((entry.instance, entry.proxy_lease));
410 } else {
411 // Healthy but we already found one — push back.
412 queue.push_front(entry);
413 break;
414 }
415 }
416 }
417 (healthy, unhealthy)
418 };
419
420 // Dispose unhealthy entries outside the lock
421 for (instance, _lease) in fast_result.1 {
422 // _lease drops here = circuit-breaker failure signal
423 #[cfg(feature = "metrics")]
424 crate::metrics::METRICS.record_crash();
425 let active_count = self.active_count.clone();
426 tokio::spawn(async move {
427 let _ = instance.shutdown().await;
428 active_count.fetch_sub(1, Ordering::Relaxed);
429 });
430 }
431
432 if let Some((instance, proxy_lease)) = fast_result.0 {
433 debug!(
434 context = context_id.unwrap_or("shared"),
435 "Reusing idle browser (uptime={:?})",
436 instance.uptime()
437 );
438 return Ok(BrowserHandle::new(
439 instance,
440 Arc::clone(self),
441 ctx_owned,
442 proxy_lease,
443 ));
444 }
445
446 // Slow path: launch new or wait
447 if active < max {
448 // Acquire semaphore permit (non-blocking since active < max)
449 // Inline permit — no named binding to avoid significant_drop_tightening
450 timeout(acquire_timeout, self.semaphore.acquire())
451 .await
452 .map_err(|_| BrowserError::PoolExhausted { active, max })?
453 .map_err(|_| BrowserError::PoolExhausted { active, max })?
454 .forget(); // We track capacity manually via active_count
455 self.active_count.fetch_add(1, Ordering::Relaxed);
456
457 let (launch_config, proxy_lease) = if let Some(source) = &self.config.proxy_source {
458 match source.bind_proxy().await {
459 Ok((url, lease)) => {
460 let mut cfg = (*self.config).clone();
461 cfg.proxy = Some(url);
462 cfg.proxy_source = None;
463 (cfg, Some(lease))
464 }
465 Err(e) => {
466 self.active_count.fetch_sub(1, Ordering::Relaxed);
467 self.semaphore.add_permits(1);
468 return Err(e);
469 }
470 }
471 } else {
472 ((*self.config).clone(), None)
473 };
474
475 let instance = match BrowserInstance::launch(launch_config).await {
476 Ok(i) => i,
477 Err(e) => {
478 // proxy_lease drops here = circuit-breaker failure signal
479 self.active_count.fetch_sub(1, Ordering::Relaxed);
480 self.semaphore.add_permits(1);
481 return Err(e);
482 }
483 };
484
485 info!(
486 context = context_id.unwrap_or("shared"),
487 "Launched fresh browser (pool active={})",
488 self.active_count.load(Ordering::Relaxed)
489 );
490 return Ok(BrowserHandle::new(
491 instance,
492 Arc::clone(self),
493 ctx_owned,
494 proxy_lease,
495 ));
496 }
497
498 // Pool full — wait for a release
499 let ctx_for_poll = context_id.map(String::from);
500 self.poll_for_release(ctx_for_poll, acquire_timeout, active, max)
501 .await
502 }
503
504 // ─── Poll for release ─────────────────────────────────────────────────────
505
506 /// Wait until an idle browser is returned to the pool or the timeout fires.
507 async fn poll_for_release(
508 self: &Arc<Self>,
509 ctx_for_poll: Option<String>,
510 acquire_timeout: std::time::Duration,
511 active: usize,
512 max: usize,
513 ) -> Result<BrowserHandle> {
514 timeout(acquire_timeout, async {
515 loop {
516 sleep(std::time::Duration::from_millis(50)).await;
517 let mut guard = self.inner.lock().await;
518 let queue = match ctx_for_poll.as_deref() {
519 Some(id) => guard.scoped.get_mut(id),
520 None => Some(&mut guard.shared),
521 };
522 if let Some(queue) = queue
523 && let Some(entry) = queue.pop_front()
524 {
525 drop(guard);
526 if entry.instance.is_healthy_cached() {
527 let (instance, proxy_lease) = (entry.instance, entry.proxy_lease);
528 return Ok(BrowserHandle::new(
529 instance,
530 Arc::clone(self),
531 ctx_for_poll.clone(),
532 proxy_lease,
533 ));
534 }
535 #[cfg(feature = "metrics")]
536 crate::metrics::METRICS.record_crash();
537 // _lease drops = circuit-breaker failure signal
538 let instance = entry.instance;
539 let active_count = self.active_count.clone();
540 tokio::spawn(async move {
541 let _ = instance.shutdown().await;
542 active_count.fetch_sub(1, Ordering::Relaxed);
543 });
544 }
545 }
546 })
547 .await
548 .map_err(|_| BrowserError::PoolExhausted { active, max })?
549 }
550
551 // ─── Release ──────────────────────────────────────────────────────────────
552
    /// Return a browser instance to the pool (called by [`BrowserHandle::release`]).
    ///
    /// Decision tree:
    /// - healthy + idle capacity → re-queued (shared or scoped) for reuse;
    ///   the instance stays "active", so no semaphore permit is refunded.
    /// - healthy + pool full → lease credited via `mark_success`, then the
    ///   browser is shut down in the background and its permit refunded.
    /// - unhealthy → shut down in the background; the lease drops WITHOUT
    ///   `mark_success`, recording a circuit-breaker failure.
    async fn release(
        &self,
        instance: BrowserInstance,
        context_id: Option<&str>,
        mut proxy_lease: Option<Box<dyn crate::proxy::ProxyLease>>,
    ) {
        // Health-check before returning to idle queue
        if instance.is_healthy_cached() {
            let mut guard = self.inner.lock().await;
            // Idle total spans the shared queue plus every scoped queue.
            let total_idle: usize = guard.shared.len()
                + guard
                    .scoped
                    .values()
                    .map(std::collections::VecDeque::len)
                    .sum::<usize>();
            if total_idle < self.max_size {
                let queue = match context_id {
                    Some(id) => guard.scoped.entry(id.to_owned()).or_default(),
                    None => &mut guard.shared,
                };
                queue.push_back(PoolEntry {
                    instance,
                    last_used: Instant::now(),
                    proxy_lease: proxy_lease.take(), // lease travels with the pooled entry
                });
                debug!(
                    context = context_id.unwrap_or("shared"),
                    "Returned browser to idle pool"
                );
                return;
            }
            drop(guard);
            // Healthy but pool full: mark success before clean disposal
            if let Some(lease) = &proxy_lease {
                lease.mark_success();
            }
        }
        // proxy_lease drops here:
        // - healthy + pool full → mark_success was called above, drop is a no-op
        // - unhealthy → mark_success NOT called, drop records circuit-breaker failure

        // Unhealthy or pool full — dispose
        #[cfg(feature = "metrics")]
        if !instance.is_healthy_cached() {
            crate::metrics::METRICS.record_crash();
        }
        // Shut down off the caller's path; decrement active only once the
        // process is actually gone.
        let active_count = self.active_count.clone();
        tokio::spawn(async move {
            let _ = instance.shutdown().await;
            active_count.fetch_sub(1, Ordering::Relaxed);
        });

        // The disposed browser's capacity slot becomes launchable again.
        self.semaphore.add_permits(1);
    }
608
609 // ─── Context management ───────────────────────────────────────────────────
610
611 /// Shut down and remove all idle browsers belonging to `context_id`.
612 ///
613 /// Active handles for that context are unaffected — they will be disposed
614 /// normally when released. Call this when a bot or tenant is deprovisioned.
615 ///
616 /// Returns the number of browsers shut down.
617 ///
618 /// # Example
619 ///
620 /// ```no_run
621 /// use stygian_browser::{BrowserPool, BrowserConfig};
622 ///
623 /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
624 /// let pool = BrowserPool::new(BrowserConfig::default()).await?;
625 /// let released = pool.release_context("bot-a").await;
626 /// println!("Shut down {released} browsers for bot-a");
627 /// # Ok(())
628 /// # }
629 /// ```
630 pub async fn release_context(&self, context_id: &str) -> usize {
631 let mut guard = self.inner.lock().await;
632 let entries = guard.scoped.remove(context_id).unwrap_or_default();
633 drop(guard);
634
635 let count = entries.len();
636 for entry in entries {
637 // Clean deprovisioning: mark the proxy as successful
638 if let Some(lease) = &entry.proxy_lease {
639 lease.mark_success();
640 }
641 let instance = entry.instance;
642 let active_count = self.active_count.clone();
643 tokio::spawn(async move {
644 let _ = instance.shutdown().await;
645 active_count.fetch_sub(1, Ordering::Relaxed);
646 });
647 self.semaphore.add_permits(1);
648 }
649
650 if count > 0 {
651 info!("Released {count} browsers for context '{context_id}'");
652 }
653 count
654 }
655
656 /// List all active context IDs that have idle browsers in the pool.
657 ///
658 /// # Example
659 ///
660 /// ```no_run
661 /// use stygian_browser::{BrowserPool, BrowserConfig};
662 ///
663 /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
664 /// let pool = BrowserPool::new(BrowserConfig::default()).await?;
665 /// let ids = pool.context_ids().await;
666 /// println!("Active contexts: {ids:?}");
667 /// # Ok(())
668 /// # }
669 /// ```
670 pub async fn context_ids(&self) -> Vec<String> {
671 let guard = self.inner.lock().await;
672 guard.scoped.keys().cloned().collect()
673 }
674
675 // ─── Stats ────────────────────────────────────────────────────────────────
676
677 /// Snapshot of current pool metrics.
678 ///
679 /// # Example
680 ///
681 /// ```no_run
682 /// use stygian_browser::{BrowserPool, BrowserConfig};
683 ///
684 /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
685 /// let pool = BrowserPool::new(BrowserConfig::default()).await?;
686 /// let s = pool.stats();
687 /// println!("active={} idle={} max={}", s.active, s.idle, s.max);
688 /// # Ok(())
689 /// # }
690 /// ```
691 pub fn stats(&self) -> PoolStats {
692 PoolStats {
693 active: self.active_count.load(Ordering::Relaxed),
694 max: self.max_size,
695 available: self
696 .max_size
697 .saturating_sub(self.active_count.load(Ordering::Relaxed)),
698 idle: 0, // approximate — would need lock; kept lock-free for perf
699 }
700 }
701}
702
703// ─── BrowserHandle ────────────────────────────────────────────────────────────
704
/// An acquired browser from the pool.
///
/// Call [`BrowserHandle::release`] after use to return the instance to the
/// idle queue. If dropped without releasing, the browser is shut down and the
/// pool slot freed.
pub struct BrowserHandle {
    /// `Some` while the handle owns a browser; `None` after `release()`.
    instance: Option<BrowserInstance>,
    /// Owning pool, used to return or dispose the instance.
    pool: Arc<BrowserPool>,
    /// Scoping context from `acquire_for`; `None` for shared acquisitions.
    context_id: Option<String>,
    /// Proxy lease travelling with the browser for its whole lifetime.
    proxy_lease: Option<Box<dyn crate::proxy::ProxyLease>>,
}
716
impl BrowserHandle {
    /// Internal constructor — handles are only created by the pool's
    /// acquisition paths.
    fn new(
        instance: BrowserInstance,
        pool: Arc<BrowserPool>,
        context_id: Option<String>,
        proxy_lease: Option<Box<dyn crate::proxy::ProxyLease>>,
    ) -> Self {
        Self {
            instance: Some(instance),
            pool,
            context_id,
            proxy_lease,
        }
    }

    /// Borrow the underlying [`BrowserInstance`].
    ///
    /// Returns `None` if the handle has already been released via [`release`](Self::release).
    pub const fn browser(&self) -> Option<&BrowserInstance> {
        self.instance.as_ref()
    }

    /// Mutable borrow of the underlying [`BrowserInstance`].
    ///
    /// Returns `None` if the handle has already been released via [`release`](Self::release).
    pub const fn browser_mut(&mut self) -> Option<&mut BrowserInstance> {
        self.instance.as_mut()
    }

    /// The context that owns this handle, if scoped via [`BrowserPool::acquire_for`].
    ///
    /// Returns `None` for handles obtained with [`BrowserPool::acquire`].
    pub fn context_id(&self) -> Option<&str> {
        self.context_id.as_deref()
    }

    /// Return the browser to the pool.
    ///
    /// If the instance is unhealthy or the pool is full it will be disposed.
    pub async fn release(mut self) {
        // `take()` empties the slot so the Drop impl sees nothing left to do.
        if let Some(instance) = self.instance.take() {
            self.pool
                .release(
                    instance,
                    self.context_id.as_deref(),
                    self.proxy_lease.take(),
                )
                .await;
        }
    }
}
768
769impl Drop for BrowserHandle {
770 fn drop(&mut self) {
771 if let Some(instance) = self.instance.take() {
772 let pool = Arc::clone(&self.pool);
773 let context_id = self.context_id.clone();
774 let proxy_lease = self.proxy_lease.take();
775 tokio::spawn(async move {
776 pool.release(instance, context_id.as_deref(), proxy_lease)
777 .await;
778 });
779 }
780 }
781}
782
783// ─── PoolStats ────────────────────────────────────────────────────────────────
784
/// Point-in-time metrics for a [`BrowserPool`].
///
/// # Example
///
/// ```no_run
/// use stygian_browser::{BrowserPool, BrowserConfig};
///
/// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
/// let pool = BrowserPool::new(BrowserConfig::default()).await?;
/// let stats = pool.stats();
/// assert!(stats.max > 0);
/// # Ok(())
/// # }
/// ```
#[derive(Debug, Clone)]
pub struct PoolStats {
    /// Total browser instances currently managed by the pool (idle + in-use).
    pub active: usize,
    /// Maximum allowed concurrent instances.
    pub max: usize,
    /// Free slots (max - active, saturating — never underflows if `active`
    /// transiently exceeds `max`).
    pub available: usize,
    /// Currently idle (warm) instances ready for immediate acquisition.
    /// NOTE: `BrowserPool::stats()` reports this as 0 (lock-free approximation).
    pub idle: usize,
}
810
811// ─── Tests ────────────────────────────────────────────────────────────────────
812
#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::{PoolConfig, StealthLevel};
    use std::time::Duration;

    /// Config with warmup disabled (`min_size = 0`) so no test needs a real
    /// Chrome binary. Durations are spelled in seconds for maximum toolchain
    /// compatibility (300s == 5min).
    fn test_config() -> BrowserConfig {
        BrowserConfig::builder()
            .stealth_level(StealthLevel::None)
            .pool(PoolConfig {
                min_size: 0, // no warmup in unit tests
                max_size: 5,
                idle_timeout: Duration::from_secs(300),
                acquire_timeout: Duration::from_millis(100),
            })
            .build()
    }

    #[test]
    fn pool_stats_reflects_max() {
        // Structural check only — constructing a real pool needs Chrome,
        // so we just verify the config plumbing.
        let cfg = test_config();
        assert_eq!(cfg.pool.max_size, 5);
        assert_eq!(cfg.pool.min_size, 0);
    }

    #[test]
    fn pool_stats_available_saturates() {
        let stats = PoolStats {
            active: 10,
            max: 10,
            available: 0,
            idle: 0,
        };
        assert_eq!(stats.available, 0);
        assert_eq!(stats.active, stats.max);
    }

    #[test]
    fn pool_stats_partial_usage() {
        let stats = PoolStats {
            active: 3,
            max: 10,
            available: 7,
            idle: 2,
        };
        assert_eq!(stats.available, 7);
    }

    #[tokio::test]
    async fn pool_new_with_zero_min_size_ok() {
        // min_size = 0 means BrowserPool::new() performs no warmup launches,
        // so it would succeed without a real Chrome. Full construction is
        // covered by integration tests; this is a config sanity check.
        let cfg = test_config();
        assert_eq!(cfg.pool.min_size, 0);
    }

    #[test]
    fn pool_stats_available_is_max_minus_active() {
        let stats = PoolStats {
            active: 6,
            max: 10,
            available: 4,
            idle: 3,
        };
        assert_eq!(stats.available, stats.max - stats.active);
    }

    #[test]
    fn pool_stats_available_cannot_underflow() {
        // active > max must not panic — stats() computes available with
        // saturating_sub.
        let stats = PoolStats {
            active: 12,
            max: 10,
            available: 0_usize.saturating_sub(2),
            idle: 0,
        };
        assert_eq!(stats.available, 0);
    }

    #[test]
    fn pool_config_acquire_timeout_respected() {
        let cfg = BrowserConfig::builder()
            .pool(PoolConfig {
                min_size: 0,
                max_size: 1,
                idle_timeout: Duration::from_secs(300),
                acquire_timeout: Duration::from_millis(10),
            })
            .build();
        assert_eq!(cfg.pool.acquire_timeout, Duration::from_millis(10));
    }

    #[test]
    fn pool_config_idle_timeout_respected() {
        let cfg = BrowserConfig::builder()
            .pool(PoolConfig {
                min_size: 1,
                max_size: 5,
                idle_timeout: Duration::from_secs(60),
                acquire_timeout: Duration::from_secs(5),
            })
            .build();
        assert_eq!(cfg.pool.idle_timeout, Duration::from_secs(60));
    }

    #[test]
    fn browser_handle_drop_does_not_panic_without_runtime() {
        // Compile-time structural check: the pool types satisfy the Send
        // (and, where required, Sync) bounds without needing a live browser.
        fn assert_send<T: Send>() {}
        fn assert_sync<T: Sync>() {}
        assert_send::<BrowserPool>();
        assert_send::<PoolStats>();
        assert_sync::<BrowserPool>();
    }

    #[test]
    fn pool_stats_zero_active_means_full_availability() {
        let stats = PoolStats {
            active: 0,
            max: 8,
            available: 8,
            idle: 0,
        };
        assert_eq!(stats.available, stats.max);
    }

    #[test]
    fn pool_entry_last_used_ordering() {
        let now = std::time::Instant::now();
        let older = now.checked_sub(Duration::from_secs(400)).unwrap_or(now);
        let idle_timeout = Duration::from_secs(300);
        // Simulate the eviction check: an entry idle longer than idle_timeout
        // qualifies for eviction.
        assert!(now.duration_since(older) >= idle_timeout);
    }

    #[test]
    fn pool_stats_debug_format() {
        let stats = PoolStats {
            active: 2,
            max: 10,
            available: 8,
            idle: 1,
        };
        let rendered = format!("{stats:?}");
        assert!(rendered.contains("active"));
        assert!(rendered.contains("max"));
    }

    // ─── Context segregation tests ────────────────────────────────────────────

    #[test]
    fn pool_inner_scoped_default_is_empty() {
        let inner = PoolInner {
            shared: std::collections::VecDeque::new(),
            scoped: std::collections::HashMap::new(),
        };
        assert!(inner.shared.is_empty());
        assert!(inner.scoped.is_empty());
    }

    #[test]
    fn pool_inner_scoped_insert_and_retrieve() {
        let mut inner = PoolInner {
            shared: std::collections::VecDeque::new(),
            scoped: std::collections::HashMap::new(),
        };
        // The scoped key-space is independent of the shared queue.
        for ctx in ["bot-a", "bot-b"] {
            inner.scoped.entry(ctx.to_owned()).or_default();
        }
        assert_eq!(inner.scoped.len(), 2);
        assert!(inner.scoped.contains_key("bot-a"));
        assert!(inner.scoped.contains_key("bot-b"));
        assert!(inner.shared.is_empty());
    }

    #[test]
    fn pool_inner_scoped_retain_removes_empty() {
        let mut inner = PoolInner {
            shared: std::collections::VecDeque::new(),
            scoped: std::collections::HashMap::new(),
        };
        inner.scoped.entry("empty".to_owned()).or_default();
        assert_eq!(inner.scoped.len(), 1);
        inner.scoped.retain(|_, q| !q.is_empty());
        assert!(inner.scoped.is_empty());
    }

    #[tokio::test]
    async fn pool_context_ids_empty_by_default() {
        // Zero-state check: with min_size = 0 nothing is launched. The
        // context_ids() call itself is exercised by integration tests with a
        // real browser.
        let cfg = test_config();
        assert_eq!(cfg.pool.min_size, 0);
    }

    #[test]
    fn browser_handle_context_id_none_for_shared() {
        // Compile-time / structural: BrowserHandle exposes an optional
        // context id.
        fn _check_context_api(handle: &BrowserHandle) {
            let _: Option<&str> = handle.context_id();
        }
    }

    #[test]
    fn pool_inner_total_idle_calculation() {
        fn total_idle(inner: &PoolInner) -> usize {
            inner.shared.len()
                + inner
                    .scoped
                    .values()
                    .map(std::collections::VecDeque::len)
                    .sum::<usize>()
        }
        let mut inner = PoolInner {
            shared: std::collections::VecDeque::new(),
            scoped: std::collections::HashMap::new(),
        };
        assert_eq!(total_idle(&inner), 0);

        // Empty scoped queues contribute nothing to the idle total.
        inner.scoped.entry("a".to_owned()).or_default();
        inner.scoped.entry("b".to_owned()).or_default();
        assert_eq!(total_idle(&inner), 0);
    }
}
1046}