// stygian_browser/pool.rs

1//! Browser instance pool with warmup, health checks, and idle eviction
2//!
3//! # Architecture
4//!
5//! ```text
6//! ┌───────────────────────────────────────────────────────────┐
7//! │                      BrowserPool                         │
8//! │                                                           │
9//! │  Semaphore (max_size slots — global backpressure)        │
10//! │  ┌───────────────────────────────────────────────────┐   │
11//! │  │         shared: VecDeque<PoolEntry>               │   │
12//! │  │  (unscoped browsers — used by acquire())         │   │
13//! │  └───────────────────────────────────────────────────┘   │
14//! │  ┌───────────────────────────────────────────────────┐   │
15//! │  │    scoped: HashMap<String, VecDeque<PoolEntry>>   │   │
16//! │  │  (per-context queues — used by acquire_for())    │   │
17//! │  └───────────────────────────────────────────────────┘   │
18//! │  active_count: Arc<AtomicUsize>                          │
19//! └───────────────────────────────────────────────────────────┘
20//! ```
21//!
22//! **Acquisition flow**
23//! 1. Try to pop a healthy idle entry.
24//! 2. If none idle and `active < max_size`, launch a fresh `BrowserInstance`.
25//! 3. Otherwise wait up to `acquire_timeout` for an idle slot.
26//!
27//! **Release flow**
28//! 1. Run a health-check on the returned instance.
29//! 2. If healthy and `idle < max_size`, push it back to the idle queue.
30//! 3. Otherwise shut it down and decrement the active counter.
31//!
32//! # Example
33//!
34//! ```no_run
35//! use stygian_browser::{BrowserConfig, BrowserPool};
36//!
37//! # async fn run() -> stygian_browser::error::Result<()> {
38//! let config = BrowserConfig::default();
39//! let pool = BrowserPool::new(config).await?;
40//!
41//! let stats = pool.stats();
42//! println!("Pool ready — idle: {}", stats.idle);
43//!
44//! let handle = pool.acquire().await?;
45//! handle.release().await;
46//! # Ok(())
47//! # }
48//! ```
49
50use std::sync::{
51    Arc,
52    atomic::{AtomicUsize, Ordering},
53};
54use std::time::Instant;
55
56use tokio::sync::{Mutex, Semaphore};
57use tokio::time::{sleep, timeout};
58use tracing::{debug, info, warn};
59
60use crate::{
61    BrowserConfig,
62    browser::BrowserInstance,
63    error::{BrowserError, Result},
64};
65
66// ─── PoolEntry ────────────────────────────────────────────────────────────────
67
/// An idle browser parked in the pool, stamped with its last-use time.
struct PoolEntry {
    instance: BrowserInstance,
    // Set on push-back; compared against `idle_timeout` by the eviction task.
    last_used: Instant,
}
72
73// ─── PoolInner ────────────────────────────────────────────────────────────────
74
/// Mutex-guarded pool state: the shared idle queue plus per-context queues.
struct PoolInner {
    // Unscoped idle browsers, consumed by `acquire()`.
    shared: std::collections::VecDeque<PoolEntry>,
    // Idle browsers keyed by context id, consumed by `acquire_for()`.
    scoped: std::collections::HashMap<String, std::collections::VecDeque<PoolEntry>>,
}
79
80// ─── BrowserPool ──────────────────────────────────────────────────────────────
81
82/// Thread-safe pool of reusable [`BrowserInstance`]s.
83///
84/// Maintains a warm set of idle browsers ready for immediate acquisition
85/// (`<100ms`), and lazily launches new instances when demand spikes.
86///
87/// # Example
88///
89/// ```no_run
90/// use stygian_browser::{BrowserConfig, BrowserPool};
91///
92/// # async fn run() -> stygian_browser::error::Result<()> {
93/// let pool = BrowserPool::new(BrowserConfig::default()).await?;
94/// let handle = pool.acquire().await?;
95/// handle.release().await;
96/// # Ok(())
97/// # }
98/// ```
pub struct BrowserPool {
    config: Arc<BrowserConfig>,
    // Backpressure gate sized to `max_size`; permits are consumed (and
    // forgotten) when `acquire_inner` launches a browser and added back by
    // the disposal paths in `release`/`release_context`.
    semaphore: Arc<Semaphore>,
    // Idle queues (shared + per-context) behind an async mutex.
    inner: Arc<Mutex<PoolInner>>,
    // Count of live browsers (idle + checked out).
    active_count: Arc<AtomicUsize>,
    // Cached copy of `config.pool.max_size`.
    max_size: usize,
}
106
107impl BrowserPool {
108    /// Create a new pool and pre-warm `config.pool.min_size` browser instances.
109    ///
110    /// Warmup failures are logged but not fatal — the pool will start smaller
111    /// and grow lazily.
112    ///
113    /// # Example
114    ///
115    /// ```no_run
116    /// use stygian_browser::{BrowserPool, BrowserConfig};
117    ///
118    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
119    /// let pool = BrowserPool::new(BrowserConfig::default()).await?;
120    /// # Ok(())
121    /// # }
122    /// ```
123    pub async fn new(config: BrowserConfig) -> Result<Arc<Self>> {
124        let max_size = config.pool.max_size;
125        let min_size = config.pool.min_size;
126
127        let pool = Self {
128            config: Arc::new(config),
129            semaphore: Arc::new(Semaphore::new(max_size)),
130            inner: Arc::new(Mutex::new(PoolInner {
131                shared: std::collections::VecDeque::new(),
132                scoped: std::collections::HashMap::new(),
133            })),
134            active_count: Arc::new(AtomicUsize::new(0)),
135            max_size,
136        };
137
138        // Warmup: pre-launch min_size instances
139        info!("Warming browser pool: min_size={min_size}, max_size={max_size}");
140        for i in 0..min_size {
141            match BrowserInstance::launch((*pool.config).clone()).await {
142                Ok(instance) => {
143                    pool.active_count.fetch_add(1, Ordering::Relaxed);
144                    pool.inner.lock().await.shared.push_back(PoolEntry {
145                        instance,
146                        last_used: Instant::now(),
147                    });
148                    debug!("Warmed browser {}/{min_size}", i + 1);
149                }
150                Err(e) => {
151                    warn!("Warmup browser {i} failed (non-fatal): {e}");
152                }
153            }
154        }
155
156        // Spawn idle-eviction task
157        let eviction_inner = pool.inner.clone();
158        let eviction_active = pool.active_count.clone();
159        let idle_timeout = pool.config.pool.idle_timeout;
160        let eviction_min = min_size;
161
162        tokio::spawn(async move {
163            loop {
164                sleep(idle_timeout / 2).await;
165
166                let mut guard = eviction_inner.lock().await;
167                let now = Instant::now();
168                let active = eviction_active.load(Ordering::Relaxed);
169
170                let total_idle: usize = guard.shared.len()
171                    + guard
172                        .scoped
173                        .values()
174                        .map(std::collections::VecDeque::len)
175                        .sum::<usize>();
176                let evict_count = if active > eviction_min {
177                    (active - eviction_min).min(total_idle)
178                } else {
179                    0
180                };
181
182                let mut evicted = 0usize;
183
184                // Evict from shared queue
185                let mut kept: std::collections::VecDeque<PoolEntry> =
186                    std::collections::VecDeque::new();
187                while let Some(entry) = guard.shared.pop_front() {
188                    if evicted < evict_count && now.duration_since(entry.last_used) >= idle_timeout
189                    {
190                        tokio::spawn(async move {
191                            let _ = entry.instance.shutdown().await;
192                        });
193                        eviction_active.fetch_sub(1, Ordering::Relaxed);
194                        evicted += 1;
195                    } else {
196                        kept.push_back(entry);
197                    }
198                }
199                guard.shared = kept;
200
201                // Evict from scoped queues
202                let context_ids: Vec<String> = guard.scoped.keys().cloned().collect();
203                for cid in &context_ids {
204                    if let Some(queue) = guard.scoped.get_mut(cid) {
205                        let mut kept: std::collections::VecDeque<PoolEntry> =
206                            std::collections::VecDeque::new();
207                        while let Some(entry) = queue.pop_front() {
208                            if evicted < evict_count
209                                && now.duration_since(entry.last_used) >= idle_timeout
210                            {
211                                tokio::spawn(async move {
212                                    let _ = entry.instance.shutdown().await;
213                                });
214                                eviction_active.fetch_sub(1, Ordering::Relaxed);
215                                evicted += 1;
216                            } else {
217                                kept.push_back(entry);
218                            }
219                        }
220                        *queue = kept;
221                    }
222                }
223
224                // Remove empty scoped queues
225                guard.scoped.retain(|_, q| !q.is_empty());
226
227                // Explicitly drop the guard as soon as possible to avoid holding the lock longer than needed
228                drop(guard);
229
230                if evicted > 0 {
231                    info!("Evicted {evicted} idle browsers (idle_timeout={idle_timeout:?})");
232                }
233            }
234        });
235
236        Ok(Arc::new(pool))
237    }
238
239    // ─── Acquire ──────────────────────────────────────────────────────────────
240
241    /// Acquire a browser handle from the pool.
242    ///
243    /// - If a healthy idle browser is available it is returned immediately.
244    /// - If `active < max_size` a new browser is launched.
245    /// - Otherwise waits up to `pool.acquire_timeout`.
246    ///
247    /// # Errors
248    ///
249    /// Returns [`BrowserError::PoolExhausted`] if no browser becomes available
250    /// within `pool.acquire_timeout`.
251    ///
252    /// # Example
253    ///
254    /// ```no_run
255    /// use stygian_browser::{BrowserPool, BrowserConfig};
256    ///
257    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
258    /// let pool = BrowserPool::new(BrowserConfig::default()).await?;
259    /// let handle = pool.acquire().await?;
260    /// handle.release().await;
261    /// # Ok(())
262    /// # }
263    /// ```
264    pub async fn acquire(self: &Arc<Self>) -> Result<BrowserHandle> {
265        #[cfg(feature = "metrics")]
266        let acquire_start = std::time::Instant::now();
267
268        let result = self.acquire_inner(None).await;
269
270        #[cfg(feature = "metrics")]
271        {
272            let elapsed = acquire_start.elapsed();
273            crate::metrics::METRICS.record_acquisition(elapsed);
274            crate::metrics::METRICS.set_pool_size(
275                i64::try_from(self.active_count.load(Ordering::Relaxed)).unwrap_or(i64::MAX),
276            );
277        }
278
279        result
280    }
281
282    /// Acquire a browser scoped to `context_id`.
283    ///
284    /// Browsers obtained this way are isolated: they will only be reused by
285    /// future calls to `acquire_for` with the **same** `context_id`.
286    /// The global `max_size` still applies across all contexts.
287    ///
288    /// # Errors
289    ///
290    /// Returns [`BrowserError::PoolExhausted`] if no browser becomes available
291    /// within `pool.acquire_timeout`.
292    ///
293    /// # Example
294    ///
295    /// ```no_run
296    /// use stygian_browser::{BrowserPool, BrowserConfig};
297    ///
298    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
299    /// let pool = BrowserPool::new(BrowserConfig::default()).await?;
300    /// let a = pool.acquire_for("bot-a").await?;
301    /// let b = pool.acquire_for("bot-b").await?;
302    /// a.release().await;
303    /// b.release().await;
304    /// # Ok(())
305    /// # }
306    /// ```
307    pub async fn acquire_for(self: &Arc<Self>, context_id: &str) -> Result<BrowserHandle> {
308        #[cfg(feature = "metrics")]
309        let acquire_start = std::time::Instant::now();
310
311        let result = self.acquire_inner(Some(context_id)).await;
312
313        #[cfg(feature = "metrics")]
314        {
315            let elapsed = acquire_start.elapsed();
316            crate::metrics::METRICS.record_acquisition(elapsed);
317            crate::metrics::METRICS.set_pool_size(
318                i64::try_from(self.active_count.load(Ordering::Relaxed)).unwrap_or(i64::MAX),
319            );
320        }
321
322        result
323    }
324
325    /// Shared acquisition logic. `context_id = None` reads from the shared
326    /// queue; `Some(id)` reads from the scoped queue for that context.
327    #[allow(clippy::significant_drop_tightening)] // guard scope is already minimal
328    async fn acquire_inner(self: &Arc<Self>, context_id: Option<&str>) -> Result<BrowserHandle> {
329        let acquire_timeout = self.config.pool.acquire_timeout;
330        let active = self.active_count.load(Ordering::Relaxed);
331        let max = self.max_size;
332        let ctx_owned: Option<String> = context_id.map(String::from);
333
334        // Fast path: try idle queue first
335        let fast_result = {
336            let mut guard = self.inner.lock().await;
337            let queue = match context_id {
338                Some(id) => guard.scoped.get_mut(id),
339                None => Some(&mut guard.shared),
340            };
341            let mut healthy: Option<BrowserInstance> = None;
342            let mut unhealthy: Vec<BrowserInstance> = Vec::new();
343            if let Some(queue) = queue {
344                while let Some(entry) = queue.pop_front() {
345                    if healthy.is_none() && entry.instance.is_healthy_cached() {
346                        healthy = Some(entry.instance);
347                    } else if !entry.instance.is_healthy_cached() {
348                        unhealthy.push(entry.instance);
349                    } else {
350                        // Healthy but we already found one — push back.
351                        queue.push_front(entry);
352                        break;
353                    }
354                }
355            }
356            (healthy, unhealthy)
357        };
358
359        // Dispose unhealthy entries outside the lock
360        for instance in fast_result.1 {
361            #[cfg(feature = "metrics")]
362            crate::metrics::METRICS.record_crash();
363            let active_count = self.active_count.clone();
364            tokio::spawn(async move {
365                let _ = instance.shutdown().await;
366                active_count.fetch_sub(1, Ordering::Relaxed);
367            });
368        }
369
370        if let Some(instance) = fast_result.0 {
371            debug!(
372                context = context_id.unwrap_or("shared"),
373                "Reusing idle browser (uptime={:?})",
374                instance.uptime()
375            );
376            return Ok(BrowserHandle::new(instance, Arc::clone(self), ctx_owned));
377        }
378
379        // Slow path: launch new or wait
380        if active < max {
381            // Acquire semaphore permit (non-blocking since active < max)
382            // Inline permit — no named binding to avoid significant_drop_tightening
383            timeout(acquire_timeout, self.semaphore.acquire())
384                .await
385                .map_err(|_| BrowserError::PoolExhausted { active, max })?
386                .map_err(|_| BrowserError::PoolExhausted { active, max })?
387                .forget(); // We track capacity manually via active_count
388            self.active_count.fetch_add(1, Ordering::Relaxed);
389
390            let instance = match BrowserInstance::launch((*self.config).clone()).await {
391                Ok(i) => i,
392                Err(e) => {
393                    self.active_count.fetch_sub(1, Ordering::Relaxed);
394                    self.semaphore.add_permits(1);
395                    return Err(e);
396                }
397            };
398
399            info!(
400                context = context_id.unwrap_or("shared"),
401                "Launched fresh browser (pool active={})",
402                self.active_count.load(Ordering::Relaxed)
403            );
404            return Ok(BrowserHandle::new(instance, Arc::clone(self), ctx_owned));
405        }
406
407        // Pool full — wait for a release
408        let ctx_for_poll = context_id.map(String::from);
409        timeout(acquire_timeout, async {
410            loop {
411                sleep(std::time::Duration::from_millis(50)).await;
412                let mut guard = self.inner.lock().await;
413                let queue = match ctx_for_poll.as_deref() {
414                    Some(id) => guard.scoped.get_mut(id),
415                    None => Some(&mut guard.shared),
416                };
417                if let Some(queue) = queue
418                    && let Some(entry) = queue.pop_front()
419                {
420                    drop(guard);
421                    if entry.instance.is_healthy_cached() {
422                        return Ok(BrowserHandle::new(
423                            entry.instance,
424                            Arc::clone(self),
425                            ctx_for_poll.clone(),
426                        ));
427                    }
428                    #[cfg(feature = "metrics")]
429                    crate::metrics::METRICS.record_crash();
430                    let active_count = self.active_count.clone();
431                    tokio::spawn(async move {
432                        let _ = entry.instance.shutdown().await;
433                        active_count.fetch_sub(1, Ordering::Relaxed);
434                    });
435                }
436            }
437        })
438        .await
439        .map_err(|_| BrowserError::PoolExhausted { active, max })?
440    }
441
442    // ─── Release ──────────────────────────────────────────────────────────────
443
444    /// Return a browser instance to the pool (called by [`BrowserHandle::release`]).
445    async fn release(&self, instance: BrowserInstance, context_id: Option<&str>) {
446        // Health-check before returning to idle queue
447        if instance.is_healthy_cached() {
448            let mut guard = self.inner.lock().await;
449            let total_idle: usize = guard.shared.len()
450                + guard
451                    .scoped
452                    .values()
453                    .map(std::collections::VecDeque::len)
454                    .sum::<usize>();
455            if total_idle < self.max_size {
456                let queue = match context_id {
457                    Some(id) => guard.scoped.entry(id.to_owned()).or_default(),
458                    None => &mut guard.shared,
459                };
460                queue.push_back(PoolEntry {
461                    instance,
462                    last_used: Instant::now(),
463                });
464                debug!(
465                    context = context_id.unwrap_or("shared"),
466                    "Returned browser to idle pool"
467                );
468                return;
469            }
470            drop(guard);
471        }
472
473        // Unhealthy or pool full — dispose
474        #[cfg(feature = "metrics")]
475        if !instance.is_healthy_cached() {
476            crate::metrics::METRICS.record_crash();
477        }
478        let active_count = self.active_count.clone();
479        tokio::spawn(async move {
480            let _ = instance.shutdown().await;
481            active_count.fetch_sub(1, Ordering::Relaxed);
482        });
483
484        self.semaphore.add_permits(1);
485    }
486
487    // ─── Context management ───────────────────────────────────────────────────
488
489    /// Shut down and remove all idle browsers belonging to `context_id`.
490    ///
491    /// Active handles for that context are unaffected — they will be disposed
492    /// normally when released. Call this when a bot or tenant is deprovisioned.
493    ///
494    /// Returns the number of browsers shut down.
495    ///
496    /// # Example
497    ///
498    /// ```no_run
499    /// use stygian_browser::{BrowserPool, BrowserConfig};
500    ///
501    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
502    /// let pool = BrowserPool::new(BrowserConfig::default()).await?;
503    /// let released = pool.release_context("bot-a").await;
504    /// println!("Shut down {released} browsers for bot-a");
505    /// # Ok(())
506    /// # }
507    /// ```
508    pub async fn release_context(&self, context_id: &str) -> usize {
509        let mut guard = self.inner.lock().await;
510        let entries = guard.scoped.remove(context_id).unwrap_or_default();
511        drop(guard);
512
513        let count = entries.len();
514        for entry in entries {
515            let active_count = self.active_count.clone();
516            tokio::spawn(async move {
517                let _ = entry.instance.shutdown().await;
518                active_count.fetch_sub(1, Ordering::Relaxed);
519            });
520            self.semaphore.add_permits(1);
521        }
522
523        if count > 0 {
524            info!("Released {count} browsers for context '{context_id}'");
525        }
526        count
527    }
528
529    /// List all active context IDs that have idle browsers in the pool.
530    ///
531    /// # Example
532    ///
533    /// ```no_run
534    /// use stygian_browser::{BrowserPool, BrowserConfig};
535    ///
536    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
537    /// let pool = BrowserPool::new(BrowserConfig::default()).await?;
538    /// let ids = pool.context_ids().await;
539    /// println!("Active contexts: {ids:?}");
540    /// # Ok(())
541    /// # }
542    /// ```
543    pub async fn context_ids(&self) -> Vec<String> {
544        let guard = self.inner.lock().await;
545        guard.scoped.keys().cloned().collect()
546    }
547
548    // ─── Stats ────────────────────────────────────────────────────────────────
549
550    /// Snapshot of current pool metrics.
551    ///
552    /// # Example
553    ///
554    /// ```no_run
555    /// use stygian_browser::{BrowserPool, BrowserConfig};
556    ///
557    /// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
558    /// let pool = BrowserPool::new(BrowserConfig::default()).await?;
559    /// let s = pool.stats();
560    /// println!("active={} idle={} max={}", s.active, s.idle, s.max);
561    /// # Ok(())
562    /// # }
563    /// ```
564    pub fn stats(&self) -> PoolStats {
565        PoolStats {
566            active: self.active_count.load(Ordering::Relaxed),
567            max: self.max_size,
568            available: self
569                .max_size
570                .saturating_sub(self.active_count.load(Ordering::Relaxed)),
571            idle: 0, // approximate — would need lock; kept lock-free for perf
572        }
573    }
574}
575
576// ─── BrowserHandle ────────────────────────────────────────────────────────────
577
578/// An acquired browser from the pool.
579///
580/// Call [`BrowserHandle::release`] after use to return the instance to the
581/// idle queue.  If dropped without releasing, the browser is shut down and the
582/// pool slot freed.
pub struct BrowserHandle {
    // `Some` until released or dropped; `take()`n exactly once.
    instance: Option<BrowserInstance>,
    pool: Arc<BrowserPool>,
    // Set when acquired via `acquire_for`; routes the release back to that scope.
    context_id: Option<String>,
}
588
impl BrowserHandle {
    /// Wrap a freshly-acquired instance together with its owning pool and
    /// (optional) context scope. Only the pool constructs handles.
    const fn new(
        instance: BrowserInstance,
        pool: Arc<BrowserPool>,
        context_id: Option<String>,
    ) -> Self {
        Self {
            instance: Some(instance),
            pool,
            context_id,
        }
    }

    /// Borrow the underlying [`BrowserInstance`].
    ///
    /// Returns `None` if the handle has already been released via [`release`](Self::release).
    pub const fn browser(&self) -> Option<&BrowserInstance> {
        self.instance.as_ref()
    }

    /// Mutable borrow of the underlying [`BrowserInstance`].
    ///
    /// Returns `None` if the handle has already been released via [`release`](Self::release).
    pub const fn browser_mut(&mut self) -> Option<&mut BrowserInstance> {
        self.instance.as_mut()
    }

    /// The context that owns this handle, if scoped via [`BrowserPool::acquire_for`].
    ///
    /// Returns `None` for handles obtained with [`BrowserPool::acquire`].
    pub fn context_id(&self) -> Option<&str> {
        self.context_id.as_deref()
    }

    /// Return the browser to the pool.
    ///
    /// If the instance is unhealthy or the pool is full it will be disposed.
    pub async fn release(mut self) {
        // `take()` empties `instance` so the Drop impl becomes a no-op.
        if let Some(instance) = self.instance.take() {
            self.pool
                .release(instance, self.context_id.as_deref())
                .await;
        }
    }
}
634
635impl Drop for BrowserHandle {
636    fn drop(&mut self) {
637        if let Some(instance) = self.instance.take() {
638            let pool = Arc::clone(&self.pool);
639            let context_id = self.context_id.clone();
640            tokio::spawn(async move {
641                pool.release(instance, context_id.as_deref()).await;
642            });
643        }
644    }
645}
646
647// ─── PoolStats ────────────────────────────────────────────────────────────────
648
649/// Point-in-time metrics for a [`BrowserPool`].
650///
651/// # Example
652///
653/// ```no_run
654/// use stygian_browser::{BrowserPool, BrowserConfig};
655///
656/// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
657/// let pool = BrowserPool::new(BrowserConfig::default()).await?;
658/// let stats = pool.stats();
659/// assert!(stats.max > 0);
660/// # Ok(())
661/// # }
662/// ```
#[derive(Debug, Clone)]
pub struct PoolStats {
    /// Total browser instances currently managed by the pool (idle + in-use).
    pub active: usize,
    /// Maximum allowed concurrent instances.
    pub max: usize,
    /// Free slots (max - active).
    pub available: usize,
    /// Currently idle (warm) instances ready for immediate acquisition.
    ///
    /// Note: [`BrowserPool::stats`] currently fills this with `0` as a
    /// lock-free approximation — it is not a live count.
    pub idle: usize,
}
674
675// ─── Tests ────────────────────────────────────────────────────────────────────
676
#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::{PoolConfig, StealthLevel};
    use std::time::Duration;

    // Shared fixture: min_size = 0 means no warmup, so no real Chrome binary
    // is required for these unit tests.
    fn test_config() -> BrowserConfig {
        BrowserConfig::builder()
            .stealth_level(StealthLevel::None)
            .pool(PoolConfig {
                min_size: 0, // no warmup in unit tests
                max_size: 5,
                idle_timeout: Duration::from_secs(300),
                acquire_timeout: Duration::from_millis(100),
            })
            .build()
    }

    #[test]
    fn pool_stats_reflects_max() {
        // This test is purely structural — pool construction needs a real browser
        // so we only verify the config plumbing here.
        let config = test_config();
        assert_eq!(config.pool.max_size, 5);
        assert_eq!(config.pool.min_size, 0);
    }

    #[test]
    fn pool_stats_available_saturates() {
        let stats = PoolStats {
            active: 10,
            max: 10,
            available: 0,
            idle: 0,
        };
        assert_eq!(stats.available, 0);
        assert_eq!(stats.active, stats.max);
    }

    #[test]
    fn pool_stats_partial_usage() {
        let stats = PoolStats {
            active: 3,
            max: 10,
            available: 7,
            idle: 2,
        };
        assert_eq!(stats.available, 7);
    }

    #[tokio::test]
    async fn pool_new_with_zero_min_size_ok() {
        // With min_size=0 BrowserPool::new() should succeed without a real Chrome
        // because no warmup launch is attempted.
        // We skip this if no Chrome is present; this test is integration-only.
        // Kept as a compile + config sanity check.
        let config = test_config();
        assert_eq!(config.pool.min_size, 0);
    }

    #[test]
    fn pool_stats_available_is_max_minus_active() {
        let stats = PoolStats {
            active: 6,
            max: 10,
            available: 4,
            idle: 3,
        };
        assert_eq!(stats.available, stats.max - stats.active);
    }

    #[test]
    fn pool_stats_available_cannot_underflow() {
        // active > max should not cause a panic — saturating_sub is used.
        let stats = PoolStats {
            active: 12,
            max: 10,
            available: 0_usize.saturating_sub(2),
            idle: 0,
        };
        // available is computed with saturating_sub in BrowserPool::stats()
        assert_eq!(stats.available, 0);
    }

    #[test]
    fn pool_config_acquire_timeout_respected() {
        let cfg = BrowserConfig::builder()
            .pool(PoolConfig {
                min_size: 0,
                max_size: 1,
                idle_timeout: Duration::from_secs(300),
                acquire_timeout: Duration::from_millis(10),
            })
            .build();
        assert_eq!(cfg.pool.acquire_timeout, Duration::from_millis(10));
    }

    #[test]
    fn pool_config_idle_timeout_respected() {
        let cfg = BrowserConfig::builder()
            .pool(PoolConfig {
                min_size: 1,
                max_size: 5,
                idle_timeout: Duration::from_secs(60),
                acquire_timeout: Duration::from_secs(5),
            })
            .build();
        assert_eq!(cfg.pool.idle_timeout, Duration::from_secs(60));
    }

    #[test]
    fn browser_handle_drop_does_not_panic_without_runtime() {
        // Verify BrowserHandle can be constructed/dropped without a real browser
        // by ensuring the struct itself is Send + Sync (compile-time check).
        fn assert_send<T: Send>() {}
        fn assert_sync<T: Sync>() {}
        assert_send::<BrowserPool>();
        assert_send::<PoolStats>();
        assert_sync::<BrowserPool>();
    }

    #[test]
    fn pool_stats_zero_active_means_full_availability() {
        let stats = PoolStats {
            active: 0,
            max: 8,
            available: 8,
            idle: 0,
        };
        assert_eq!(stats.available, stats.max);
    }

    #[test]
    fn pool_entry_last_used_ordering() {
        use std::time::Duration;
        let now = std::time::Instant::now();
        let older = now.checked_sub(Duration::from_secs(400)).unwrap_or(now);
        let idle_timeout = Duration::from_secs(300);
        // Simulate eviction check: entry older than idle_timeout should be evicted
        assert!(now.duration_since(older) >= idle_timeout);
    }

    #[test]
    fn pool_stats_debug_format() {
        let stats = PoolStats {
            active: 2,
            max: 10,
            available: 8,
            idle: 1,
        };
        let dbg = format!("{stats:?}");
        assert!(dbg.contains("active"));
        assert!(dbg.contains("max"));
    }

    // ─── Context segregation tests ────────────────────────────────────────────

    #[test]
    fn pool_inner_scoped_default_is_empty() {
        let inner = PoolInner {
            shared: std::collections::VecDeque::new(),
            scoped: std::collections::HashMap::new(),
        };
        assert!(inner.shared.is_empty());
        assert!(inner.scoped.is_empty());
    }

    #[test]
    fn pool_inner_scoped_insert_and_retrieve() {
        let mut inner = PoolInner {
            shared: std::collections::VecDeque::new(),
            scoped: std::collections::HashMap::new(),
        };
        // Verify the scoped map key-space is independent
        inner.scoped.entry("bot-a".to_owned()).or_default();
        inner.scoped.entry("bot-b".to_owned()).or_default();
        assert_eq!(inner.scoped.len(), 2);
        assert!(inner.scoped.contains_key("bot-a"));
        assert!(inner.scoped.contains_key("bot-b"));
        assert!(inner.shared.is_empty());
    }

    #[test]
    fn pool_inner_scoped_retain_removes_empty() {
        let mut inner = PoolInner {
            shared: std::collections::VecDeque::new(),
            scoped: std::collections::HashMap::new(),
        };
        inner.scoped.entry("empty".to_owned()).or_default();
        assert_eq!(inner.scoped.len(), 1);
        inner.scoped.retain(|_, q| !q.is_empty());
        assert!(inner.scoped.is_empty());
    }

    #[tokio::test]
    async fn pool_context_ids_empty_by_default() {
        // Without a running Chrome, we test with min_size=0 so no browser
        // is launched. We need to construct the pool carefully.
        let config = test_config();
        assert_eq!(config.pool.min_size, 0);
        // context_ids requires an actual pool instance — this test verifies
        // the zero-state. Full integration tested with real browser.
    }

    #[test]
    fn browser_handle_context_id_none_for_shared() {
        // Compile-time / structural: BrowserHandle carries context_id
        fn _check_context_api(handle: &BrowserHandle) {
            let _: Option<&str> = handle.context_id();
        }
    }

    #[test]
    fn pool_inner_total_idle_calculation() {
        // Mirrors the total-idle computation used by release() and the
        // eviction task (shared queue length + sum of scoped queue lengths).
        fn total_idle(inner: &PoolInner) -> usize {
            inner.shared.len()
                + inner
                    .scoped
                    .values()
                    .map(std::collections::VecDeque::len)
                    .sum::<usize>()
        }
        let mut inner = PoolInner {
            shared: std::collections::VecDeque::new(),
            scoped: std::collections::HashMap::new(),
        };
        assert_eq!(total_idle(&inner), 0);

        // Add entries to scoped queues (without real BrowserInstance, just check sizes)
        inner.scoped.entry("a".to_owned()).or_default();
        inner.scoped.entry("b".to_owned()).or_default();
        assert_eq!(total_idle(&inner), 0); // empty queues don't count
    }
}