This commit is contained in:
DogmaDragon
2026-03-30 17:45:56 +03:00
11 changed files with 777 additions and 257 deletions

View File

@@ -1,7 +1,7 @@
name: Haven VLM Connector
# requires: PythonDepManager
description: Tag videos with Vision-Language Models using any OpenAI-compatible VLM endpoint
version: 1.0.3
version: 1.1.0
url: https://discourse.stashapp.cc/t/haven-vlm-connector/5464
exec:
- python

View File

@@ -13,6 +13,7 @@ import yaml
# VLM Engine Configuration
VLM_ENGINE_CONFIG = {
"active_ai_models": ["vlm_multiplexer_model"],
"trace_logging": True,
"pipelines": {
"video_pipeline_dynamic": {
"inputs": [

View File

@@ -18,6 +18,7 @@ from datetime import datetime
try:
from exit_tracker import install_exit_tracker
import stashapi.log as log
install_exit_tracker(log)
except ImportError as e:
print(f"Warning: exit_tracker not available: {e}")
@@ -28,23 +29,23 @@ except ImportError as e:
# Use PythonDepManager for dependency management
try:
from PythonDepManager import ensure_import
# Install and ensure all required dependencies with specific versions
ensure_import(
"stashapi:stashapp-tools==0.2.58",
"aiohttp==3.12.13",
"pydantic==2.12.5",
"vlm-engine==0.9.4",
"pyyaml==6.0.2"
"vlm-engine==1.0.0",
"pyyaml==6.0.2",
)
# Import the dependencies after ensuring they're available
import stashapi.log as log
from stashapi.stashapp import StashInterface
import aiohttp
import pydantic
import yaml
except ImportError as e:
print(f"Failed to import PythonDepManager or required dependencies: {e}")
print("Please ensure PythonDepManager is installed and available.")
@@ -59,7 +60,9 @@ try:
import haven_vlm_config as config
except ModuleNotFoundError:
log.error("Please provide a haven_vlm_config.py file with the required variables.")
raise Exception("Please provide a haven_vlm_config.py file with the required variables.")
raise Exception(
"Please provide a haven_vlm_config.py file with the required variables."
)
import haven_media_handler as media_handler
import haven_vlm_engine as vlm_engine
@@ -78,29 +81,36 @@ video_progress: Dict[str, float] = {}
# ----------------- Main Execution -----------------
async def main() -> None:
"""Main entry point for the plugin"""
global semaphore
# Semaphore initialization logging for hypothesis A
log.debug(f"[DEBUG_HYPOTHESIS_A] Initializing semaphore with limit {config.config.concurrent_task_limit}")
log.debug(
f"[DEBUG_HYPOTHESIS_A] Initializing semaphore with limit {config.config.concurrent_task_limit}"
)
semaphore = asyncio.Semaphore(config.config.concurrent_task_limit)
# Post-semaphore creation logging
log.debug(f"[DEBUG_HYPOTHESIS_A] Semaphore created successfully (limit: {config.config.concurrent_task_limit})")
log.debug(
f"[DEBUG_HYPOTHESIS_A] Semaphore created successfully (limit: {config.config.concurrent_task_limit})"
)
json_input = read_json_input()
output = {}
await run(json_input, output)
out = json.dumps(output)
print(out + "\n")
def read_json_input() -> Dict[str, Any]:
    """Parse the plugin's JSON payload from standard input.

    Stash invokes the plugin with a single JSON document on stdin;
    this reads the whole stream and decodes it into a dict.
    """
    raw = sys.stdin.read()
    payload: Dict[str, Any] = json.loads(raw)
    return payload
async def run(json_input: Dict[str, Any], output: Dict[str, Any]) -> None:
"""Main execution logic"""
plugin_args = None
@@ -113,7 +123,7 @@ async def run(json_input: Dict[str, Any], output: Dict[str, Any]) -> None:
raise
try:
plugin_args = json_input['args']["mode"]
plugin_args = json_input["args"]["mode"]
except KeyError:
pass
@@ -129,46 +139,54 @@ async def run(json_input: Dict[str, Any], output: Dict[str, Any]) -> None:
collect_incorrect_markers_and_images()
output["output"] = "ok"
return
output["output"] = "ok"
return
# ----------------- High Level Processing Functions -----------------
async def tag_videos() -> None:
"""Tag videos with VLM analysis using improved async orchestration"""
global completed_tasks, total_tasks
scenes = media_handler.get_tagme_scenes()
if not scenes:
log.info("No videos to tag. Have you tagged any scenes with the VLM_TagMe tag to get processed?")
log.info(
"No videos to tag. Have you tagged any scenes with the VLM_TagMe tag to get processed?"
)
return
total_tasks = len(scenes)
completed_tasks = 0
video_progress.clear()
for scene in scenes:
video_progress[scene.get('id', 'unknown')] = 0.0
video_progress[scene.get("id", "unknown")] = 0.0
log.progress(0.0)
log.info(f"🚀 Starting video processing for {total_tasks} scenes with semaphore limit of {config.config.concurrent_task_limit}")
log.info(
f"🚀 Starting video processing for {total_tasks} scenes with semaphore limit of {config.config.concurrent_task_limit}"
)
# Create tasks with proper indexing for debugging
tasks = []
for i, scene in enumerate(scenes):
# Pre-task creation logging for hypothesis A (semaphore deadlock) and E (signal termination)
scene_id = scene.get('id')
log.debug(f"[DEBUG_HYPOTHESIS_A] Creating task {i+1}/{total_tasks} for scene {scene_id}, semaphore limit: {config.config.concurrent_task_limit}")
scene_id = scene.get("id")
log.debug(
f"[DEBUG_HYPOTHESIS_A] Creating task {i + 1}/{total_tasks} for scene {scene_id}, semaphore limit: {config.config.concurrent_task_limit}"
)
task = asyncio.create_task(__tag_video_with_timing(scene, i))
tasks.append(task)
# Use asyncio.as_completed to process results as they finish (proves concurrency)
completed_task_futures = asyncio.as_completed(tasks)
batch_start_time = asyncio.get_event_loop().time()
for completed_task in completed_task_futures:
try:
await completed_task
@@ -178,36 +196,46 @@ async def tag_videos() -> None:
completed_tasks += 1
# Exception logging for hypothesis E (signal termination)
error_type = type(e).__name__
log.debug(f"[DEBUG_HYPOTHESIS_E] Task failed with exception: {error_type}: {str(e)} (Task {completed_tasks}/{total_tasks})")
log.debug(
f"[DEBUG_HYPOTHESIS_E] Task failed with exception: {error_type}: {str(e)} (Task {completed_tasks}/{total_tasks})"
)
log.error(f"❌ Task failed: {e}")
total_time = asyncio.get_event_loop().time() - batch_start_time
log.info(f"🎉 All {total_tasks} videos completed in {total_time:.2f}s (avg: {total_time/total_tasks:.2f}s/video)")
log.info(
f"🎉 All {total_tasks} videos completed in {total_time:.2f}s (avg: {total_time / total_tasks:.2f}s/video)"
)
log.progress(1.0)
async def find_marker_settings() -> None:
"""Find optimal marker settings based on a single tagged video"""
scenes = media_handler.get_tagme_scenes()
if len(scenes) != 1:
log.error("Please tag exactly one scene with the VLM_TagMe tag to get processed.")
log.error(
"Please tag exactly one scene with the VLM_TagMe tag to get processed."
)
return
scene = scenes[0]
await __find_marker_settings(scene)
def collect_incorrect_markers_and_images() -> None:
"""Collect data from incorrectly tagged markers and images"""
incorrect_images = media_handler.get_incorrect_images()
image_paths, image_ids, temp_files = media_handler.get_image_paths_and_ids(incorrect_images)
image_paths, image_ids, temp_files = media_handler.get_image_paths_and_ids(
incorrect_images
)
incorrect_markers = media_handler.get_incorrect_markers()
if not (len(incorrect_images) > 0 or len(incorrect_markers) > 0):
log.info("No incorrect images or markers to collect.")
return
current_time = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
try:
# Process images
image_folder = os.path.join(config.config.output_data_dir, "images")
@@ -235,98 +263,130 @@ def collect_incorrect_markers_and_images() -> None:
scene_folder = os.path.join(config.config.output_data_dir, "scenes")
os.makedirs(scene_folder, exist_ok=True)
tag_folders = {}
for marker in incorrect_markers:
scene_path = marker['scene']['files'][0]['path']
scene_path = marker["scene"]["files"][0]["path"]
if not scene_path:
log.error(f"Marker {marker['id']} has no scene path")
continue
try:
tag_name = marker['primary_tag']['name']
tag_name = marker["primary_tag"]["name"]
if tag_name not in tag_folders:
tag_folders[tag_name] = os.path.join(scene_folder, tag_name)
os.makedirs(tag_folders[tag_name], exist_ok=True)
media_handler.write_scene_marker_to_file(marker, scene_path, tag_folders[tag_name])
media_handler.write_scene_marker_to_file(
marker, scene_path, tag_folders[tag_name]
)
except Exception as e:
log.error(f"Failed to collect scene: {e}")
# Remove incorrect tags from images
image_ids = [image['id'] for image in incorrect_images]
image_ids = [image["id"] for image in incorrect_images]
media_handler.remove_incorrect_tag_from_images(image_ids)
# ----------------- Low Level Processing Functions -----------------
async def __tag_video_with_timing(scene: Dict[str, Any], scene_index: int) -> None:
"""Tag a single video scene with timing diagnostics"""
start_time = asyncio.get_event_loop().time()
scene_id = scene.get('id', 'unknown')
scene_id = scene.get("id", "unknown")
log.info(f"🎬 Starting video {scene_index + 1}: Scene {scene_id}")
try:
await __tag_video(scene)
end_time = asyncio.get_event_loop().time()
duration = end_time - start_time
log.info(f"✅ Completed video {scene_index + 1} (Scene {scene_id}) in {duration:.2f}s")
log.info(
f"✅ Completed video {scene_index + 1} (Scene {scene_id}) in {duration:.2f}s"
)
except Exception as e:
end_time = asyncio.get_event_loop().time()
duration = end_time - start_time
log.error(f"❌ Failed video {scene_index + 1} (Scene {scene_id}) after {duration:.2f}s: {e}")
log.error(
f"❌ Failed video {scene_index + 1} (Scene {scene_id}) after {duration:.2f}s: {e}"
)
raise
async def __tag_video(scene: Dict[str, Any]) -> None:
"""Tag a single video scene with semaphore timing instrumentation"""
scene_id = scene.get('id')
scene_id = scene.get("id")
# Pre-semaphore acquisition logging for hypothesis A (semaphore deadlock)
task_start_time = asyncio.get_event_loop().time()
acquisition_start_time = task_start_time
log.debug(f"[DEBUG_HYPOTHESIS_A] Task starting for scene {scene_id} at {task_start_time:.3f}s")
log.debug(
f"[DEBUG_HYPOTHESIS_A] Task starting for scene {scene_id} at {task_start_time:.3f}s"
)
async with semaphore:
try:
# Semaphore acquisition successful logging
acquisition_end_time = asyncio.get_event_loop().time()
acquisition_time = acquisition_end_time - acquisition_start_time
log.debug(f"[DEBUG_HYPOTHESIS_A] Semaphore acquired for scene {scene_id} after {acquisition_time:.3f}s")
log.debug(
f"[DEBUG_HYPOTHESIS_A] Semaphore acquired for scene {scene_id} after {acquisition_time:.3f}s"
)
if scene_id is None:
log.error("Scene missing 'id' field")
return
files = scene.get('files', [])
files = scene.get("files", [])
if not files:
log.error(f"Scene {scene_id} has no files")
return
scene_file = files[0].get('path')
scene_file = files[0].get("path")
if scene_file is None:
log.error(f"Scene {scene_id} file has no path")
return
# Check if scene is VR
is_vr = media_handler.is_vr_scene(scene.get('tags', []))
is_vr = media_handler.is_vr_scene(scene.get("tags", []))
def progress_cb(p: int) -> None:
global video_progress, total_tasks
video_progress[scene_id] = p / 100.0
total_prog = sum(video_progress.values()) / total_tasks
stats = vlm_engine.vlm_engine.get_performance_stats()
total_frames = stats.get("total_frames_processed", 0)
elapsed_seconds = stats.get("elapsed_time", 0.0)
log.info(f"[Throughput] total_frames: {total_frames}")
log.info(f"[Throughput] elapsed_seconds: {elapsed_seconds:.2f}")
if elapsed_seconds > 0:
fpm = (total_frames / elapsed_seconds) * 60.0
else:
fpm = 0.0
log.info(f"[Throughput] calculated_fpm: {fpm:.1f}")
log.info(
f"[Throughput] Frame ~{(p / 100) * 100:.0f}: {fpm:.1f} FPM | progress: {p}%"
)
log.progress(total_prog)
# Process video through VLM Engine with HTTP timing for hypothesis B
processing_start_time = asyncio.get_event_loop().time()
# HTTP request lifecycle tracking start
log.debug(f"[DEBUG_HYPOTHESIS_B] Starting VLM processing for scene {scene_id}: {scene_file}")
log.debug(
f"[DEBUG_HYPOTHESIS_B] Starting VLM processing for scene {scene_id}: {scene_file}"
)
video_result = await vlm_engine.process_video_async(
scene_file,
vr_video=is_vr,
frame_interval=config.config.video_frame_interval,
threshold=config.config.video_threshold,
return_confidence=config.config.video_confidence_return,
progress_callback=progress_cb
progress_callback=progress_cb,
)
# Extract detected tags
@@ -337,13 +397,15 @@ async def __tag_video(scene: Dict[str, Any]) -> None:
# Post-VLM processing logging
processing_end_time = asyncio.get_event_loop().time()
processing_duration = processing_end_time - processing_start_time
log.debug(f"[DEBUG_HYPOTHESIS_B] VLM processing completed for scene {scene_id} in {processing_duration:.2f}s ({len(detected_tags)} detected tags)")
log.debug(
f"[DEBUG_HYPOTHESIS_B] VLM processing completed for scene {scene_id} in {processing_duration:.2f}s ({len(detected_tags)} detected tags)"
)
if detected_tags:
# Clear all existing tags and markers before adding new ones
media_handler.clear_all_tags_from_video(scene)
media_handler.clear_all_markers_from_video(scene_id)
# Add tags to scene
tag_ids = media_handler.get_tag_ids(list(detected_tags), create=True)
media_handler.add_tags_to_video(scene_id, tag_ids)
@@ -351,81 +413,91 @@ async def __tag_video(scene: Dict[str, Any]) -> None:
# Add markers if enabled
if config.config.create_markers:
media_handler.add_markers_to_video_from_dict(scene_id, video_result.tag_timespans)
media_handler.add_markers_to_video_from_dict(
scene_id, video_result.tag_timespans
)
log.info(f"Added markers to scene {scene_id}")
# Remove VLM_TagMe tag from processed scene
media_handler.remove_tagme_tag_from_scene(scene_id)
# Task completion logging
task_end_time = asyncio.get_event_loop().time()
total_task_time = task_end_time - task_start_time
log.debug(f"[DEBUG_HYPOTHESIS_A] Task completed for scene {scene_id} in {total_task_time:.2f}s")
log.debug(
f"[DEBUG_HYPOTHESIS_A] Task completed for scene {scene_id} in {total_task_time:.2f}s"
)
except Exception as e:
# Exception handling with detailed logging for hypothesis E
exception_time = asyncio.get_event_loop().time()
error_type = type(e).__name__
log.debug(f"[DEBUG_HYPOTHESIS_E] Task exception for scene {scene_id}: {error_type}: {str(e)} at {exception_time:.3f}s")
scene_id = scene.get('id', 'unknown')
log.debug(
f"[DEBUG_HYPOTHESIS_E] Task exception for scene {scene_id}: {error_type}: {str(e)} at {exception_time:.3f}s"
)
scene_id = scene.get("id", "unknown")
log.error(f"Error processing video scene {scene_id}: {e}")
# Add error tag to failed scene if we have a valid ID
if scene_id != 'unknown':
if scene_id != "unknown":
media_handler.add_error_scene(scene_id)
async def __find_marker_settings(scene: Dict[str, Any]) -> None:
"""Find optimal marker settings for a scene"""
try:
scene_id = scene.get('id')
scene_id = scene.get("id")
if scene_id is None:
log.error("Scene missing 'id' field")
return
files = scene.get('files', [])
files = scene.get("files", [])
if not files:
log.error(f"Scene {scene_id} has no files")
return
scene_file = files[0].get('path')
scene_file = files[0].get("path")
if scene_file is None:
log.error(f"Scene {scene_id} file has no path")
return
# Get existing markers for the scene
existing_markers = media_handler.get_scene_markers(scene_id)
# Convert markers to desired timespan format
desired_timespan_data = {}
for marker in existing_markers:
tag_name = marker['primary_tag']['name']
tag_name = marker["primary_tag"]["name"]
desired_timespan_data[tag_name] = TimeFrame(
start=marker['seconds'],
end=marker.get('end_seconds', marker['seconds'] + 1),
total_confidence=1.0
start=marker["seconds"],
end=marker.get("end_seconds", marker["seconds"] + 1),
total_confidence=1.0,
)
# Find optimal settings
optimal_settings = await vlm_engine.find_optimal_marker_settings_async(
existing_json={}, # No existing JSON data
desired_timespan_data=desired_timespan_data
desired_timespan_data=desired_timespan_data,
)
# Output results
log.info(f"Optimal marker settings found for scene {scene_id}:")
log.info(json.dumps(optimal_settings, indent=2))
except Exception as e:
scene_id = scene.get('id', 'unknown')
scene_id = scene.get("id", "unknown")
log.error(f"Error finding marker settings for scene {scene_id}: {e}")
# ----------------- Cleanup -----------------
async def cleanup() -> None:
    """Release resources held by the global VLM engine, if one was created."""
    engine = vlm_engine.vlm_engine
    if engine:
        await engine.shutdown()
# Run main function if script is executed directly
if __name__ == "__main__":
try:

View File

@@ -13,49 +13,57 @@ import json
# Use PythonDepManager for dependency management
from vlm_engine import VLMEngine
from vlm_engine.config_models import (
EngineConfig,
PipelineConfig,
ModelConfig,
PipelineModelConfig
EngineConfig,
PipelineConfig,
ModelConfig,
PipelineModelConfig,
)
import haven_vlm_config as config
# Configure logging
logging.basicConfig(level=logging.CRITICAL)
logging.basicConfig(
level=logging.WARNING, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
@dataclass
class TimeFrame:
"""Represents a time frame with start and end times"""
start: float
end: float
total_confidence: Optional[float] = None
def to_json(self) -> str:
"""Convert to JSON string"""
return json.dumps({
"start": self.start,
"end": self.end,
"total_confidence": self.total_confidence
})
return json.dumps(
{
"start": self.start,
"end": self.end,
"total_confidence": self.total_confidence,
}
)
def __str__(self) -> str:
return f"TimeFrame(start={self.start}, end={self.end}, confidence={self.total_confidence})"
@dataclass
class VideoTagInfo:
"""Represents video tagging information"""
video_duration: float
video_tags: Dict[str, Set[str]]
tag_totals: Dict[str, Dict[str, float]]
tag_timespans: Dict[str, Dict[str, List[TimeFrame]]]
@classmethod
def from_json(cls, json_data: Dict[str, Any]) -> 'VideoTagInfo':
def from_json(cls, json_data: Dict[str, Any]) -> "VideoTagInfo":
"""Create VideoTagInfo from JSON data"""
logger.debug(f"Creating VideoTagInfo from JSON: {json_data}")
# Convert tag_timespans to TimeFrame objects
tag_timespans = {}
for category, tags in json_data.get("tag_timespans", {}).items():
@@ -65,46 +73,65 @@ class VideoTagInfo:
TimeFrame(
start=tf["start"],
end=tf["end"],
total_confidence=tf.get("total_confidence")
) for tf in timeframes
total_confidence=tf.get("total_confidence"),
)
for tf in timeframes
]
return cls(
video_duration=json_data.get("video_duration", 0.0),
video_tags=json_data.get("video_tags", {}),
tag_totals=json_data.get("tag_totals", {}),
tag_timespans=tag_timespans
tag_timespans=tag_timespans,
)
def __str__(self) -> str:
return f"VideoTagInfo(duration={self.video_duration}, tags={len(self.video_tags)}, timespans={len(self.tag_timespans)})"
class HavenVLMEngine:
"""Main VLM Engine integration class"""
def __init__(self):
self.engine: Optional[VLMEngine] = None
self.engine_config: Optional[EngineConfig] = None
self._initialized = False
def _configure_logging(self) -> None:
"""Configure logging levels based on plugin config."""
vlm_config = config.config.vlm_engine_config
trace_enabled = vlm_config.get("trace_logging", False)
if trace_enabled:
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logging.getLogger("logger").setLevel(logging.DEBUG)
logging.getLogger("multiplexer_llm").setLevel(logging.DEBUG)
logger.debug("Trace logging enabled for vlm-engine and multiplexer-llm")
else:
logger.setLevel(logging.WARNING)
async def initialize(self) -> None:
"""Initialize the VLM Engine with configuration"""
if self._initialized:
return
try:
self._configure_logging()
logger.info("Initializing Haven VLM Engine...")
# Convert config dict to EngineConfig objects
self.engine_config = self._create_engine_config()
# Create and initialize the engine
self.engine = VLMEngine(config=self.engine_config)
await self.engine.initialize()
self._initialized = True
logger.info("Haven VLM Engine initialized successfully")
except Exception as e:
logger.error(f"Failed to initialize VLM Engine: {e}")
raise
@@ -112,24 +139,23 @@ class HavenVLMEngine:
def _create_engine_config(self) -> EngineConfig:
"""Create EngineConfig from the configuration"""
vlm_config = config.config.vlm_engine_config
# Create pipeline configs
pipelines = {}
for pipeline_name, pipeline_data in vlm_config["pipelines"].items():
models = [
PipelineModelConfig(
name=model["name"],
inputs=model["inputs"],
outputs=model["outputs"]
) for model in pipeline_data["models"]
name=model["name"], inputs=model["inputs"], outputs=model["outputs"]
)
for model in pipeline_data["models"]
]
pipelines[pipeline_name] = PipelineConfig(
inputs=pipeline_data["inputs"],
output=pipeline_data["output"],
short_name=pipeline_data["short_name"],
version=pipeline_data["version"],
models=models
models=models,
)
# Create model configs with new architectural changes
@@ -141,17 +167,21 @@ class HavenVLMEngine:
for endpoint in model_data.get("multiplexer_endpoints", []):
# Validate that max_concurrent is present
if "max_concurrent" not in endpoint:
raise ValueError(f"Endpoint '{endpoint.get('name', 'unnamed')}' is missing required 'max_concurrent' parameter")
multiplexer_endpoints.append({
"base_url": endpoint["base_url"],
"api_key": endpoint.get("api_key", ""),
"name": endpoint["name"],
"weight": endpoint.get("weight", 5),
"is_fallback": endpoint.get("is_fallback", False),
"max_concurrent": endpoint["max_concurrent"]
})
raise ValueError(
f"Endpoint '{endpoint.get('name', 'unnamed')}' is missing required 'max_concurrent' parameter"
)
multiplexer_endpoints.append(
{
"base_url": endpoint["base_url"],
"api_key": endpoint.get("api_key", ""),
"name": endpoint["name"],
"weight": endpoint.get("weight", 5),
"is_fallback": endpoint.get("is_fallback", False),
"max_concurrent": endpoint["max_concurrent"],
}
)
models[model_name] = ModelConfig(
type=model_data["type"],
model_file_name=model_data["model_file_name"],
@@ -160,23 +190,26 @@ class HavenVLMEngine:
model_identifier=model_data["model_identifier"],
model_version=model_data["model_version"],
use_multiplexer=model_data.get("use_multiplexer", False),
max_concurrent_requests=model_data.get("max_concurrent_requests", 10),
instance_count=model_data.get("instance_count",1),
max_batch_size=model_data.get("max_batch_size",1),
max_concurrent_requests=model_data.get(
"max_concurrent_requests", 10
),
instance_count=model_data.get("instance_count", 1),
max_batch_size=model_data.get("max_batch_size", 1),
multiplexer_endpoints=multiplexer_endpoints,
tag_list=model_data.get("tag_list", [])
tag_list=model_data.get("tag_list", []),
)
else:
models[model_name] = ModelConfig(
type=model_data["type"],
model_file_name=model_data["model_file_name"]
model_file_name=model_data["model_file_name"],
)
return EngineConfig(
active_ai_models=vlm_config["active_ai_models"],
pipelines=pipelines,
models=models,
category_config=vlm_config["category_config"]
category_config=vlm_config["category_config"],
loglevel="DEBUG" if vlm_config.get("trace_logging", False) else "WARNING",
)
async def process_video(
@@ -187,7 +220,7 @@ class HavenVLMEngine:
threshold: Optional[float] = None,
return_confidence: Optional[bool] = None,
existing_json: Optional[Dict[str, Any]] = None,
progress_callback: Optional[Callable[[int], None]] = None
progress_callback: Optional[Callable[[int], None]] = None,
) -> VideoTagInfo:
"""Process a video using the VLM Engine"""
if not self._initialized:
@@ -195,41 +228,53 @@ class HavenVLMEngine:
try:
logger.info(f"Processing video: {video_path}")
# Use config defaults if not provided
frame_interval = frame_interval or config.config.video_frame_interval
threshold = threshold or config.config.video_threshold
return_confidence = return_confidence if return_confidence is not None else config.config.video_confidence_return
return_confidence = (
return_confidence
if return_confidence is not None
else config.config.video_confidence_return
)
# Process video through the engine
results = await self.engine.process_video(
video_path,
frame_interval=frame_interval,
progress_callback=progress_callback
progress_callback=progress_callback,
)
logger.info(f"Video processing completed for: {video_path}")
logger.debug(f"Raw results structure: {type(results)}")
# Extract video_tag_info from the nested structure
if isinstance(results, dict) and 'video_tag_info' in results:
video_tag_data = results['video_tag_info']
logger.debug(f"Using video_tag_info from results: {video_tag_data.keys()}")
if isinstance(results, dict) and "video_tag_info" in results:
video_tag_data = results["video_tag_info"]
logger.debug(
f"Using video_tag_info from results: {video_tag_data.keys()}"
)
else:
# Fallback: assume results is already in the correct format
video_tag_data = results
logger.debug(f"Using results directly: {video_tag_data.keys() if isinstance(video_tag_data, dict) else type(video_tag_data)}")
logger.debug(
f"Using results directly: {video_tag_data.keys() if isinstance(video_tag_data, dict) else type(video_tag_data)}"
)
return VideoTagInfo.from_json(video_tag_data)
except Exception as e:
logger.error(f"Error processing video {video_path}: {e}")
raise
def get_performance_stats(self) -> Dict[str, Any]:
"""Get performance statistics from the VLM Engine."""
if not self._initialized or not self.engine:
return {}
return self.engine.get_performance_stats()
async def find_optimal_marker_settings(
self,
existing_json: Dict[str, Any],
desired_timespan_data: Dict[str, TimeFrame]
self, existing_json: Dict[str, Any], desired_timespan_data: Dict[str, TimeFrame]
) -> Dict[str, Any]:
"""Find optimal marker settings based on existing data"""
if not self._initialized:
@@ -237,25 +282,24 @@ class HavenVLMEngine:
try:
logger.info("Finding optimal marker settings...")
# Convert TimeFrame objects to dict format
desired_data = {}
for key, timeframe in desired_timespan_data.items():
desired_data[key] = {
"start": timeframe.start,
"end": timeframe.end,
"total_confidence": timeframe.total_confidence
"total_confidence": timeframe.total_confidence,
}
# Call the engine's optimization method
results = await self.engine.optimize_timeframe_settings(
existing_json_data=existing_json,
desired_timespan_data=desired_data
existing_json_data=existing_json, desired_timespan_data=desired_data
)
logger.info("Optimal marker settings found")
return results
except Exception as e:
logger.error(f"Error finding optimal marker settings: {e}")
raise
@@ -267,14 +311,16 @@ class HavenVLMEngine:
# VLMEngine doesn't have a shutdown method, just perform basic cleanup
logger.info("VLM Engine cleanup completed")
self._initialized = False
except Exception as e:
logger.error(f"Error during VLM Engine cleanup: {e}")
self._initialized = False
# Global VLM Engine instance
vlm_engine = HavenVLMEngine()
# Convenience functions for backward compatibility
async def process_video_async(
video_path: str,
@@ -283,17 +329,24 @@ async def process_video_async(
threshold: Optional[float] = None,
return_confidence: Optional[bool] = None,
existing_json: Optional[Dict[str, Any]] = None,
progress_callback: Optional[Callable[[int], None]] = None
progress_callback: Optional[Callable[[int], None]] = None,
) -> VideoTagInfo:
"""Process video asynchronously"""
return await vlm_engine.process_video(
video_path, vr_video, frame_interval, threshold, return_confidence, existing_json,
progress_callback=progress_callback
video_path,
vr_video,
frame_interval,
threshold,
return_confidence,
existing_json,
progress_callback=progress_callback,
)
async def find_optimal_marker_settings_async(
existing_json: Dict[str, Any],
desired_timespan_data: Dict[str, TimeFrame]
existing_json: Dict[str, Any], desired_timespan_data: Dict[str, TimeFrame]
) -> Dict[str, Any]:
"""Find optimal marker settings asynchronously"""
return await vlm_engine.find_optimal_marker_settings(existing_json, desired_timespan_data)
return await vlm_engine.find_optimal_marker_settings(
existing_json, desired_timespan_data
)

View File

@@ -1,6 +1,6 @@
name: AIOverhaul
description: AI Overhaul for Stash with a full plugin engine included to install and manage asynchronous stash plugins for AI or other purposes.
version: 0.9.2
version: 0.9.3
url: https://discourse.stashapp.cc/t/aioverhaul/4847
ui:
javascript:
@@ -30,6 +30,13 @@ ui:
- ws://127.0.0.1:4153
- https://127.0.0.1:4153
# Add additional urls here for the stash-ai-server if your browser is not on the same host
script-src:
- 'self'
- http://localhost:4153
- https://localhost:4153
- 'unsafe-inline'
- 'unsafe-eval'
# Allow plugin JavaScript files to be loaded from the backend server
interface: raw
exec:
- python

View File

@@ -1621,10 +1621,142 @@ const PluginSettings = () => {
React.createElement("button", { style: smallBtn, onClick: handleConfigure }, openConfig === p.name ? 'Close' : 'Configure'))));
}))));
}
// Component to handle dynamic loading of custom field renderer scripts.
// Looks for a globally-registered renderer function, and if absent tries to
// fetch + eval candidate scripts from the backend, polling for registration.
function CustomFieldLoader({ fieldType, pluginName, field, backendBase, savePluginSetting, loadPluginSettings, setError, renderDefaultInput }) {
    var _a;
    // Prefer the host app's React instance; fall back to a global React.
    const React = ((_a = window.PluginApi) === null || _a === void 0 ? void 0 : _a.React) || window.React;
    const [renderer, setRenderer] = React.useState(null);
    const [loading, setLoading] = React.useState(true);
    const [failed, setFailed] = React.useState(false);
    React.useEffect(() => {
        // Global names a renderer script may register under, most specific first.
        const pluginSpecificName = `${pluginName}_${fieldType}_Renderer`;
        const genericName = `${fieldType}_Renderer`;
        const legacyName = fieldType === 'tag_list_editor' ? 'SkierAITaggingTagListEditor' : null;
        // Check if renderer is already available
        const checkRenderer = () => {
            const found = window[pluginSpecificName] ||
                window[genericName] ||
                (legacyName ? window[legacyName] : null);
            if (found && typeof found === 'function') {
                setRenderer(() => found);
                setLoading(false);
                return true;
            }
            return false;
        };
        // Fast path: renderer was already registered by a previous load.
        if (checkRenderer())
            return;
        // Try to load the script from the backend server
        // Normalize backendBase to ensure it doesn't end with a slash
        const normalizedBackendBase = backendBase.replace(/\/+$/, '');
        const possiblePaths = [
            `${normalizedBackendBase}/plugins/${pluginName}/${fieldType}.js`,
            `${normalizedBackendBase}/dist/plugins/${pluginName}/${fieldType}.js`,
        ];
        // Also try camelCase version
        const typeParts = fieldType.split('_');
        if (typeParts.length > 1) {
            const camelCase = typeParts[0] + typeParts.slice(1).map(p => p.charAt(0).toUpperCase() + p.slice(1)).join('');
            possiblePaths.push(`${normalizedBackendBase}/plugins/${pluginName}/${camelCase}.js`);
            possiblePaths.push(`${normalizedBackendBase}/dist/plugins/${pluginName}/${camelCase}.js`);
        }
        // Try each candidate path in order until one registers a renderer.
        let attemptIndex = 0;
        const tryLoad = () => {
            if (attemptIndex >= possiblePaths.length) {
                // All candidate paths exhausted — give up and mark failure.
                setLoading(false);
                setFailed(true);
                if (window.AIDebug) {
                    console.warn('[PluginSettings.CustomFieldLoader] Failed to load renderer for', fieldType, 'tried:', possiblePaths);
                }
                return;
            }
            const path = possiblePaths[attemptIndex];
            // Use fetch + eval instead of script tag to work around CSP script-src-elem restrictions
            // This uses script-src (which has unsafe-eval) instead of script-src-elem
            fetch(path)
                .then(response => {
                    if (!response.ok) {
                        throw new Error(`HTTP ${response.status}`);
                    }
                    return response.text();
                })
                .then(scriptText => {
                    console.log('[PluginSettings.CustomFieldLoader] Fetched script:', path);
                    try {
                        // Eval the script - this uses script-src (with unsafe-eval) instead of script-src-elem
                        // Create a new function context to avoid polluting global scope
                        const scriptFunction = new Function(scriptText);
                        scriptFunction();
                        // Wait a bit for the script to register, then check again
                        setTimeout(() => {
                            if (checkRenderer()) {
                                return;
                            }
                            // Script loaded but renderer not found, try next path
                            attemptIndex++;
                            tryLoad();
                        }, 200);
                    }
                    catch (evalError) {
                        // Script fetched but failed to evaluate — move on to the next path.
                        console.error('[PluginSettings.CustomFieldLoader] Error evaluating script:', path, evalError);
                        attemptIndex++;
                        tryLoad();
                    }
                })
                .catch(error => {
                    // Network / HTTP failure for this path — move on to the next path.
                    console.warn('[PluginSettings.CustomFieldLoader] Failed to fetch script:', path, error);
                    attemptIndex++;
                    tryLoad();
                });
        };
        tryLoad();
        // Also poll for renderer in case it loads asynchronously (max 10 seconds)
        let pollCount = 0;
        const pollInterval = setInterval(() => {
            pollCount++;
            if (checkRenderer() || pollCount > 20) {
                clearInterval(pollInterval);
                // NOTE(review): 'renderer' here is the value captured when the
                // effect ran (stale closure) — checkRenderer() handles the
                // found case, so this only gates the timeout path; confirm intended.
                if (pollCount > 20 && !renderer) {
                    setLoading(false);
                    setFailed(true);
                }
            }
        }, 500);
        // Cleanup: stop polling when the component unmounts or deps change.
        return () => clearInterval(pollInterval);
    }, [fieldType, pluginName]);
    // Renderer found: delegate rendering of the field to it.
    if (renderer) {
        return React.createElement(renderer, {
            field: field,
            pluginName: pluginName,
            backendBase: backendBase,
            savePluginSetting: savePluginSetting,
            loadPluginSettings: loadPluginSettings,
            setError: setError
        });
    }
    // Still fetching/polling: show a lightweight placeholder.
    if (loading) {
        return React.createElement('div', { style: { padding: 8, fontSize: 11, color: '#888', fontStyle: 'italic' } }, `Loading ${fieldType} editor...`);
    }
    // Failed to load - use default input if provided, otherwise show error message
    if (failed && renderDefaultInput) {
        return renderDefaultInput();
    }
    if (failed) {
        return React.createElement('div', { style: { padding: 8, fontSize: 11, color: '#f85149' } }, `Failed to load ${fieldType} editor. Using default input.`);
    }
    return null;
}
function FieldRenderer({ f, pluginName }) {
const t = f.type || 'string';
const label = f.label || f.key;
const savedValue = f.value === undefined ? f.default : f.value;
// Define styles and computed values early so they're available to callbacks
const changed = savedValue !== undefined && savedValue !== null && f.default !== undefined && savedValue !== f.default;
const inputStyle = { padding: 6, background: '#111', color: '#eee', border: '1px solid #333', minWidth: 120 };
const wrap = { position: 'relative', padding: '4px 4px 6px', border: '1px solid #2a2a2a', borderRadius: 4, background: '#101010' };
const resetStyle = { position: 'absolute', top: 2, right: 4, fontSize: 9, padding: '1px 4px', cursor: 'pointer' };
const labelTitle = f && f.description ? String(f.description) : undefined;
const labelEl = React.createElement('span', { title: labelTitle }, React.createElement(React.Fragment, null, label, changed ? React.createElement('span', { style: { color: '#ffa657', fontSize: 10 } }, ' •') : null));
if (t === 'path_map') {
const containerStyle = {
position: 'relative',
@@ -1643,15 +1775,81 @@ const PluginSettings = () => {
changedMap && React.createElement("span", { style: { color: '#ffa657', fontSize: 10 } }, "\u2022")),
React.createElement(PathMapEditor, { value: savedValue, defaultValue: f.default, onChange: async (next) => { await savePluginSetting(pluginName, f.key, next); }, onReset: async () => { await savePluginSetting(pluginName, f.key, null); }, variant: "plugin" })));
}
const changed = savedValue !== undefined && savedValue !== null && f.default !== undefined && savedValue !== f.default;
const inputStyle = { padding: 6, background: '#111', color: '#eee', border: '1px solid #333', minWidth: 120 };
const wrap = { position: 'relative', padding: '4px 4px 6px', border: '1px solid #2a2a2a', borderRadius: 4, background: '#101010' };
const resetStyle = { position: 'absolute', top: 2, right: 4, fontSize: 9, padding: '1px 4px', cursor: 'pointer' };
const labelTitle = f && f.description ? String(f.description) : undefined;
const labelEl = React.createElement("span", { title: labelTitle },
label,
" ",
changed && React.createElement("span", { style: { color: '#ffa657', fontSize: 10 } }, "\u2022"));
// Check for custom field renderers registered by plugins
// Supports both plugin-specific (pluginName_type_Renderer) and generic (type_Renderer) naming
if (t && typeof t === 'string' && t !== 'string' && t !== 'boolean' && t !== 'number' && t !== 'select' && t !== 'path_map') {
const pluginSpecificName = `${pluginName}_${t}_Renderer`;
const genericName = `${t}_Renderer`;
const customRenderer = window[pluginSpecificName] || window[genericName];
const renderer = customRenderer;
// Debug logging
if (window.AIDebug) {
console.log('[PluginSettings.FieldRenderer] Custom field type detected:', {
type: t,
pluginName: pluginName,
pluginSpecificName: pluginSpecificName,
genericName: genericName,
hasPluginSpecific: !!window[pluginSpecificName],
hasGeneric: !!window[genericName],
renderer: renderer ? typeof renderer : 'null'
});
}
if (renderer && typeof renderer === 'function') {
if (window.AIDebug) {
console.log('[PluginSettings.FieldRenderer] Using custom renderer for', t);
}
return React.createElement(renderer, {
field: f,
pluginName: pluginName,
backendBase: backendBase,
savePluginSetting: savePluginSetting,
loadPluginSettings: loadPluginSettings,
setError: setError
});
}
else {
// Renderer not found - use CustomFieldLoader to dynamically load it
// CustomFieldLoader will handle fallback to default input if renderer not found
return React.createElement(CustomFieldLoader, {
fieldType: t,
pluginName: pluginName,
field: f,
backendBase: backendBase,
savePluginSetting: savePluginSetting,
loadPluginSettings: loadPluginSettings,
setError: setError,
// Pass the default input rendering logic as fallback
renderDefaultInput: () => {
// This will be called if renderer not found - render default text input
const display = savedValue === undefined || savedValue === null ? '' : String(savedValue);
const inputKey = `${pluginName}:${f.key}:${display}`;
const handleBlur = async (event) => {
var _a;
const next = (_a = event.target.value) !== null && _a !== void 0 ? _a : '';
if (next === display)
return;
await savePluginSetting(pluginName, f.key, next);
};
const handleKeyDown = (event) => {
if (event.key === 'Enter') {
event.preventDefault();
event.target.blur();
}
};
const handleReset = async () => {
await savePluginSetting(pluginName, f.key, null);
};
return React.createElement('div', { style: wrap }, React.createElement('label', { style: { fontSize: 12 } }, React.createElement(React.Fragment, null, labelEl, React.createElement('br'), React.createElement('input', {
key: inputKey,
style: inputStyle,
defaultValue: display,
onBlur: handleBlur,
onKeyDown: handleKeyDown
}))), changed ? React.createElement('button', { style: resetStyle, onClick: handleReset }, 'Reset') : null);
}
});
}
}
if (t === 'boolean') {
return (React.createElement("div", { style: wrap },
React.createElement("label", { style: { fontSize: 12, display: 'flex', alignItems: 'center', gap: 8 } },

View File

@@ -619,11 +619,13 @@
}, [onSceneClicked]);
// Render scene in queue list format (matching the Queue tab exactly)
const renderQueueScene = useCallback((scene, index) => {
var _a, _b, _c;
const title = scene.title || `Scene ${scene.id}`;
const studio = ((_a = scene.studio) === null || _a === void 0 ? void 0 : _a.name) || '';
const performers = ((_b = scene.performers) === null || _b === void 0 ? void 0 : _b.map(p => p.name).join(', ')) || '';
const screenshot = (_c = scene.paths) === null || _c === void 0 ? void 0 : _c.screenshot;
var _a, _b, _c, _d, _e;
const filepath = ((_b = (_a = scene.files) === null || _a === void 0 ? void 0 : _a[0]) === null || _b === void 0 ? void 0 : _b.path) || '';
const filename = filepath ? filepath.replace(/\\/g, '/').split('/').pop() || '' : '';
const title = scene.title || filename || `Scene ${scene.id}`;
const studio = ((_c = scene.studio) === null || _c === void 0 ? void 0 : _c.name) || '';
const performers = ((_d = scene.performers) === null || _d === void 0 ? void 0 : _d.map(p => p.name).join(', ')) || '';
const screenshot = (_e = scene.paths) === null || _e === void 0 ? void 0 : _e.screenshot;
const date = scene.date || scene.created_at || '';
return React.createElement('li', {
key: scene.id,
@@ -647,10 +649,11 @@
className: 'queue-scene-details'
}, [
React.createElement('span', { key: 'title', className: 'queue-scene-title' }, title),
filepath ? React.createElement('span', { key: 'filepath', className: 'queue-scene-filepath', title: filepath, style: { fontSize: '0.75em', color: '#888', overflow: 'hidden', textOverflow: 'ellipsis', whiteSpace: 'nowrap', maxWidth: '300px', display: 'block' } }, filepath) : null,
React.createElement('span', { key: 'studio', className: 'queue-scene-studio' }, studio),
React.createElement('span', { key: 'performers', className: 'queue-scene-performers' }, performers),
React.createElement('span', { key: 'date', className: 'queue-scene-date' }, date)
])
].filter(Boolean))
])));
}, [handleSceneClick]);
// Render recommender selector when recommenders are available

View File

@@ -12,15 +12,25 @@
/* HotOrNot */
.hon-performer-image,
.hon-performer-card,
.hon-scene-image,
.hon-selection-image,
.hon-image-image-container,
.hon-image-image,
/* Deck Viewer */
.swiper-zoom-container,
.gallery-cover-link,
/* O Stats */
.custom-stats-row .stats-element img,
#on-this-day-section [style*="position: relative; height: 400px"]
#on-this-day-section [style*="position: relative; height: 400px"],
/* Sprite Tab */
.sprite-cell
{
filter: blur(30px);
filter: blur(30px);
transition: filter 0.25s ease;
}
/* === LESS BLUR === */
@@ -28,7 +38,12 @@ filter: blur(30px);
/* StashBattle */
.pwr-scene-info,
/* Deck Viewer */
.gallery-cover-title,
.gallery-performers,
/* HotOrNot */
.hon-selection-name,
.hon-performer-info.hon-scene-info,
/* O Stats */
@@ -37,9 +52,16 @@ filter: blur(30px);
#on-this-day-section [style*="display: flex"][style*="cursor: pointer"] img + div,
#on-this-day-section > div:last-child
{
filter: blur(2px);
filter: blur(2px);
transition: filter 0.25s ease;
}
/* Deck Viewer */
.swiper-zoom-container:hover,
.gallery-cover-link:hover,
.gallery-cover-title:hover,
.gallery-performers:hover,
/* StashBattle */
.pwr-scene-image-container:hover,
.pwr-scene-image-container:hover .pwr-hover-preview,
@@ -48,10 +70,17 @@ filter: blur(2px);
/* HotOrNot */
.hon-performer-image:hover,
.hon-performer-card:hover,
.hon-scene-image:hover,
.hon-image-image-container:hover,
.hon-image-image:hover,
.hon-performer-info.hon-scene-info:hover,
.hon-selection-card:hover,
.hon-selection-name:hover,
.hon-selection-image:hover,
/* Sprite Tab */
.sprite-cell:hover,
/* O Stats */
.custom-stats-row .stats-element:hover,

View File

@@ -1,134 +1,175 @@
/* [Global changes] Blur NSFW images and unblur on mouse over */
/*Credit: fl0w#9497 */
/* === MORE BLUR === */
/* common */
.thumbnail-container img,
.detail-header-image,
.wall-item-gallery,
/* scene */
.scene-card-preview,
.vjs-poster,
video,
.scene-player-container,
.scene-cover,
.scene-card-preview,
.scrubber-item,
.scene-image,
.scene-card img,
.wall-item-media,
.wall-item.show-title,
/* image */
.image-card-preview,
.image-card img,
.image-thumbnail,
.Lightbox-carousel,
.image-image,
.gallery-image,
.react-photo-gallery--gallery img,
/* group */
.group-card-image,
.group-images,
/* gallery */
.gallery-image,
.gallery-card-image,
table > tbody > tr > td > a > img.w-100,
.gallery-card img,
.gallery-cover img,
.GalleryWallCard.GalleryWallCard-portrait,
.GalleryWallCard.GalleryWallCard-landscape,
/* performer */
.performer-card-image,
img.performer,
.performer-card img,
/* studio */
.studio-card-image,
.studio-card img,
/* tag */
.tag-card-image
.tag-card img
{
filter: blur(30px);
filter: blur(30px);
transition: filter 0.25s ease;
}
/* === LESS BLUR === */
/* common */
.card-section-title,
.detail-item-value.description,
.detail-item-value,
.TruncatedText,
/* scene */
.scene-studio-overlay,
.scene-header > h3,
h3.scene-header,
.studio-logo,
.image-thumbnail,
.TruncatedText.scene-card__description,
.queue-scene-details,
.marker-wall,
/* image */
h3.image-header,
/* performer */
.performer-name,
.card-section,
.name-data,
.aliases-data,
/* gallery */
.gallery-header.no-studio,
.TruncatedText.gallery-card__description,
/* studio */
.studio-name,
.studio-overlay a,
.studio-logo,
.studio-parent-studios,
/* group */
.group-details > div > h2,
/* gallery */
h3.gallery-header,
.TruncatedText.gallery-card__description,
/* studio */
.studio-details .logo,
.studio-details > div > h2,
.studio-card__details,
.studio-parent-studios,
/* image */
h3.image-header,
.Lightbox-carousel:hover,
.TruncatedText.image-card__description,
/* tag */
.logo-container > .logo,
.logo-container > h2,
.TruncatedText.tag-description,
.tag-parent-tags
.tag-item.tag-link.badge.badge-secondary,
.tag-name
{
filter: blur(2px);
}
/* === UNBLUR ON HOVER === */
/* common */
.thumbnail-section:hover *,
.card:hover .card-section-title,
.detail-item-value:hover,
.scene-cover:hover,
.card-section-title:hover,
.TruncatedText.tag-description:hover,
.detail-item-value.description:hover,
.TruncatedText:hover,
/* scene */
.card:hover .scene-studio-overlay,
.video-js:hover .vjs-poster,
video:hover,
.scene-player-container:hover,
.scene-card-preview:hover,
.queue-scene-details:hover,
.scene-card:hover img,
.TruncatedText.scene-card__description:hover,
.scene-player-container:hover,
.scene-header:hover > h3,
div:hover > .scene-header,
.studio-logo:hover,
.scene-cover:hover,
.image-thumbnail:hover,
.scene-card-preview:hover,
.scrubber-item:hover,
.scene-image:hover,
.TruncatedText.scene-card__description:hover,
.wall-item-media:hover,
.marker-wall:hover,
.wall-item.show-title:hover,
/* image */
.image-image:hover,
.detail-header-image:hover,
div:hover > .image-header,
.gallery-image:hover,
.image-card:hover img,
.react-photo-gallery--gallery img:hover,
.image-thumbnail:hover,
.TruncatedText.image-card__description:hover,
.wall-item:hover img,
.image-image:hover,
/* group */
.group-images:hover,
.group-details > div > h2:hover,
.group-card:hover img,
/* gallery */
div:hover > .gallery-header,
table > tbody > tr > td:hover > a > img.w-100,
.gallery-header.no-studio,
.gallery-card:hover img,
.gallery-cover:hover img,
.gallery-image:hover,
.gallery-card-image:hover,
.TruncatedText.gallery-card__description:hover,
.GalleryWallCard.GalleryWallCard-portrait:hover,
.GalleryWallCard.GalleryWallCard-landscape:hover,
/* performer */
img.performer:hover,
.performer-card-image:hover,
.performer-name:hover,
.card-section:hover,
.name-data:hover,
.aliases-data:hover,
.performer-card img:hover,
/* studio */
.studio-details .logo:hover,
.studio-details:hover > div > h2,
.studio-card__details:hover,
.studio-name:hover,
.studio-overlay:hover a,
.studio-card:hover img,
.studio-parent-studios:hover,
.studio-logo:hover,
/* tag */
.logo-container > .logo:hover,
.logo-container:hover > h2,
.TruncatedText.tag-description:hover,
.tag-parent-tags:hover
.tag-card:hover img,
.tag-item.tag-link.badge.badge-secondary:hover,
.tag-name:hover
{
filter: blur(0px);
filter: blur(0);
transition: filter 0.25s ease;
}
/*Credit: fl0w#9497 */

View File

@@ -1,16 +1,48 @@
function sfw_mode() {
let sfw_mediaObserver = null;
let sfw_playListener = null;
let sfw_extraListeners = null;
async function getSfwConfig() {
try {
const response = await fetch('/graphql', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
query: `{
configuration {
plugins
}
}`
}),
});
const result = await response.json();
const pluginSettings = result.data.configuration.plugins.sfwswitch;
return pluginSettings?.audio_setting === true;
} catch (e) {
console.error("SFW Switch: Could not fetch config", e);
return false;
}
}
async function sfw_mode() {
const stash_css = sfwswitch_findstashcss();
const button = document.getElementById("plugin_sfw");
if (!stash_css) return;
const sfwState = localStorage.getItem("sfw_mode") === "true";
const audioMuteEnabled = await getSfwConfig();
// Apply saved state to the stylesheet
stash_css.disabled = !sfwState;
// Update button color
button.style.color = sfwState ? "#5cff00" : "#f5f8fa";
if (sfwState && audioMuteEnabled) {
sfw_mute_all_media();
} else {
sfw_unmute_all_media();
}
if (button) {
button.style.color = sfwState ? "#5cff00" : "#f5f8fa";
}
}
function sfwswitch_createbutton() {
@@ -45,22 +77,101 @@ function sfwswitch_createbutton() {
setTimeout(() => clearInterval(intervalId), 10000);
}
function sfwswitch_switcher() {
const stash_css = sfwswitch_findstashcss();
if (!stash_css) {
console.error("SFW stylesheet not found.");
return;
// Function to strictly handle the muted state
function sfw_forceMute(media) {
if (!media) return;
media.muted = true;
}
function sfw_mute_all_media() {
// Initial sweep
document.querySelectorAll("audio, video").forEach(sfw_forceMute);
// Global event listener for play, seek, and volume changes
if (!sfw_playListener) {
sfw_playListener = function(e) {
if (e.target.tagName === "VIDEO" || e.target.tagName === "AUDIO") {
sfw_forceMute(e.target);
}
};
document.addEventListener("play", sfw_playListener, true);
document.addEventListener("volumechange", sfw_playListener, true);
document.addEventListener("loadeddata", sfw_playListener, true);
document.addEventListener("seeking", sfw_playListener, true);
}
// Toggle stylesheet
stash_css.disabled = !stash_css.disabled;
// MutationObserver for content loaded via AJAX/Dynamic updates
if (!sfw_mediaObserver) {
sfw_mediaObserver = new MutationObserver(mutations => {
for (const mutation of mutations) {
mutation.addedNodes.forEach(node => {
if (node.tagName === "VIDEO" || node.tagName === "AUDIO") {
sfw_forceMute(node);
} else if (node.querySelectorAll) {
node.querySelectorAll("video, audio").forEach(sfw_forceMute);
}
});
}
});
sfw_mediaObserver.observe(document.body, { childList: true, subtree: true });
}
}
// Save new state to localStorage
localStorage.setItem("sfw_mode", !stash_css.disabled);
function sfw_unmute_all_media() {
// 1. Remove listeners FIRST to prevent them from firing during the unmute loop
if (sfw_playListener) {
document.removeEventListener("play", sfw_playListener, true);
document.removeEventListener("volumechange", sfw_playListener, true);
document.removeEventListener("loadeddata", sfw_playListener, true);
document.removeEventListener("seeking", sfw_playListener, true);
sfw_playListener = null;
}
if (sfw_mediaObserver) {
sfw_mediaObserver.disconnect();
sfw_mediaObserver = null;
}
// 2. Unmute existing media
document.querySelectorAll("audio, video").forEach(media => {
media.muted = false;
// Optional: media.volume = 1.0; // Use if volume was also forced to 0
});
}
async function sfwswitch_switcher() {
const stash_css = sfwswitch_findstashcss();
if (!stash_css) return;
// Toggle the CSS
stash_css.disabled = !stash_css.disabled;
const enabled = !stash_css.disabled;
localStorage.setItem("sfw_mode", enabled);
const audioMuteEnabled = await getSfwConfig();
// Logic Check: If we just disabled SFW, we MUST run unmute immediately
if (enabled && audioMuteEnabled) {
sfw_mute_all_media();
} else {
// This clears observers and sets muted = false
sfw_unmute_all_media();
// CRITICAL: Force a pause/reset on any media that might be stuck in a background buffer
document.querySelectorAll("audio, video").forEach(media => {
if (media.paused && media.muted) {
// If it was supposed to be stopped, make sure it stays stopped
media.muted = false;
}
});
}
const button = document.getElementById("plugin_sfw");
button.style.color = stash_css.disabled ? "#f5f8fa" : "#5cff00";
console.log(`SFW mode ${stash_css.disabled ? "disabled" : "enabled"}`);
if (button) {
button.style.color = enabled ? "#5cff00" : "#f5f8fa";
}
}
function sfwswitch_findstashcss() {

View File

@@ -1,10 +1,15 @@
name: SFW Switch
description: Add a button to blur covers and images.
version: 1.4
version: 1.7
url: https://discourse.stashapp.cc/t/sfw-switch/4658
ui:
javascript:
- sfw.js
css:
- sfw.css
- additional_plugins.css
- additional_plugins.css
settings:
audio_setting:
displayName: Enable Sound Mute
description: By default the plugin does not mute sound. Enabling this feature will have sound sources included when the SFW button is enabled.
type: BOOLEAN