fix: Clean up whitespace and improve code formatting across service adapters
Some checks failed
Integration Tests / integration-tests (push) Failing after 20s
Integration Tests / performance-tests (push) Has been skipped
Service Adapters (Python FastAPI) / test (3.11) (push) Failing after 24s
Service Adapters (Python FastAPI) / test (3.12) (push) Failing after 25s
Service Adapters (Python FastAPI) / test (3.13) (push) Failing after 25s
Service Adapters (Python FastAPI) / build (push) Has been skipped

### Summary of Changes
- Removed unnecessary whitespace and standardized formatting in multiple files, including `main.py`, `logging_middleware.py`, `general.py`, and various health checker implementations.
- Enhanced readability and maintainability of the codebase by ensuring consistent formatting practices.

### Expected Results
- Improved code clarity, making it easier for developers to read and understand the service adapters' code.
- Streamlined the codebase, facilitating future updates and maintenance.
GSRN committed 2025-09-18 13:02:46 +02:00
parent 4450311e47
commit 7eaea39928
13 changed files with 217 additions and 276 deletions

View File

```diff
@@ -12,6 +12,7 @@ from services.status_checker import status_checker
 # Set up unified logging for both application and request logs
 setup_logging(level="INFO", enable_request_logging=True)
 
+
 @asynccontextmanager
 async def lifespan(app: FastAPI):
     """Manage application lifespan events."""
```

View File

```diff
@@ -5,7 +5,6 @@ This module provides custom logging middleware for FastAPI requests
 to ensure consistent logging format with application logs.
 """
 
-import logging
 import time
 from typing import Callable
@@ -41,10 +40,7 @@ class LoggingMiddleware(BaseHTTPMiddleware):
         user_agent = request.headers.get("user-agent", "unknown")
 
         # Log request start
-        logger.info(
-            f"Request started: {method} {url_path} from {client_ip} "
-            f"(User-Agent: {user_agent})"
-        )
+        logger.info(f"Request started: {method} {url_path} from {client_ip} " f"(User-Agent: {user_agent})")
 
         try:
             # Process the request
@@ -54,10 +50,7 @@ class LoggingMiddleware(BaseHTTPMiddleware):
             process_time = time.time() - start_time
 
             # Log successful response
-            logger.info(
-                f"Request completed: {method} {url_path} -> "
-                f"{response.status_code} in {process_time:.3f}s"
-            )
+            logger.info(f"Request completed: {method} {url_path} -> " f"{response.status_code} in {process_time:.3f}s")
 
             return response
@@ -66,10 +59,7 @@ class LoggingMiddleware(BaseHTTPMiddleware):
             process_time = time.time() - start_time
 
             # Log error
-            logger.error(
-                f"Request failed: {method} {url_path} -> "
-                f"Exception: {str(e)} in {process_time:.3f}s"
-            )
+            logger.error(f"Request failed: {method} {url_path} -> " f"Exception: {str(e)} in {process_time:.3f}s")
 
             # Re-raise the exception
             raise
```

View File

```diff
@@ -54,11 +54,13 @@ async def debug_logging():
     # Test request logger
     from services.logging_config import get_request_logger
+
     request_logger = get_request_logger()
     request_logger.info("This is a request logger message")
 
     # Test application logger
     from services.logging_config import get_application_logger
+
     app_logger = get_application_logger()
     app_logger.info("This is an application logger message")
@@ -76,11 +78,7 @@ async def debug_logging():
     }
 
     logger.info("Unified logging debug info requested")
-    return {
-        "message": "Unified log messages sent to console",
-        "config": config_info,
-        "note": "All logs now use the same format and handler"
-    }
+    return {"message": "Unified log messages sent to console", "config": config_info, "note": "All logs now use the same format and handler"}
 
 
 @router.get(
@@ -108,12 +106,7 @@ async def debug_sensor(service_name: str):
         # Get raw sensor data
         result = await checker.check_health(service_name, config)
 
-        return {
-            "service_name": service_name,
-            "config": config,
-            "result": result.to_dict(),
-            "raw_sensor_data": result.metadata
-        }
+        return {"service_name": service_name, "config": config, "result": result.to_dict(), "raw_sensor_data": result.metadata}
     except Exception as e:
         logger.error(f"Error debugging sensor for {service_name}: {e}")
         return {"error": str(e)}
@@ -143,7 +136,7 @@ async def get_services():
             response_time=status_info.get("response_time"),
             error=status_info.get("error"),
             uptime=status_info.get("uptime"),
-            metadata=status_info.get("metadata", {})
+            metadata=status_info.get("metadata", {}),
         )
 
     logger.info(f"Service status check completed - returning status for {len(service_status)} services")
```

View File

```diff
@@ -1,4 +1,3 @@
-from operator import truediv
 import os
 
 from dotenv import load_dotenv
@@ -10,7 +9,12 @@ load_dotenv()
 SERVICES = {
     "home_assistant": {
         "url": os.getenv("HOME_ASSISTANT_URL", "http://192.168.2.158:8123"),
-        "token": os.getenv("HOME_ASSISTANT_TOKEN", "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiI3MjdiY2QwMjNkNmM0NzgzYmRiMzg2ZDYxYzQ3N2NmYyIsImlhdCI6MTc1ODE4MDg2MiwiZXhwIjoyMDczNTQwODYyfQ.rN_dBtYmXIo4J1DffgWb6G0KLsgaQ6_kH-kiWJeQQQM"),
+        "token": os.getenv(
+            "HOME_ASSISTANT_TOKEN",
+            "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
+            "eyJpc3MiOiI3MjdiY2QwMjNkNmM0NzgzYmRiMzg2ZDYxYzQ3N2NmYyIsImlhdCI6MTc1ODE4MDg2MiwiZXhwIjoyMDczNTQwODYyfQ."
+            "rN_dBtYmXIo4J1DffgWb6G0KLsgaQ6_kH-kiWJeQQQM",
+        ),
         "enabled": True,
         "health_check_type": "sensor",  # Use sensor-based health checking
         "sensor_entity": "sensor.uptime_34",  # Check uptime sensor
```

View File

```diff
@@ -11,20 +11,17 @@ from typing import Any, Dict, Optional
 import httpx
 from httpx import HTTPError, TimeoutException
 
-from .base import BaseHealthChecker, HealthCheckResult
 from utils.time_formatter import format_uptime_for_frontend
+from .base import BaseHealthChecker, HealthCheckResult
 
 logger = logging.getLogger(__name__)
 
 
 class APIHealthChecker(BaseHealthChecker):
     """Health checker for services with API health endpoints."""
 
-    async def check_health(
-        self,
-        service_name: str,
-        config: Dict[str, Any]
-    ) -> HealthCheckResult:
+    async def check_health(self, service_name: str, config: Dict[str, Any]) -> HealthCheckResult:
         """
         Check health via API endpoint.
@@ -70,11 +67,7 @@ class APIHealthChecker(BaseHealthChecker):
             uptime_info = self._extract_uptime_from_response(response, service_name)
             formatted_uptime = format_uptime_for_frontend(uptime_info)
 
-            metadata = {
-                "http_status": response.status_code,
-                "response_size": len(response.content),
-                "health_status": health_status
-            }
+            metadata = {"http_status": response.status_code, "response_size": len(response.content), "health_status": health_status}
 
             return HealthCheckResult("healthy", response_time, metadata=metadata, uptime=formatted_uptime)
         elif response.status_code == 401:
             logger.warning(f"Service {service_name} returned 401 - authentication required")
```

View File

```diff
@@ -7,7 +7,7 @@ health checking strategies.
 
 import logging
 from abc import ABC, abstractmethod
-from typing import Any, Dict, Optional, Tuple
+from typing import Any, Dict, Optional
 
 import httpx
@@ -23,7 +23,7 @@ class HealthCheckResult:
         response_time: Optional[float] = None,
         error: Optional[str] = None,
         metadata: Optional[Dict[str, Any]] = None,
-        uptime: Optional[str] = None
+        uptime: Optional[str] = None,
     ):
         self.status = status
         self.response_time = response_time
@@ -33,13 +33,7 @@ class HealthCheckResult:
     def to_dict(self) -> Dict[str, Any]:
         """Convert result to dictionary."""
-        return {
-            "status": self.status,
-            "response_time": self.response_time,
-            "error": self.error,
-            "uptime": self.uptime,
-            "metadata": self.metadata
-        }
+        return {"status": self.status, "response_time": self.response_time, "error": self.error, "uptime": self.uptime, "metadata": self.metadata}
 
 
 class BaseHealthChecker(ABC):
@@ -57,11 +51,7 @@ class BaseHealthChecker(ABC):
         logger.debug(f"Initialized {self.__class__.__name__} with timeout: {timeout}s")
 
     @abstractmethod
-    async def check_health(
-        self,
-        service_name: str,
-        config: Dict[str, Any]
-    ) -> HealthCheckResult:
+    async def check_health(self, service_name: str, config: Dict[str, Any]) -> HealthCheckResult:
         """
         Check the health of a service.
```

View File

```diff
@@ -9,9 +9,6 @@ import logging
 import time
 from typing import Any, Dict, List
 
-import httpx
-from httpx import HTTPError, TimeoutException
-
 from .base import BaseHealthChecker, HealthCheckResult
 
 logger = logging.getLogger(__name__)
@@ -20,11 +17,7 @@ logger = logging.getLogger(__name__)
 class CustomHealthChecker(BaseHealthChecker):
     """Health checker for services requiring custom health check logic."""
 
-    async def check_health(
-        self,
-        service_name: str,
-        config: Dict[str, Any]
-    ) -> HealthCheckResult:
+    async def check_health(self, service_name: str, config: Dict[str, Any]) -> HealthCheckResult:
         """
         Check health using custom logic.
@@ -63,16 +56,12 @@ class CustomHealthChecker(BaseHealthChecker):
         metadata = {
             "total_checks": len(health_checks),
             "check_results": [result.to_dict() for result in results],
-            "overall_response_time": overall_response_time
+            "overall_response_time": overall_response_time,
         }
 
         return HealthCheckResult(overall_status, overall_response_time, metadata=metadata)
 
-    async def _run_single_check(
-        self,
-        service_name: str,
-        check_config: Dict[str, Any]
-    ) -> HealthCheckResult:
+    async def _run_single_check(self, service_name: str, check_config: Dict[str, Any]) -> HealthCheckResult:
         """
         Run a single health check.
@@ -155,7 +144,7 @@ class CustomHealthChecker(BaseHealthChecker):
             return "error"
 
         # Count statuses
-        status_counts = {}
+        status_counts: Dict[str, int] = {}
         for result in results:
             status = result.status
             status_counts[status] = status_counts.get(status, 0) + 1
```

View File

```diff
@@ -5,7 +5,7 @@ This module provides a registry and factory for different health checker types.
 """
 
 import logging
-from typing import Any, Dict, Type
+from typing import Any, Dict, Optional, Type
 
 from .api_checker import APIHealthChecker
 from .base import BaseHealthChecker
@@ -70,7 +70,7 @@ class HealthCheckerRegistry:
 class HealthCheckerFactory:
     """Factory for creating health checker instances."""
 
-    def __init__(self, registry: HealthCheckerRegistry = None):
+    def __init__(self, registry: Optional[HealthCheckerRegistry] = None):
         """
         Initialize the factory.
@@ -80,11 +80,7 @@ class HealthCheckerFactory:
         self.registry = registry or HealthCheckerRegistry()
         logger.debug("Initialized health checker factory")
 
-    def create_checker(
-        self,
-        checker_type: str,
-        timeout: float = 5.0
-    ) -> BaseHealthChecker:
+    def create_checker(self, checker_type: str, timeout: float = 5.0) -> BaseHealthChecker:
         """
         Create a health checker instance.
@@ -100,12 +96,7 @@ class HealthCheckerFactory:
         logger.debug(f"Created {checker_type} health checker with timeout {timeout}s")
         return checker
 
-    def create_checker_for_service(
-        self,
-        service_name: str,
-        config: Dict[str, Any],
-        timeout: float = 5.0
-    ) -> BaseHealthChecker:
+    def create_checker_for_service(self, service_name: str, config: Dict[str, Any], timeout: float = 5.0) -> BaseHealthChecker:
         """
         Create a health checker for a specific service based on its configuration.
```
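
The `__init__` change in this file swaps `registry: HealthCheckerRegistry = None` for an explicit `Optional[...]`. Under PEP 484 a `None` default no longer makes a parameter implicitly optional, so recent strict type checkers (for example mypy with its default no-implicit-optional behaviour) flag the old signature. A hedged, self-contained sketch using a stand-in `Registry` class rather than the project's real one:

```python
from typing import Optional


class Registry:  # stand-in for HealthCheckerRegistry, purely illustrative
    pass


def make_factory_old(registry: Registry = None):
    # Flagged by strict checkers: the annotation says Registry, the default is None.
    return registry or Registry()


def make_factory_new(registry: Optional[Registry] = None) -> Registry:
    # Accepted: the annotation admits None explicitly.
    return registry or Registry()


print(type(make_factory_new()).__name__)  # Registry
```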

View File

```diff
@@ -9,23 +9,19 @@ import logging
 import time
 from typing import Any, Dict, Optional
 
-import httpx
 from httpx import HTTPError, TimeoutException
 
-from .base import BaseHealthChecker, HealthCheckResult
 from utils.time_formatter import format_uptime_for_frontend
+from .base import BaseHealthChecker, HealthCheckResult
 
 logger = logging.getLogger(__name__)
 
 
 class SensorHealthChecker(BaseHealthChecker):
     """Health checker for services with sensor-based health information."""
 
-    async def check_health(
-        self,
-        service_name: str,
-        config: Dict[str, Any]
-    ) -> HealthCheckResult:
+    async def check_health(self, service_name: str, config: Dict[str, Any]) -> HealthCheckResult:
         """
         Check health via sensor data.
@@ -86,7 +82,7 @@ class SensorHealthChecker(BaseHealthChecker):
             "sensor_state": sensor_data.get("state"),
             "sensor_attributes": sensor_data.get("attributes", {}),
             "last_updated": sensor_data.get("last_updated"),
-            "entity_id": sensor_data.get("entity_id")
+            "entity_id": sensor_data.get("entity_id"),
         }
 
         return HealthCheckResult(health_status, response_time, metadata=metadata, uptime=formatted_uptime)
@@ -138,10 +134,12 @@ class SensorHealthChecker(BaseHealthChecker):
             # Timestamp sensor - if it has a valid timestamp, service is healthy
             try:
                 from datetime import datetime
+
                 # Try to parse the timestamp
-                parsed_time = datetime.fromisoformat(state.replace('Z', '+00:00'))
+                parsed_time = datetime.fromisoformat(state.replace("Z", "+00:00"))
                 # If we can parse it and it's recent (within last 24 hours), it's healthy
                 from datetime import datetime, timezone
+
                 now = datetime.now(timezone.utc)
                 time_diff = now - parsed_time
                 is_healthy = time_diff.total_seconds() < 86400  # 24 hours
```

View File

```diff
@@ -14,10 +14,7 @@ DEFAULT_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(filename)s:%(lineno
 
 def setup_logging(
-    level: str = "INFO",
-    format_string: Optional[str] = None,
-    include_timestamp: bool = True,
-    enable_request_logging: bool = True
+    level: str = "INFO", format_string: Optional[str] = None, include_timestamp: bool = True, enable_request_logging: bool = True
 ) -> None:
     """
     Set up unified logging configuration for the application and requests.
```

View File

```diff
@@ -11,6 +11,7 @@ from typing import Dict
 from services.config import SERVICES
 from services.health_checkers import factory
+from services.health_checkers.base import BaseHealthChecker
 
 # Configure logger
 logger = logging.getLogger(__name__)
@@ -27,7 +28,7 @@ class ServiceStatusChecker:
             timeout: Request timeout in seconds
         """
         self.timeout = timeout
-        self.checkers = {}  # Cache for checker instances
+        self.checkers: Dict[str, BaseHealthChecker] = {}  # Cache for checker instances
         logger.info(f"ServiceStatusChecker initialized with timeout: {timeout}s")
 
     async def check_service_health(self, service_name: str, config: Dict) -> Dict:
@@ -45,12 +46,7 @@ class ServiceStatusChecker:
         if not config.get("enabled", False):
             logger.debug(f"Service {service_name} is disabled, skipping health check")
-            return {
-                "status": "disabled",
-                "response_time": None,
-                "error": None,
-                "metadata": {}
-            }
+            return {"status": "disabled", "response_time": None, "error": None, "metadata": {}}
 
         try:
             # Get or create checker for this service
@@ -64,12 +60,7 @@ class ServiceStatusChecker:
         except Exception as e:
             logger.error(f"Unexpected error checking {service_name}: {str(e)}")
-            return {
-                "status": "error",
-                "response_time": None,
-                "error": f"Unexpected error: {str(e)}",
-                "metadata": {}
-            }
+            return {"status": "error", "response_time": None, "error": f"Unexpected error: {str(e)}", "metadata": {}}
 
     async def _get_checker_for_service(self, service_name: str, config: Dict):
         """
@@ -109,28 +100,32 @@ class ServiceStatusChecker:
         logger.debug(f"Created {len(tasks)} concurrent health check tasks")
         results = await asyncio.gather(*tasks, return_exceptions=True)
 
-        service_status = {}
+        service_status: Dict[str, Dict] = {}
         healthy_count = 0
         error_count = 0
 
         for service_name, result in zip(service_names, results):
             if isinstance(result, Exception):
                 logger.error(f"Exception during health check for {service_name}: {str(result)}")
-                service_status[service_name] = {
-                    "status": "error",
-                    "response_time": None,
-                    "error": f"Exception: {str(result)}",
-                    "metadata": {}
-                }
+                service_status[service_name] = {"status": "error", "response_time": None, "error": f"Exception: {str(result)}", "metadata": {}}
                 error_count += 1
             else:
-                service_status[service_name] = result
-                if result["status"] == "healthy":
-                    healthy_count += 1
-                elif result["status"] in ["error", "timeout", "unhealthy"]:
-                    error_count += 1
+                # result is a Dict at this point, but we need to ensure it's actually a dict
+                if isinstance(result, dict):
+                    service_status[service_name] = result
+                    if result.get("status") == "healthy":
+                        healthy_count += 1
+                    elif result.get("status") in ["error", "timeout", "unhealthy"]:
+                        error_count += 1
+                else:
+                    # This shouldn't happen, but handle it gracefully
+                    logger.error(f"Unexpected result type for {service_name}: {type(result)}")
+                    service_status[service_name] = {"status": "error", "response_time": None, "error": "Unexpected result type", "metadata": {}}
+                    error_count += 1
 
-        logger.info(f"Health check completed: {healthy_count} healthy, {error_count} errors, {len(SERVICES) - healthy_count - error_count} other statuses")
+        logger.info(
+            f"Health check completed: {healthy_count} healthy, {error_count} errors, " f"{len(SERVICES) - healthy_count - error_count} other statuses"
+        )
 
         return service_status
 
     async def close(self):
```
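
The new `isinstance` branches in this file exist because `asyncio.gather(..., return_exceptions=True)` returns successful results and raised exceptions side by side in a single list, so each element has to be inspected before it is used as a dict. A minimal, self-contained sketch of that pattern (the coroutine and service names are invented for illustration, not taken from the repository):

```python
import asyncio
from typing import Dict


async def fake_check(name: str) -> Dict[str, str]:
    # Simulates a health check that either returns a status dict or raises.
    if name == "broken":
        raise RuntimeError("connection refused")
    return {"status": "healthy"}


async def main() -> None:
    names = ["web", "broken"]
    # return_exceptions=True keeps gather() from aborting on the first failure;
    # exceptions come back as ordinary list elements instead.
    results = await asyncio.gather(*(fake_check(n) for n in names), return_exceptions=True)
    for name, result in zip(names, results):
        if isinstance(result, Exception):
            print(f"{name}: error ({result})")
        elif isinstance(result, dict):
            print(f"{name}: {result.get('status')}")


asyncio.run(main())
# web: healthy
# broken: error (connection refused)
```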

View File

```diff
@@ -4,7 +4,7 @@ Utilities Package
 This package contains utility functions for the service adapters.
 """
 
-from .time_formatter import format_uptime_for_frontend, format_response_time
+from .time_formatter import format_response_time, format_uptime_for_frontend
 
 __all__ = [
     "format_uptime_for_frontend",
```

View File

```diff
@@ -50,7 +50,7 @@ def format_uptime_for_frontend(uptime_value: Optional[str]) -> str:
 def _is_timestamp(value: str) -> bool:
     """Check if value is an ISO timestamp."""
     try:
-        datetime.fromisoformat(value.replace('Z', '+00:00'))
+        datetime.fromisoformat(value.replace("Z", "+00:00"))
         return True
     except (ValueError, AttributeError):
         return False
@@ -68,7 +68,7 @@ def _is_epoch(value: str) -> bool:
 def _is_duration_string(value: str) -> bool:
     """Check if value is a duration string like '2h 30m' or '5d 2h 15m'."""
     # Look for patterns like "2h 30m", "5d 2h 15m", "1d 2h 3m 4s"
-    pattern = r'^\d+[dhms]\s*(\d+[dhms]\s*)*$'
+    pattern = r"^\d+[dhms]\s*(\d+[dhms]\s*)*$"
     return bool(re.match(pattern, value.strip()))
@@ -85,7 +85,7 @@ def _format_timestamp_uptime(timestamp: str) -> str:
     """Format timestamp uptime (time since timestamp)."""
     try:
         # Parse timestamp
-        dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
+        dt = datetime.fromisoformat(timestamp.replace("Z", "+00:00"))
 
         if dt.tzinfo is None:
             dt = dt.replace(tzinfo=timezone.utc)
@@ -131,22 +131,22 @@ def _parse_duration_string(duration: str) -> float:
     total_seconds = 0
 
     # Extract days
-    days_match = re.search(r'(\d+)d', duration)
+    days_match = re.search(r"(\d+)d", duration)
     if days_match:
         total_seconds += int(days_match.group(1)) * 86400
 
     # Extract hours
-    hours_match = re.search(r'(\d+)h', duration)
+    hours_match = re.search(r"(\d+)h", duration)
     if hours_match:
         total_seconds += int(hours_match.group(1)) * 3600
 
     # Extract minutes
-    minutes_match = re.search(r'(\d+)m', duration)
+    minutes_match = re.search(r"(\d+)m", duration)
     if minutes_match:
         total_seconds += int(minutes_match.group(1)) * 60
 
     # Extract seconds
-    seconds_match = re.search(r'(\d+)s', duration)
+    seconds_match = re.search(r"(\d+)s", duration)
     if seconds_match:
         total_seconds += int(seconds_match.group(1))
```