import argparse
import fnmatch
import ipaddress
import json
import logging
import os
import re
import socket
import time
import urllib.parse
from concurrent.futures import ThreadPoolExecutor, as_completed

import dns.resolver
import requests
from fake_useragent import UserAgent
from flask import Flask, Response, jsonify, request, send_from_directory

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = Flask(__name__)


@app.route("/")
def serve_frontend():
    """Serve the frontend index.html file"""
    return send_from_directory("frontend", "index.html")


@app.route("/assets/<path:filename>")
def serve_assets(filename):
    """Serve assets from the docs/assets directory"""
    try:
        return send_from_directory("docs/assets", filename)
    except Exception:
        return "Asset not found", 404


@app.route("/<path:filename>")
def serve_static_files(filename):
    """Serve static files from the frontend directory"""
    # Don't serve API routes through the static file handler
    api_routes = ["m3u", "xmltv", "categories", "image-proxy", "stream-proxy", "assets"]
    if filename.split("/")[0] in api_routes:
        return "Not found", 404
    # Only serve files that exist in the frontend directory
    try:
        return send_from_directory("frontend", filename)
    except Exception:
        # If the file doesn't exist in frontend, return 404
        return "File not found", 404

# Get default proxy URL from environment variable
DEFAULT_PROXY_URL = os.environ.get("PROXY_URL")


# Set up custom DNS resolver
def setup_custom_dns():
    """Configure a custom DNS resolver using reliable public DNS services"""
    dns_servers = ["1.1.1.1", "1.0.0.1", "8.8.8.8", "8.8.4.4", "9.9.9.9"]
    custom_resolver = dns.resolver.Resolver()
    custom_resolver.nameservers = dns_servers

    original_getaddrinfo = socket.getaddrinfo

    def new_getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
        if host:
            try:
                # Skip DNS resolution for IP addresses
                try:
                    ipaddress.ip_address(host)
                    # If we get here, the host is already an IP address
                    logger.debug(f"Host is already an IP address: {host}, skipping DNS resolution")
                except ValueError:
                    # Not an IP address, so use DNS resolution
                    answers = custom_resolver.resolve(host)
                    resolved = str(answers[0])
                    logger.debug(f"Custom DNS resolved {host} -> {resolved}")
                    host = resolved
            except Exception as e:
                logger.info(f"Custom DNS resolution failed for {host}: {e}, falling back to system DNS")
        return original_getaddrinfo(host, port, family, type, proto, flags)

    socket.getaddrinfo = new_getaddrinfo
    logger.info("Custom DNS resolver set up")


# Initialize DNS resolver
setup_custom_dns()

# No persistent connections - fresh connection for each request to avoid stale connection issues
# Common request function for API endpoints
def fetch_api_data(url, timeout=10):
    """Make a request to an API endpoint and return parsed JSON (or raw text)"""
    ua = UserAgent()
    headers = {
        "User-Agent": ua.chrome,
        "Accept": "application/json,text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Language": "en-US,en;q=0.5",
        "Connection": "close",
        "Accept-Encoding": "gzip, deflate",
    }

    try:
        hostname = urllib.parse.urlparse(url).netloc.split(":")[0]
        logger.info(f"Making request to host: {hostname}")
        # Use a fresh connection for each request to avoid stale connection issues
        response = requests.get(url, headers=headers, timeout=timeout, stream=True)
        response.raise_for_status()
        try:
            # Note: response.json() still buffers the full body; the Content-Length
            # check below only logs large responses and forces a sane encoding.
            content_length = response.headers.get("Content-Length")
            if content_length and int(content_length) > 10_000_000:  # > 10 MB
                logger.info(f"Large response detected ({content_length} bytes)")
                response.encoding = "utf-8"  # Ensure proper encoding
            return response.json()
        except json.JSONDecodeError:
            # Fall back to text for non-JSON responses
            return response.text
    except requests.exceptions.SSLError:
        return {"error": "SSL Error", "details": "Failed to verify SSL certificate"}, 503
    except requests.exceptions.RequestException as e:
        logger.error(f"RequestException: {e}")
        return {"error": "Request Exception", "details": str(e)}, 503

def stream_request(url, headers=None, timeout=30):
"""Make a streaming request that doesn't buffer the full response"""
if not headers:
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
"Connection": "keep-alive",
}
# Use longer timeout for streams and set both connect and read timeouts
return requests.get(url, stream=True, headers=headers, timeout=(10, timeout))
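# Example (illustrative, hypothetical URL): stream_request("http://host/live/1.ts", timeout=60)
# uses a 10-second connect timeout and a 60-second read timeout.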
def encode_url(url):
"""Safely encode a URL for use in proxy endpoints"""
return urllib.parse.quote(url, safe="") if url else ""
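# Example (illustrative): encode_url("http://host/logo 1.png") -> "http%3A%2F%2Fhost%2Flogo%201.png"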
def generate_streaming_response(response, content_type=None):
    """Generate a streaming response with appropriate headers"""
    if not content_type:
        content_type = response.headers.get("Content-Type", "application/octet-stream")

    def generate():
        bytes_sent = 0
        try:
            for chunk in response.iter_content(chunk_size=8192):
                if chunk:
                    bytes_sent += len(chunk)
                    yield chunk
            logger.info(f"Stream completed, sent {bytes_sent} bytes")
        except requests.exceptions.ChunkedEncodingError as e:
            # Chunked encoding error from upstream - log and stop gracefully
            logger.warning(f"Upstream chunked encoding error after {bytes_sent} bytes: {str(e)}")
            # Don't raise - just stop yielding to close the stream gracefully
        except requests.exceptions.ConnectionError as e:
            # Connection error (reset, timeout, etc.) - log and stop gracefully
            logger.warning(f"Connection error after {bytes_sent} bytes: {str(e)}")
            # Don't raise - just stop yielding to close the stream gracefully
        except Exception as e:
            logger.error(f"Streaming error after {bytes_sent} bytes: {str(e)}")
            # Don't raise exceptions in generators after headers are sent!
            # Raising here causes Flask to inject "HTTP/1.1 500" into the chunked body,
            # corrupting the stream for the client.
        finally:
            # Always close the upstream response to free resources
            try:
                response.close()
            except Exception:
                pass

    headers = {
        "Access-Control-Allow-Origin": "*",
        "Content-Type": content_type,
    }
    # Add content length if available and not using chunked transfer
    if "Content-Length" in response.headers and "Transfer-Encoding" not in response.headers:
        headers["Content-Length"] = response.headers["Content-Length"]
    else:
        headers["Transfer-Encoding"] = "chunked"

    return Response(generate(), mimetype=content_type, headers=headers, direct_passthrough=True)


@app.route("/image-proxy/<path:image_url>")
def proxy_image(image_url):
    """Proxy endpoint for images to avoid CORS issues"""
    try:
        original_url = urllib.parse.unquote(image_url)
        logger.info(f"Image proxy request for: {original_url}")

        response = requests.get(original_url, stream=True, timeout=10)
        response.raise_for_status()

        content_type = response.headers.get("Content-Type", "")
        if not content_type.startswith("image/"):
            logger.error(f"Invalid content type for image: {content_type}")
            return Response("Invalid image type", status=415)

        return generate_streaming_response(response, content_type)
    except requests.Timeout:
        return Response("Image fetch timeout", status=504)
    except requests.HTTPError as e:
        return Response(f"Failed to fetch image: {str(e)}", status=e.response.status_code)
    except Exception as e:
        logger.error(f"Image proxy error: {str(e)}")
        return Response("Failed to process image", status=500)

@app.route("/stream-proxy/<path:stream_url>")
def proxy_stream(stream_url):
"""Proxy endpoint for streams"""
try:
original_url = urllib.parse.unquote(stream_url)
logger.info(f"Stream proxy request for: {original_url}")
response = stream_request(original_url, timeout=60) # Longer timeout for live streams
response.raise_for_status()
2025-04-24 22:25:01 -03:00
# Determine content type
2025-08-30 00:17:55 -03:00
content_type = response.headers.get("Content-Type")
if not content_type:
2025-08-30 00:17:55 -03:00
if original_url.endswith(".ts"):
content_type = "video/MP2T"
elif original_url.endswith(".m3u8"):
content_type = "application/vnd.apple.mpegurl"
else:
2025-08-30 00:17:55 -03:00
content_type = "application/octet-stream"
logger.info(f"Using content type: {content_type}")
2025-04-24 22:25:01 -03:00
return generate_streaming_response(response, content_type)
except requests.Timeout:
logger.error(f"Timeout connecting to stream: {original_url}")
2025-08-30 00:17:55 -03:00
return Response("Stream timeout", status=504)
except requests.HTTPError as e:
logger.error(f"HTTP error fetching stream: {e.response.status_code} - {original_url}")
2025-08-30 00:17:55 -03:00
return Response(f"Failed to fetch stream: {str(e)}", status=e.response.status_code)
except Exception as e:
logger.error(f"Stream proxy error: {str(e)} - {original_url}")
2025-08-30 00:17:55 -03:00
return Response("Failed to process stream", status=500)
2025-04-24 22:25:01 -03:00
def parse_group_list(group_string):
"""Parse a comma-separated string into a list of trimmed strings"""
return [group.strip() for group in group_string.split(",")] if group_string else []
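# Example (illustrative): parse_group_list("Sports, News , Kids") -> ["Sports", "News", "Kids"]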
def group_matches(group_title, pattern):
    """Check if a group title matches a pattern, supporting wildcards and exact matching"""
    # Convert to lowercase for case-insensitive matching
    group_lower = group_title.lower()
    pattern_lower = pattern.lower()

    # Handle spaces in pattern
    if " " in pattern_lower:
        # For patterns with spaces, split and check each part
        pattern_parts = pattern_lower.split()
        group_parts = group_lower.split()
        # If pattern has more parts than group, can't match
        if len(pattern_parts) > len(group_parts):
            return False
        # Check each part of the pattern against group parts
        for i, part in enumerate(pattern_parts):
            if i >= len(group_parts):
                return False
            if "*" in part or "?" in part:
                if not fnmatch.fnmatch(group_parts[i], part):
                    return False
            else:
                if part not in group_parts[i]:
                    return False
        return True

    # Check for wildcard patterns
    if "*" in pattern_lower or "?" in pattern_lower:
        return fnmatch.fnmatch(group_lower, pattern_lower)
    else:
        # Simple substring match for non-wildcard patterns
        return pattern_lower in group_lower
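# Illustrative matching semantics (hypothetical group titles):
#   group_matches("US Sports HD", "sports")  -> True   (case-insensitive substring match)
#   group_matches("US Sports HD", "us *")    -> True   (space-separated pattern parts matched word by word)
#   group_matches("US Sports HD", "sp?rts")  -> False  (a wildcard pattern must match the whole title)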
def get_required_params():
"""Get and validate the required parameters from the request (supports both GET and POST)"""
# Handle both GET and POST requests
if request.method == "POST":
data = request.get_json() or {}
url = data.get("url")
username = data.get("username")
password = data.get("password")
proxy_url = data.get("proxy_url", DEFAULT_PROXY_URL) or request.host_url.rstrip("/")
else:
url = request.args.get("url")
username = request.args.get("username")
password = request.args.get("password")
proxy_url = request.args.get("proxy_url", DEFAULT_PROXY_URL) or request.host_url.rstrip("/")
2025-01-26 17:11:02 -03:00
if not url or not username or not password:
2025-08-30 00:17:55 -03:00
return (
None,
None,
None,
2025-08-30 21:14:15 -03:00
None,
jsonify({"error": "Missing Parameters", "details": "Required parameters: url, username, and password"}),
400
2025-08-30 00:17:55 -03:00
)
2025-04-24 22:25:01 -03:00
2025-08-30 21:14:15 -03:00
return url, username, password, proxy_url, None, None
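# Example (hypothetical provider and credentials):
#   GET /m3u?url=http://provider.example:8080&username=USER&password=PASS
#   -> ("http://provider.example:8080", "USER", "PASS", <proxy base URL>, None, None)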
def validate_xtream_credentials(url, username, password):
    """Validate the Xtream API credentials"""
    api_url = f"{url}/player_api.php?username={username}&password={password}"
    data = fetch_api_data(api_url)

    if isinstance(data, tuple):  # Error response
        return None, data[0], data[1]

    if "user_info" not in data or "server_info" not in data:
        return (
            None,
            json.dumps(
                {
                    "error": "Invalid Response",
                    "details": "Server response missing required data (user_info or server_info)",
                }
            ),
            400,
        )

    return data, None, None
def fetch_api_endpoint(url_info):
"""Fetch a single API endpoint - used for concurrent requests"""
url, name, timeout = url_info
try:
logger.info(f"🚀 Fetching {name}...")
start_time = time.time()
data = fetch_api_data(url, timeout=timeout)
end_time = time.time()
if isinstance(data, list):
logger.info(f"✅ Completed {name} in {end_time-start_time:.1f}s - got {len(data)} items")
else:
logger.info(f"✅ Completed {name} in {end_time-start_time:.1f}s")
return name, data
except Exception as e:
logger.warning(f"❌ Failed to fetch {name}: {e}")
return name, None
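# url_info is a (url, name, timeout) tuple, e.g. (illustrative)
# ("http://provider.example/player_api.php?...&action=get_live_categories", "live_categories", 60)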
def fetch_categories_and_channels(url, username, password, include_vod=False):
"""Fetch categories and channels from the Xtream API using concurrent requests"""
all_categories = []
all_streams = []
try:
# Prepare all API endpoints to fetch concurrently
api_endpoints = [
(f"{url}/player_api.php?username={username}&password={password}&action=get_live_categories",
"live_categories", 60),
(f"{url}/player_api.php?username={username}&password={password}&action=get_live_streams",
"live_streams", 180),
]
# Add VOD endpoints if requested (WARNING: This will be much slower!)
if include_vod:
logger.warning("⚠️ Including VOD content - this will take significantly longer!")
logger.info("💡 For faster loading, use the API without include_vod=true")
# Only add the most essential VOD endpoints - skip the massive streams for categories-only requests
api_endpoints.extend([
(f"{url}/player_api.php?username={username}&password={password}&action=get_vod_categories",
"vod_categories", 60),
(f"{url}/player_api.php?username={username}&password={password}&action=get_series_categories",
"series_categories", 60),
])
            # Only fetch the massive stream lists if explicitly needed for M3U generation
            # (request.endpoint requires an active Flask request context)
            vod_for_m3u = request.endpoint == "generate_m3u"
if vod_for_m3u:
logger.warning("🐌 Fetching massive VOD/Series streams for M3U generation...")
api_endpoints.extend([
(f"{url}/player_api.php?username={username}&password={password}&action=get_vod_streams",
"vod_streams", 240),
(f"{url}/player_api.php?username={username}&password={password}&action=get_series",
"series", 240),
])
else:
logger.info("⚡ Skipping massive VOD streams for categories-only request")
# Fetch all endpoints concurrently using ThreadPoolExecutor
logger.info(f"Starting concurrent fetch of {len(api_endpoints)} API endpoints...")
results = {}
with ThreadPoolExecutor(max_workers=10) as executor: # Increased workers for better concurrency
# Submit all API calls
future_to_name = {executor.submit(fetch_api_endpoint, endpoint): endpoint[1]
for endpoint in api_endpoints}
# Collect results as they complete
for future in as_completed(future_to_name):
name, data = future.result()
results[name] = data
logger.info("All concurrent API calls completed!")
# Process live categories and streams (required)
live_categories = results.get("live_categories")
live_streams = results.get("live_streams")
if isinstance(live_categories, tuple): # Error response
return None, None, live_categories[0], live_categories[1]
if isinstance(live_streams, tuple): # Error response
return None, None, live_streams[0], live_streams[1]
if not isinstance(live_categories, list) or not isinstance(live_streams, list):
return (
None,
None,
json.dumps(
{
"error": "Invalid Data Format",
"details": "Live categories or streams data is not in the expected format",
}
),
500,
)
# Optimized data processing - batch operations for massive datasets
logger.info("Processing live content...")
# Batch set content_type for live content
if live_categories:
for category in live_categories:
category["content_type"] = "live"
all_categories.extend(live_categories)
if live_streams:
for stream in live_streams:
stream["content_type"] = "live"
all_streams.extend(live_streams)
logger.info(f"✅ Added {len(live_categories)} live categories and {len(live_streams)} live streams")
# Process VOD content if requested and available
if include_vod:
logger.info("Processing VOD content...")
# Process VOD categories
vod_categories = results.get("vod_categories")
if isinstance(vod_categories, list) and vod_categories:
for category in vod_categories:
category["content_type"] = "vod"
all_categories.extend(vod_categories)
logger.info(f"✅ Added {len(vod_categories)} VOD categories")
# Process series categories first (lightweight)
series_categories = results.get("series_categories")
if isinstance(series_categories, list) and series_categories:
for category in series_categories:
category["content_type"] = "series"
all_categories.extend(series_categories)
logger.info(f"✅ Added {len(series_categories)} series categories")
# Only process massive stream lists if they were actually fetched
vod_streams = results.get("vod_streams")
if isinstance(vod_streams, list) and vod_streams:
logger.info(f"🔥 Processing {len(vod_streams)} VOD streams (this is the slow part)...")
# Batch process for better performance
batch_size = 5000
for i in range(0, len(vod_streams), batch_size):
batch = vod_streams[i:i + batch_size]
for stream in batch:
stream["content_type"] = "vod"
if i + batch_size < len(vod_streams):
logger.info(f" Processed {i + batch_size}/{len(vod_streams)} VOD streams...")
all_streams.extend(vod_streams)
logger.info(f"✅ Added {len(vod_streams)} VOD streams")
# Process series (this can also be huge!)
series = results.get("series")
if isinstance(series, list) and series:
logger.info(f"🔥 Processing {len(series)} series (this is also slow)...")
# Batch process for better performance
batch_size = 5000
for i in range(0, len(series), batch_size):
batch = series[i:i + batch_size]
for show in batch:
show["content_type"] = "series"
if i + batch_size < len(series):
logger.info(f" Processed {i + batch_size}/{len(series)} series...")
all_streams.extend(series)
logger.info(f"✅ Added {len(series)} series")
except Exception as e:
logger.error(f"Critical error fetching API data: {e}")
return (
None,
None,
json.dumps(
{
"error": "API Fetch Error",
"details": f"Failed to fetch data from IPTV service: {str(e)}",
}
),
500,
)
logger.info(f"🚀 CONCURRENT FETCH COMPLETE: {len(all_categories)} total categories and {len(all_streams)} total streams")
return all_categories, all_streams, None, None


@app.route("/categories", methods=["GET"])
def get_categories():
    """Get all available categories from the Xtream API"""
    # Get and validate parameters
    url, username, password, proxy_url, error, status_code = get_required_params()
    if error:
        return error, status_code

    # Check for VOD parameter - default to false to avoid timeouts (VOD is massive and slow!)
    include_vod = request.args.get("include_vod", "false").lower() == "true"
    logger.info(f"VOD content requested: {include_vod}")

    # Validate credentials
    user_data, error_json, error_code = validate_xtream_credentials(url, username, password)
    if error_json:
        return error_json, error_code, {"Content-Type": "application/json"}

    # Fetch categories
    categories, channels, error_json, error_code = fetch_categories_and_channels(url, username, password, include_vod)
    if error_json:
        return error_json, error_code, {"Content-Type": "application/json"}

    # Return categories as JSON
    return json.dumps(categories), 200, {"Content-Type": "application/json"}


@app.route("/xmltv", methods=["GET"])
def generate_xmltv():
    """Generate a filtered XMLTV file from the Xtream API"""
    # Get and validate parameters
    url, username, password, proxy_url, error, status_code = get_required_params()
    if error:
        return error, status_code

    # No filtering supported for XMLTV endpoint

    # Validate credentials
    user_data, error_json, error_code = validate_xtream_credentials(url, username, password)
    if error_json:
        return error_json, error_code, {"Content-Type": "application/json"}

    # Fetch XMLTV data
    base_url = url.rstrip("/")
    xmltv_url = f"{base_url}/xmltv.php?username={username}&password={password}"
    xmltv_data = fetch_api_data(xmltv_url, timeout=20)  # Longer timeout for XMLTV
    if isinstance(xmltv_data, tuple):  # Error response
        return json.dumps(xmltv_data[0]), xmltv_data[1], {"Content-Type": "application/json"}

    # If not proxying, return the original XMLTV
    if not proxy_url:
        return Response(
            xmltv_data, mimetype="application/xml", headers={"Content-Disposition": "attachment; filename=guide.xml"}
        )

    # Replace image URLs in the XMLTV content with proxy URLs
    def replace_icon_url(match):
        original_url = match.group(1)
        proxied_url = f"{proxy_url}/image-proxy/{encode_url(original_url)}"
        return f'<icon src="{proxied_url}"'

    xmltv_data = re.sub(r'<icon src="([^"]+)"', replace_icon_url, xmltv_data)

    # Return the XMLTV data
    return Response(
        xmltv_data, mimetype="application/xml", headers={"Content-Disposition": "attachment; filename=guide.xml"}
    )
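# Illustrative icon rewrite performed by /xmltv above (hypothetical URLs):
#   <icon src="http://epg.example/logo.png"  becomes
#   <icon src="http://proxy.example/image-proxy/http%3A%2F%2Fepg.example%2Flogo.png"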
@app.route("/m3u", methods=["GET", "POST"])
2024-08-26 13:09:52 -03:00
def generate_m3u():
2025-04-24 22:25:01 -03:00
"""Generate a filtered M3U playlist from the Xtream API"""
# Get and validate parameters
2025-08-30 21:14:15 -03:00
url, username, password, proxy_url, error, status_code = get_required_params()
2025-04-24 22:25:01 -03:00
if error:
2025-08-30 21:14:15 -03:00
return error, status_code
# Parse filter parameters (support both GET and POST for large filter lists)
if request.method == "POST":
data = request.get_json() or {}
unwanted_groups = parse_group_list(data.get("unwanted_groups", ""))
wanted_groups = parse_group_list(data.get("wanted_groups", ""))
no_stream_proxy = str(data.get("nostreamproxy", "")).lower() == "true"
include_vod = str(data.get("include_vod", "false")).lower() == "true"
include_channel_id = str(data.get("include_channel_id", "false")).lower() == "true"
channel_id_tag = str(data.get("channel_id_tag", "channel-id"))
2025-08-30 21:14:15 -03:00
logger.info("🔄 Processing POST request for M3U generation")
else:
unwanted_groups = parse_group_list(request.args.get("unwanted_groups", ""))
wanted_groups = parse_group_list(request.args.get("wanted_groups", ""))
no_stream_proxy = request.args.get("nostreamproxy", "").lower() == "true"
include_vod = request.args.get("include_vod", "false").lower() == "true"
include_channel_id = request.args.get("include_channel_id", "false") == "true"
channel_id_tag = request.args.get("channel_id_tag", "channel-id")
2025-08-30 21:14:15 -03:00
logger.info("🔄 Processing GET request for M3U generation")
2024-08-26 13:09:52 -03:00
2025-08-30 01:38:54 -03:00
# For M3U generation, warn about VOD performance impact
if include_vod:
logger.warning("⚠️ M3U generation with VOD enabled - expect 2-5 minute generation time!")
else:
logger.info("⚡ M3U generation for live content only - should be fast!")
# Log filter parameters (truncate if too long for readability)
wanted_display = f"{len(wanted_groups)} groups" if len(wanted_groups) > 10 else str(wanted_groups)
unwanted_display = f"{len(unwanted_groups)} groups" if len(unwanted_groups) > 10 else str(unwanted_groups)
logger.info(f"Filter parameters - wanted_groups: {wanted_display}, unwanted_groups: {unwanted_display}, include_vod: {include_vod}")
# Warn about massive filter lists
total_filters = len(wanted_groups) + len(unwanted_groups)
if total_filters > 20:
logger.warning(f"⚠️ Large filter list detected ({total_filters} categories) - this will be slower!")
if total_filters > 50:
logger.warning(f"🐌 MASSIVE filter list ({total_filters} categories) - expect 3-5 minute processing time!")
    # Validate credentials
    user_data, error_json, error_code = validate_xtream_credentials(url, username, password)
    if error_json:
        return error_json, error_code, {"Content-Type": "application/json"}

    # Fetch categories and channels
    categories, streams, error_json, error_code = fetch_categories_and_channels(url, username, password, include_vod)
    if error_json:
        return error_json, error_code, {"Content-Type": "application/json"}

    # Extract user info and server URL
    username = user_data["user_info"]["username"]
    password = user_data["user_info"]["password"]
    server_url = f"http://{user_data['server_info']['url']}:{user_data['server_info']['port']}"

    # Create category name lookup
    category_names = {cat["category_id"]: cat["category_name"] for cat in categories}

    # Log all available groups
    all_groups = set(category_names.values())
    logger.info(f"All available groups: {sorted(all_groups)}")

    # Generate M3U playlist
    m3u_playlist = "#EXTM3U\n"
    # Track included groups
    included_groups = set()
    processed_streams = 0
    total_streams = len(streams)

    logger.info(f"🔍 Starting to filter {total_streams} streams...")
    batch_size = 10000  # Progress-log interval for large stream lists
    for stream in streams:
        content_type = stream.get("content_type", "live")

        # Determine group title based on content type
        if content_type == "series":
            # For series, use the series category as the group title
            group_title = f"Series - {category_names.get(stream.get('category_id'), 'Uncategorized')}"
            stream_name = stream.get("name", "Unknown Series")
        else:
            # For live and VOD content
            group_title = category_names.get(stream.get("category_id"), "Uncategorized")
            stream_name = stream.get("name", "Unknown")
            # Add content type prefix for VOD
            if content_type == "vod":
                group_title = f"VOD - {group_title}"

        # Filtering logic: a whitelist (wanted_groups) takes precedence over a blacklist
        include_stream = True
        if wanted_groups:
            # Only include streams from explicitly wanted groups
            include_stream = any(
                group_matches(group_title, wanted_group) for wanted_group in wanted_groups
            )
        elif unwanted_groups:
            # Exclude streams from unwanted groups
            include_stream = not any(
                group_matches(group_title, unwanted_group) for unwanted_group in unwanted_groups
            )

        processed_streams += 1
        # Progress logging for large datasets
        if processed_streams % batch_size == 0:
            logger.info(f"  📊 Processed {processed_streams}/{total_streams} streams ({(processed_streams/total_streams)*100:.1f}%)")
if include_stream:
included_groups.add(group_title)
tags = [
f'tvg-name="{stream_name}"',
f'group-title="{group_title}"',
]
# Handle logo URL - proxy only if stream proxying is enabled
original_logo = stream.get("stream_icon", "")
if original_logo and not no_stream_proxy:
logo_url = f"{proxy_url}/image-proxy/{encode_url(original_logo)}"
else:
logo_url = original_logo
tags.append(f'tvg-logo="{logo_url}"')
# Handle channel id if enabled
if include_channel_id:
channel_id = stream.get("epg_channel_id")
if channel_id:
tags.append(f'{channel_id_tag}="{channel_id}"')
            # Create the stream URL based on content type
            if content_type == "live":
                # Live TV streams
                stream_url = f"{server_url}/live/{username}/{password}/{stream['stream_id']}.ts"
            elif content_type == "vod":
                # VOD streams
                stream_url = f"{server_url}/movie/{username}/{password}/{stream['stream_id']}.{stream.get('container_extension', 'mp4')}"
            elif content_type == "series":
                # Series streams - use the first episode if available
                episodes = stream.get("episodes")
                if episodes:
                    seasons = list(episodes.values())
                    first_episode = seasons[0][0] if seasons and seasons[0] else None
                    if first_episode:
                        episode_id = first_episode.get("id", stream.get("series_id", ""))
                        stream_url = f"{server_url}/series/{username}/{password}/{episode_id}.{first_episode.get('container_extension', 'mp4')}"
                    else:
                        continue  # Skip series without episodes
                else:
                    # Fallback for series without episode data
                    series_id = stream.get("series_id", stream.get("stream_id", ""))
                    stream_url = f"{server_url}/series/{username}/{password}/{series_id}.mp4"
            else:
                continue  # Defensive: skip unknown content types

            # Apply stream proxying if enabled
            if not no_stream_proxy:
                stream_url = f"{proxy_url}/stream-proxy/{encode_url(stream_url)}"

            # Add stream to playlist
            m3u_playlist += f'#EXTINF:0 {" ".join(tags)},{stream_name}\n'
            m3u_playlist += f"{stream_url}\n"
    # Log included groups after filtering
    logger.info(f"Groups included after filtering: {sorted(included_groups)}")
    logger.info(f"Groups excluded after filtering: {sorted(all_groups - included_groups)}")

    # Determine filename based on content included
    filename = "FullPlaylist.m3u" if include_vod else "LiveStream.m3u"

    logger.info(f"✅ M3U generation complete! Generated playlist with {len(included_groups)} groups")

    # Return the M3U playlist with proper CORS headers for the frontend
    headers = {
        "Content-Disposition": f"attachment; filename={filename}",
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
        "Access-Control-Allow-Headers": "Content-Type",
    }
    # "audio/x-mpegurl" is the conventional MIME type for M3U playlists
    return Response(m3u_playlist, mimetype="audio/x-mpegurl", headers=headers)

if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run the Flask app.")
parser.add_argument(
"--port", type=int, default=5000, help="Port number to run the app on"
)
args = parser.parse_args()
app.run(debug=True, host="0.0.0.0", port=args.port)