Mirror of https://github.com/NohamR/OqeeRewind.git (synced 2026-01-11 00:28:16 +00:00)
Initial project setup with main modules and utilities
Add main application logic, utility modules for stream and time handling, DRM key fetching, and manifest parsing. Includes example environment file, requirements, .gitignore, and README with project goals and todos.
.env.example (new file, 2 lines)
@@ -0,0 +1,2 @@
API_KEY = 'your_api_key_here'
API_URL = 'https://example.com/get-cached-keys'
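For context, these two values are read via python-dotenv by utils/stream.py; a minimal sketch of that consumption (copy .env.example to .env and fill in real values first):

import os
from dotenv import load_dotenv

load_dotenv()                   # loads key/value pairs from .env into the environment
api_key = os.getenv("API_KEY")  # sent as the Api-Key header by fetch_drm_keys
api_url = os.getenv("API_URL")  # key-cache endpoint that fetch_drm_keys POSTs to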
.gitignore (new file, vendored, 222 lines)
@@ -0,0 +1,222 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[codz]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py.cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
# Pipfile.lock

# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# uv.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
# poetry.lock
# poetry.toml

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
# pdm.lock
# pdm.toml
.pdm-python
.pdm-build/

# pixi
# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
# pixi.lock
# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
# in the .venv directory. It is recommended not to include this directory in version control.
.pixi

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# Redis
*.rdb
*.aof
*.pid

# RabbitMQ
mnesia/
rabbitmq/
rabbitmq-data/

# ActiveMQ
activemq-data/

# SageMath parsed files
*.sage.py

# Environments
.env
.envrc
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
# .idea/

# Abstra
# Abstra is an AI-powered process automation framework.
# Ignore directories containing user credentials, local state, and settings.
# Learn more at https://abstra.io/docs
.abstra/

# Visual Studio Code
# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
# and can be added to the global gitignore or merged into this file. However, if you prefer,
# you could uncomment the following to ignore the entire vscode folder
# .vscode/

# Ruff stuff:
.ruff_cache/

# PyPI configuration file
.pypirc

# Marimo
marimo/_static/
marimo/_lsp/
__marimo__/

# Streamlit
.streamlit/secrets.toml
.DS_Store

# output
*.mkv
video/
dev_test/
README.md (new file, 17 lines)
@@ -0,0 +1,17 @@
# OqeeRewind - Oqee TV Live Downloader

## Todo
- [x] Bruteforce implementation
- [ ] Better README
- [ ] Lib used
- [ ] How to use
- [ ] Lib to install (pip + mp4ff + ffmpeg)
- [ ] Demo GIF
- [ ] License
- [ ] Lint code
- [ ] Full implementation
- [ ] French/English full translation
- [ ] Add more comments in the code
- [ ] Oqee widevine license implementation (.wvd) + mention README
- [ ] Better output system
- [ ] Verify mp4ff installation
main.py (new file, 214 lines)
@@ -0,0 +1,214 @@
"""Main module for Oqee channel selection and stream management."""
from pprint import pprint

import requests
from InquirerPy import prompt
from InquirerPy.validator import EmptyInputValidator
from InquirerPy.base.control import Choice

from utils.stream import (
    get_manifest,
    parse_mpd_manifest,
    organize_by_content_type
)

SERVICE_PLAN_API_URL = "https://api.oqee.net/api/v6/service_plan"


def select_oqee_channel():
    """Select an Oqee channel from the API.

    Returns:
        dict: Selected channel details or None if cancelled/error.
    """
    api_url = SERVICE_PLAN_API_URL
    try:
        print("Chargement de la liste des chaînes depuis l'API Oqee...")
        response = requests.get(api_url, timeout=10)
        response.raise_for_status()
        data = response.json()
        if not data.get("success") or "channels" not in data.get("result", {}):
            print("Erreur: Le format de la réponse de l'API est inattendu.")
            return None

        channels_data = data["result"]["channels"]
        choices = [
            {"name": f"{channel_info.get('name', 'Nom inconnu')}", "value": channel_id}
            for channel_id, channel_info in channels_data.items()
        ]
        choices.sort(key=lambda x: x['name'])

    except requests.exceptions.RequestException as e:
        print(f"Une erreur réseau est survenue : {e}")
        return None
    except ValueError:
        print("Erreur lors de l'analyse de la réponse JSON.")
        return None

    questions = [
        {
            "type": "fuzzy",
            "message": "Veuillez choisir une chaîne (tapez pour filtrer) :",
            "choices": choices,
            "multiselect": False,
            "validate": EmptyInputValidator(),
            "invalid_message": "Vous devez sélectionner une chaîne.",
            "long_instruction": "Utilisez les flèches pour naviguer, Entrée pour sélectionner.",
        }
    ]

    try:
        result = prompt(questions)
        selected_channel_id = result[0]
        selected_channel_details = channels_data.get(selected_channel_id)
        if selected_channel_details:
            print("\n✅ Vous avez sélectionné :")
            print(f" - Nom : {selected_channel_details.get('name')}")
            print(f" - ID : {selected_channel_details.get('id')}")
            print(f" - ID Freebox : {selected_channel_details.get('freebox_id')}")
        else:
            print("Impossible de retrouver les détails de la chaîne sélectionnée.")
        return selected_channel_details

    except KeyboardInterrupt:
        print("\nOpération annulée par l'utilisateur.")
        return None
    except (ValueError, KeyError, IndexError) as e:
        print(f"Une erreur inattendue est survenue : {e}")
        return None


def prompt_for_stream_selection(stream_info, already_selected_types):
    """Guide the user through selecting a stream, disabling already chosen types."""
    try:
        content_type_choices = [
            Choice(value, name=value, enabled=value not in already_selected_types)
            for value in stream_info.keys()
        ]

        questions = [
            {
                "type": "list",
                "message": "Quel type de flux souhaitez-vous sélectionner ?",
                "choices": content_type_choices
            }
        ]
        result = prompt(questions)
        if not result:
            return None
        selected_type = result[0]

        selected_content_data = stream_info[selected_type]

        questions = [
            {
                "type": "list",
                "message": f"Choisissez une qualité pour '{selected_type}':",
                "choices": list(selected_content_data.keys())
            }
        ]
        result = prompt(questions)
        if not result:
            return None
        quality_group_key = result[0]

        available_streams = selected_content_data[quality_group_key]

        final_selection = None
        if len(available_streams) == 1:
            final_selection = available_streams[0]
            print("Un seul flux disponible pour cette qualité, sélection automatique.")
        else:
            stream_choices = [
                {
                    "name": (
                        f"Bitrate: {s.get('bitrate_kbps')} kbps | "
                        f"Codec: {s.get('codec', 'N/A')} | ID: {s.get('track_id')}"
                    ),
                    "value": s
                }
                for s in available_streams
            ]
            questions = [
                {
                    "type": "list",
                    "message": "Plusieurs flux sont disponibles, choisissez-en un :",
                    "choices": stream_choices
                }
            ]
            result = prompt(questions)
            if not result:
                return None
            final_selection = result[0]

        final_selection['content_type'] = selected_type
        return final_selection

    except (KeyboardInterrupt, TypeError):
        return None


if __name__ == "__main__":
    try:
        selected_channel = select_oqee_channel()

        if selected_channel:
            print("\n✅ Chaîne sélectionnée :")
            print(f" - Nom : {selected_channel.get('name')}")
            print(f" - ID : {selected_channel.get('id')}")

            dash_id = selected_channel.get('streams', {}).get('dash')
            if dash_id:
                mpd_content = get_manifest(dash_id)
                manifest_info = parse_mpd_manifest(mpd_content)
                organized_info = organize_by_content_type(manifest_info)

                final_selections = {}

                while True:
                    selection = prompt_for_stream_selection(
                        organized_info, final_selections.keys()
                    )

                    if selection:
                        content_type = selection.pop('content_type')
                        final_selections[content_type] = selection

                        print("\n--- Récapitulatif de votre sélection ---")
                        for stream_type, details in final_selections.items():
                            bitrate = details.get('bitrate_kbps')
                            track_id = details.get('track_id')
                            print(
                                f" - {stream_type.capitalize()}: "
                                f"Bitrate {bitrate} kbps (ID: {track_id})"
                            )
                        print("----------------------------------------")

                    continue_prompt = [
                        {
                            "type": "list",
                            "message": "Que souhaitez-vous faire ?",
                            "choices": [
                                "Sélectionner un autre flux",
                                "Terminer et continuer"
                            ],
                        }
                    ]
                    action_result = prompt(continue_prompt)

                    if (
                        not action_result or
                        action_result[0] == "Terminer et continuer"
                    ):
                        break

                if final_selections:
                    print("\n✅ Sélection finale terminée. Voici les flux choisis :")
                    pprint(final_selections)
                else:
                    print("\nAucun flux n'a été sélectionné.")

            else:
                print("Aucun flux DASH trouvé pour cette chaîne.")

    except KeyboardInterrupt:
        print("\n\nProgramme interrompu par l'utilisateur. Au revoir !")
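For orientation, the non-interactive core of the flow above can be sketched in a few lines; the channel id '201' is taken from the comment in past.py ("201: france 2") and the call needs network access to the Oqee API:

from utils.stream import get_manifest, parse_mpd_manifest, organize_by_content_type

mpd_content = get_manifest('201')  # France 2, per the comment in past.py
organized = organize_by_content_type(parse_mpd_manifest(mpd_content))
print(list(organized['video'].keys()))  # available resolutions, highest first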
past.py (new file, 127 lines)
@@ -0,0 +1,127 @@
"""Script for testing and analyzing past Oqee streams and manifests."""
import datetime

from dotenv import load_dotenv

from utils.stream import fetch_drm_keys
from utils.times import (
    convert_date_to_sec,
    convert_sec_to_ticks
)

TIMESCALE = 90000
DURATION = 288000
load_dotenv()


if __name__ == '__main__':
    # mpd_content = get_manifest('201')  # 201: france 2
    # manifest_info = parse_mpd_manifest(mpd_content)

    # organized_info = organize_by_content_type(manifest_info)
    # with open('manifest_organized.json', 'w') as f:
    #     json.dump(organized_info, f, indent=4)

    drm_kid = "0dfa399a-425d-3095-0255-f357e2407edf"
    drm_kid = drm_kid.replace("-", "")
    print('kid: ', drm_kid)
    print(fetch_drm_keys(drm_kid))

    # dt = datetime.datetime.strptime("2023-12-14 23:02:14", "%Y-%m-%d %H:%M:%S")
    dt = datetime.datetime.strptime("2025-11-13 23:02:14", "%Y-%m-%d %H:%M:%S")
    tick = int(convert_sec_to_ticks(convert_date_to_sec(dt), TIMESCALE))
    # print(tick)

    # 1280x720, 3000
    # track_id = "0_1_390"
    # base = 153232896078968

    # 1920x1080, 14800
    # track_id = "0_1_3525"
    # not found

    # 1920x1080, 4800
    video_track_id = "0_1_3524"
    video_base1 = 153232896150968

    # audio fra_main
    audio_track_id = "0_1_384"
    audio_base2 = 153232896097804

    # asyncio.run(bruteforce(track_id, tick))

    # https://catalogue.ina.fr/doc/TV-RADIO/TV_8165000.001/Bordeaux_%2Bchampagne%2B_%2Bquand%2Bles%2Bescrocs%2Bs_attaquent%2Ba%2Bnos%2Bbouteilles%2B_
    # 14/12/2023 23:02:22 - 24:12:22
    # start = "2023-12-14 23:02:22"
    # start_tick1, start_rep1 = find_nearest_tick_by_hour(
    #     video_base1, start, TIMESCALE, DURATION
    # )
    # start_tick2, start_rep2 = find_nearest_tick_by_hour(
    #     audio_base2, start, TIMESCALE, DURATION
    # )

    # end = "2023-12-15 00:12:22"
    # end_tick1, end_rep1 = find_nearest_tick_by_hour(
    #     video_base1, end, TIMESCALE, DURATION
    # )
    # end_tick2, end_rep2 = find_nearest_tick_by_hour(
    #     audio_base2, end, TIMESCALE, DURATION
    # )

    # rep_nb = end_rep1 - start_rep1

    # diff_start = start_tick2 - start_tick1
    # diff_start_sec = convert_ticks_to_sec(diff_start, TIMESCALE)
    # print('diff_start_sec: ', diff_start_sec)
    # print(f"Total segments to fetch: {rep_nb}")

    # Download the segments
    # asyncio.run(save_segments(track_id, start_tick, rep_nb, duration))

    # cat $(ls -v *.m4s) > merged.m4s

    # get_init(track_id)
    # kid = get_kid(track_id)
    # print("KID:", kid)
    # key = fetch_drm_keys(kid)
    # print("KEY:", key)

    # mp4ff-decrypt -init init.mp4 -key f31708f7237632849c591202e3043417
    #     merged.m4s merged_dec.m4s
    # command_decrypt = (
    #     f"mp4ff-decrypt -init segments/segments_{track_id}/init.mp4 "
    #     f"-key {key} segments/segments_{track_id}/merged.m4s merged_dec.m4s"
    # )
    # print("Decrypt command:", command_decrypt)

    # ffmpeg -i "concat:init.mp4|merged_dec.m4s" -c copy output.mp4
    # command_ffmpeg = (
    #     f'ffmpeg -i "concat:segments/segments_{track_id}/init.mp4|'
    #     f'merged_dec.m4s" -c copy output.mp4'
    # )
    # print("FFmpeg command:", command_ffmpeg)

    # command_merge = (
    #     f"ffmpeg -i video.mp4 -itsoffset {diff_start_sec} "
    #     f"-i audio.mp4 -c copy -map 0:v -map 1:a output.mp4"
    # )
    # print("Merge command:", command_merge)

    # TF1 research (manifest id : 612)
    # 2023-06-10 is the latest available date for TF1 0_1_382 and 0_1_381

    # dt = datetime.datetime.strptime("2023-06-10 08:00:00", "%Y-%m-%d %H:%M:%S")
    # dt = datetime.datetime.strptime("2023-09-01 08:00:00", "%Y-%m-%d %H:%M:%S")
    # tick = int(convert_sec_to_ticks(convert_date_to_sec(dt), TIMESCALE))

    # 1280x720, 3000
    # track_id = "0_1_382"
    # asyncio.run(bruteforce(track_id, tick))
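The commented shell pipeline above (concatenate segments, decrypt, remux) can be sketched in Python as follows. This is an illustrative sketch, not part of the repo: the track id and key are the placeholder values from the comments, segment files follow save_segments' {tick}.m4s naming, and mp4ff-decrypt and ffmpeg must be on PATH:

import glob
import os
import subprocess

track_id = "0_1_3524"                     # placeholder track id from the comments above
seg_dir = f"segments/segments_{track_id}"
key = "f31708f7237632849c591202e3043417"  # example key from the comments above

# Equivalent of `cat $(ls -v *.m4s) > merged.m4s`: numeric sort, then concatenate.
segments = sorted(
    glob.glob(os.path.join(seg_dir, "[0-9]*.m4s")),
    key=lambda p: int(os.path.basename(p).removesuffix(".m4s")),
)
with open(os.path.join(seg_dir, "merged.m4s"), "wb") as out:
    for seg in segments:
        with open(seg, "rb") as f:
            out.write(f.read())

# Decrypt with the CENC key, then remux init + decrypted media into an MP4.
subprocess.run(["mp4ff-decrypt", "-init", f"{seg_dir}/init.mp4", "-key", key,
                f"{seg_dir}/merged.m4s", f"{seg_dir}/merged_dec.m4s"], check=True)
subprocess.run(["ffmpeg", "-i", f"concat:{seg_dir}/init.mp4|{seg_dir}/merged_dec.m4s",
                "-c", "copy", "output.mp4"], check=True)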
pssh.py (new file, 8 lines)
@@ -0,0 +1,8 @@
"""Scratch script: rebuild the key ID list inside a Widevine PSSH box."""
from uuid import UUID
from pywidevine.pssh import PSSH


data = "AAAAiHBzc2gAAAAA7e+LqXnWSs6jyCfc1R0h7QAAAGgIARIQrKzUjhLvvbqkebbW2/EQtBIQWxKIsxtqP3iaIFYUu9f6xxIQXn4atxoopds39jbUXbiFVBIQUUJpv9uuzWKv4ccKTtooMRIQocf9FUFCoGm775zPIBr3HRoAKgAyADgASABQAA=="
pssh = PSSH(data)
pssh.set_key_ids([
    UUID("540103d1e13713f8ebdc90e468e6f97e"),
    UUID("acacd48e12efbdbaa479b6d6dbf110b4"),
    UUID("5b1288b31b6a3f789a205614bbd7fac7"),
    UUID("514269bfdbaecd62afe1c70a4eda2831"),
    UUID("a1c7fd154142a069bbef9ccf201af71d"),
])
print(pssh)
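For reference, a stdlib-only sketch of how the dashed KID form found in an MPD's cenc:default_KID attribute relates to the undashed hex form that fetch_drm_keys and set_key_ids expect (example KID taken from past.py):

from uuid import UUID

kid_dashed = "0dfa399a-425d-3095-0255-f357e2407edf"
kid_hex = UUID(kid_dashed).hex  # 32 hex chars, no dashes
assert kid_hex == kid_dashed.replace("-", "")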
requirements.txt (new file, 5 lines)
@@ -0,0 +1,5 @@
aiohttp==3.13.2
InquirerPy==0.3.4
python-dotenv==1.2.1
pywidevine==1.9.0
Requests==2.32.5
utils/stream.py (new file, 484 lines)
@@ -0,0 +1,484 @@
"""Utility module for streaming and manifest parsing."""
import xml.etree.ElementTree as ET
import base64
import os
import asyncio
import time
from typing import Dict, Any

import requests
from dotenv import load_dotenv
import aiohttp

load_dotenv()


def fetch_drm_keys(kid: str) -> str:
    """Fetch DRM keys for a given KID.

    Args:
        kid: The key identifier string.

    Returns:
        The DRM key as a string.
    """
    headers = {
        'Content-Type': 'application/json',
        'Api-Key': os.getenv("API_KEY"),
    }
    data = {"service": "oqee", "kid": kid}
    response = requests.post(
        os.getenv("API_URL"), headers=headers, json=data, timeout=10
    )
    return response.json()["key"]


def parse_mpd_manifest(mpd_content: str) -> Dict[str, Any]:
    """Parse an MPD manifest and extract metadata.

    Args:
        mpd_content: The MPD manifest content as a string.

    Returns:
        A dictionary containing parsed manifest information.
    """
    root = ET.fromstring(mpd_content)
    namespaces = {
        'mpd': 'urn:mpeg:dash:schema:mpd:2011',
        'cenc': 'urn:mpeg:cenc:2013'
    }

    manifest_info = {
        'type': root.get('type'),
        'profiles': root.get('profiles'),
        'publishTime': root.get('publishTime'),
        'availabilityStartTime': root.get('availabilityStartTime'),
        'minimumUpdatePeriod': root.get('minimumUpdatePeriod'),
        'minBufferTime': root.get('minBufferTime'),
        'timeShiftBufferDepth': root.get('timeShiftBufferDepth'),
        'suggestedPresentationDelay': root.get('suggestedPresentationDelay'),
        'periods': []
    }

    for period in root.findall('mpd:Period', namespaces):
        period_info = {
            'id': period.get('id'),
            'start': period.get('start'),
            'adaptation_sets': []
        }
        for adaptation_set in period.findall('mpd:AdaptationSet', namespaces):
            adaptation_info = parse_adaptation_set(adaptation_set, namespaces)
            period_info['adaptation_sets'].append(adaptation_info)
        manifest_info['periods'].append(period_info)
    return manifest_info


def parse_adaptation_set(
    adaptation_set: ET.Element, namespaces: Dict[str, str]
) -> Dict[str, Any]:
    """Parse an AdaptationSet element from MPD manifest.

    Args:
        adaptation_set: The AdaptationSet XML element.
        namespaces: XML namespaces dictionary.

    Returns:
        A dictionary containing parsed adaptation set information.
    """
    adaptation_info = {
        'id': adaptation_set.get('id'),
        'group': adaptation_set.get('group'),
        'contentType': adaptation_set.get('contentType'),
        'lang': adaptation_set.get('lang'),
        'segmentAlignment': adaptation_set.get('segmentAlignment'),
        'startWithSAP': adaptation_set.get('startWithSAP'),
        'drm_info': [],
        'representations': []
    }

    # Parse ContentProtection
    for content_protection in adaptation_set.findall(
        'mpd:ContentProtection', namespaces
    ):
        drm_info = parse_content_protection(content_protection, namespaces)
        adaptation_info['drm_info'].append(drm_info)

    # Parse Role
    role = adaptation_set.find('mpd:Role', namespaces)
    if role is not None:
        adaptation_info['role'] = role.get('value')

    # Parse Representations
    for representation in adaptation_set.findall('mpd:Representation', namespaces):
        rep_info = parse_representation(representation, namespaces)
        adaptation_info['representations'].append(rep_info)

    return adaptation_info


def parse_content_protection(
    content_protection: ET.Element, namespaces: Dict[str, str]
) -> Dict[str, Any]:
    """Parse ContentProtection element for DRM information.

    Args:
        content_protection: The ContentProtection XML element.
        namespaces: XML namespaces dictionary.

    Returns:
        A dictionary containing DRM information.
    """
    drm_info = {
        'schemeIdUri': content_protection.get('schemeIdUri'),
        'value': content_protection.get('value')
    }

    default_kid = content_protection.get('{urn:mpeg:cenc:2013}default_KID')
    if default_kid:
        drm_info['default_KID'] = default_kid

    pssh_element = content_protection.find('cenc:pssh', namespaces)
    if pssh_element is not None and pssh_element.text:
        drm_info['pssh'] = pssh_element.text.strip()
        try:
            pssh_decoded = base64.b64decode(drm_info['pssh'])
            drm_info['pssh_hex'] = pssh_decoded.hex()
        except (ValueError, base64.binascii.Error):
            pass

    return drm_info


def parse_representation(
    representation: ET.Element, namespaces: Dict[str, str]
) -> Dict[str, Any]:
    """Parse Representation element from MPD manifest.

    Args:
        representation: The Representation XML element.
        namespaces: XML namespaces dictionary.

    Returns:
        A dictionary containing parsed representation information.
    """
    rep_info = {
        'id': representation.get('id'),
        'bandwidth': representation.get('bandwidth'),
        'codecs': representation.get('codecs'),
        'mimeType': representation.get('mimeType'),
        'width': representation.get('width'),
        'height': representation.get('height'),
        'frameRate': representation.get('frameRate'),
        'segments': {}
    }

    segment_template = representation.find('mpd:SegmentTemplate', namespaces)
    if segment_template is not None:
        rep_info['segments'] = {
            'timescale': segment_template.get('timescale'),
            'initialization': segment_template.get('initialization'),
            'media': segment_template.get('media'),
            'timeline': []
        }

        segment_timeline = segment_template.find('mpd:SegmentTimeline', namespaces)
        if segment_timeline is not None:
            for s_element in segment_timeline.findall('mpd:S', namespaces):
                timeline_info = {
                    't': s_element.get('t'),  # start time
                    'd': s_element.get('d'),  # duration
                    'r': s_element.get('r')   # repeat count
                }
                rep_info['segments']['timeline'].append(timeline_info)

    return rep_info


def organize_by_content_type(manifest_info: Dict[str, Any]) -> Dict[str, Any]:
    """Organize manifest information by content type.

    Args:
        manifest_info: Parsed manifest information dictionary.

    Returns:
        A dictionary organized by content type (video, audio, text).
    """
    organized = {
        'video': {},
        'audio': {},
        'text': {},
        'manifest_metadata': {
            'type': manifest_info.get('type'),
            'publishTime': manifest_info.get('publishTime'),
            'minBufferTime': manifest_info.get('minBufferTime'),
        }
    }

    for period in manifest_info.get('periods', []):
        for adaptation_set in period.get('adaptation_sets', []):
            content_type = adaptation_set.get('contentType')

            if not content_type:
                continue

            for rep in adaptation_set.get('representations', []):
                track_info = {
                    'track_id': rep.get('id'),
                    'adaptation_set_id': adaptation_set.get('id'),
                    'bandwidth': int(rep.get('bandwidth', 0)),
                    'bitrate_kbps': int(rep.get('bandwidth', 0)) // 1000,
                    'codec': rep.get('codecs'),
                    'mime_type': rep.get('mimeType'),
                    'drm_info': adaptation_set.get('drm_info', []),
                    'segments': rep.get('segments', {}),
                }

                if content_type == 'video':
                    width = rep.get('width')
                    height = rep.get('height')
                    frame_rate = rep.get('frameRate')

                    track_info.update({
                        'resolution': (
                            f"{width}x{height}" if width and height else 'unknown'
                        ),
                        'width': int(width) if width else None,
                        'height': int(height) if height else None,
                        'frame_rate': frame_rate,
                    })

                    resolution_key = track_info['resolution']
                    if resolution_key not in organized['video']:
                        organized['video'][resolution_key] = []
                    organized['video'][resolution_key].append(track_info)

                elif content_type == 'audio':
                    lang = adaptation_set.get('lang', 'unknown')
                    role = adaptation_set.get('role', 'main')

                    track_info.update({
                        'language': lang,
                        'role': role,
                    })

                    lang_key = f"{lang}_{role}"
                    if lang_key not in organized['audio']:
                        organized['audio'][lang_key] = []
                    organized['audio'][lang_key].append(track_info)

                elif content_type == 'text':
                    lang = adaptation_set.get('lang', 'unknown')
                    role = adaptation_set.get('role', 'caption')

                    track_info.update({
                        'language': lang,
                        'role': role,
                    })

                    lang_key = f"{lang}_{role}"
                    if lang_key not in organized['text']:
                        organized['text'][lang_key] = []
                    organized['text'][lang_key].append(track_info)

    # Sort video tracks by resolution (descending) and then by bitrate (descending)
    for resolution in organized['video']:
        organized['video'][resolution].sort(
            key=lambda x: x['bandwidth'], reverse=True
        )

    # Sort audio tracks by bitrate (descending)
    for lang in organized['audio']:
        organized['audio'][lang].sort(key=lambda x: x['bandwidth'], reverse=True)

    # Sort video resolutions by pixel count (descending)
    sorted_video = {}
    for resolution in sorted(
        organized['video'].keys(),
        key=lambda r: (
            int(r.split('x')[0]) * int(r.split('x')[1])
            if 'x' in r and r.split('x')[0].isdigit() else 0
        ),
        reverse=True
    ):
        sorted_video[resolution] = organized['video'][resolution]
    organized['video'] = sorted_video

    return organized
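# For orientation, the structure organize_by_content_type returns looks like the
# sketch below. Values are illustrative only: the track ids and kbps figures mirror
# the example in past.py, and the codec string is a made-up placeholder.
#
# organized = {
#     'video': {
#         '1920x1080': [
#             {'track_id': '0_1_3524', 'bandwidth': 4800000, 'bitrate_kbps': 4800,
#              'codec': 'avc1.640028', 'resolution': '1920x1080',
#              'width': 1920, 'height': 1080},
#         ],
#     },
#     'audio': {
#         'fra_main': [
#             {'track_id': '0_1_384', 'bitrate_kbps': 128,
#              'language': 'fra', 'role': 'main'},
#         ],
#     },
#     'text': {},
#     'manifest_metadata': {'type': 'dynamic'},
# }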

def get_manifest(manifest_id):
    """Fetch the MPD manifest for a given channel ID.

    Args:
        manifest_id: The channel/manifest identifier.

    Returns:
        The manifest content as text.
    """
    headers = {
        'accept': '*/*',
        'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',
        'cache-control': 'no-cache',
        'origin': 'https://tv.free.fr',
        'pragma': 'no-cache',
        'priority': 'u=1, i',
        'referer': 'https://tv.free.fr/',
        'sec-ch-ua': '"Google Chrome";v="143", "Chromium";v="143", "Not A(Brand";v="24"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"macOS"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'cross-site',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/143.0.0.0 Safari/537.36',
    }

    url = (
        f'https://api-proxad.dc2.oqee.net/playlist/v1/live/'
        f'{manifest_id}/1/live.mpd'
    )
    response = requests.get(url, headers=headers, timeout=10)
    return response.text


async def fetch_segment(session, ticks, track_id):
    """Fetch a media segment asynchronously.

    Args:
        session: The aiohttp ClientSession.
        ticks: The tick value for the segment.
        track_id: The track identifier.

    Returns:
        The tick value if successful, None otherwise.
    """
    url = f"https://media.stream.proxad.net/media/{track_id}_{ticks}"
    headers = {
        'Accept': '*/*',
        'Referer': 'https://tv.free.fr/',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/143.0.0.0 Safari/537.36',
    }
    try:
        async with session.get(url, headers=headers) as resp:
            if resp.status == 200:
                print(f"✅ {ticks} → 200 OK")
                return ticks
            print(f"❌ {ticks} → {resp.status}")
            return None
    except aiohttp.ClientError as e:
        print(f"⚠️ {ticks} → {e}")
        return None


def get_init(track_id):
    """Download and save the initialization segment for a track.

    Args:
        track_id: The track identifier.
    """
    url = f"https://media.stream.proxad.net/media/{track_id}_init"
    headers = {
        'Accept': '*/*',
        'Referer': 'https://tv.free.fr/',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/143.0.0.0 Safari/537.36',
    }
    response = requests.get(url, headers=headers, timeout=10)
    if response.status_code == 200:
        with open(f'segments/segments_{track_id}/init.mp4', 'wb') as f:
            f.write(response.content)
        print(f"✅ Saved initialization segment to segments/segments_{track_id}/init.mp4")


async def save_segments(track_id, start_tick, rep_nb, duration):
    """Download and save multiple media segments.

    Args:
        track_id: The track identifier.
        start_tick: The starting tick value.
        rep_nb: The number of segments to download.
        duration: The duration per segment.
    """
    os.makedirs(f'segments/segments_{track_id}', exist_ok=True)

    async def download_segment(session, tick, rep):
        """Download a single segment."""
        url = f"https://media.stream.proxad.net/media/{track_id}_{tick}"
        headers = {
            'Accept': '*/*',
            'Referer': 'https://tv.free.fr/',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/143.0.0.0 Safari/537.36',
        }
        try:
            async with session.get(url, headers=headers) as resp:
                if resp.status == 200:
                    content = await resp.read()
                    filename = f"segments/segments_{track_id}/{tick}.m4s"
                    with open(filename, 'wb') as f:
                        f.write(content)
                    print(
                        f"✅ Saved segment {rep} (tick {tick}) to {filename}"
                    )
                    return True
                print(
                    f"❌ Failed to download segment {rep} (tick {tick}): "
                    f"HTTP {resp.status}"
                )
                return False
        except aiohttp.ClientError as e:
            print(f"⚠️ Error downloading segment {rep} (tick {tick}): {e}")
            return False

    print(f"\n🎬 Starting download of {rep_nb} segments...")
    print(f"📦 Track ID: {track_id}")
    print(f"🎯 Base tick: {start_tick}")
    print(f"{'='*50}\n")

    start_time = time.time()
    successful = 0
    failed = 0

    async with aiohttp.ClientSession() as session:
        tasks = []
        for i in range(rep_nb):
            # Consecutive segments are spaced exactly one segment-duration apart.
            tick = start_tick + i * duration
            tasks.append(download_segment(session, tick, i))

        results = await asyncio.gather(*tasks, return_exceptions=True)
        successful = sum(1 for r in results if r is True)
        failed = rep_nb - successful

    end_time = time.time()
    elapsed = end_time - start_time

    print(f"\n{'='*50}")
    print(f"✅ Download completed in {elapsed:.2f}s")
    print(f"📊 Successful: {successful}/{rep_nb}")
    print(f"❌ Failed: {failed}/{rep_nb}")
    print(f"💾 Files saved to segments/segments_{track_id}/")
    print(f"{'='*50}")


def get_kid(track_id):
    """Extract the Key ID (KID) from downloaded segments.

    Args:
        track_id: The track identifier.

    Returns:
        The KID as a hex string if found, None otherwise.
    """
    folder = f'segments/segments_{track_id}'
    for filename in os.listdir(folder):
        if filename.endswith('.m4s'):
            filepath = os.path.join(folder, filename)
            print(f"Checking file: {filepath}")
            with open(filepath, 'rb') as f:
                data = f.read()
                # Byte pattern immediately before the KID ('seig' sample group header)
                index = data.find(
                    b'\x73\x65\x69\x67\x00\x00\x00\x14'
                    b'\x00\x00\x00\x01\x00\x00\x01\x10'
                )
                if index != -1:
                    kid_bytes = data[index + 16:index + 16 + 16]
                    kid = kid_bytes.hex()
                    return kid
    return None
utils/times.py (new file, 125 lines)
@@ -0,0 +1,125 @@
"""Utility functions for time and tick conversions, and bruteforce operations."""
import asyncio
import datetime
import time

import aiohttp

from utils.stream import fetch_segment


def convert_ticks_to_sec(ticks, timescale):
    """Convert ticks to seconds."""
    return ticks / timescale


def convert_sec_to_ticks(seconds, timescale):
    """Convert seconds to ticks."""
    return seconds * timescale


def convert_sec_to_date(seconds, offset_hours=1):
    """Convert seconds to datetime with offset."""
    dt = datetime.datetime.utcfromtimestamp(seconds) + datetime.timedelta(hours=offset_hours)
    return dt


def convert_date_to_sec(dt, offset_hours=1):
    """Convert datetime to seconds with offset."""
    epoch = datetime.datetime(1970, 1, 1)
    utc_dt = dt - datetime.timedelta(hours=offset_hours)
    return (utc_dt - epoch).total_seconds()


def convert_date_to_ticks(dt, timescale, offset_hours=1):
    """Convert datetime to ticks with offset."""
    return int(round(convert_date_to_sec(dt, offset_hours) * timescale))


def past(rep, base, duration):
    """Calculate past tick."""
    return base - rep * duration


def future(rep, base, duration):
    """Calculate future tick."""
    return base + rep * duration
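# Worked example of the conversions above, using the constants from past.py
# (TIMESCALE = 90000, DURATION = 288000):
#   convert_ticks_to_sec(288000, 90000) == 3.2      -> one segment lasts 3.2 s
#   convert_sec_to_ticks(3600, 90000) == 324000000  -> one hour of ticks
#   324000000 / 288000 == 1125                      -> segments per hour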
async def bruteforce(track_id, date):
    """Bruteforce segments to find valid ticks."""
    valid_ticks = []

    total_requests = 288000
    pas = 20000

    for i in range(total_requests // pas):
        debut = pas * i
        fin = debut + pas

        segment_num = i + 1
        total_segments = total_requests // pas
        print(f"\n🚀 Starting bruteforce segment {segment_num}/{total_segments} "
              f"(ticks {debut} to {fin})...")

        checked_ticks = set()
        ticks_to_check = list(range(debut, fin))
        start_time = time.time()
        try:
            async with aiohttp.ClientSession() as session:
                tasks = [fetch_segment(session, t + date, track_id) for t in ticks_to_check]
                results = await asyncio.gather(*tasks, return_exceptions=True)
                new_valid = [r for r in results if r and not isinstance(r, Exception)]
                valid_ticks.extend(new_valid)

                # Mark all checked ticks
                checked_ticks.update(ticks_to_check)
        except KeyboardInterrupt:
            print("\n\n🛑 Interrupted by user (Ctrl+C)")
            # Save progress even if interrupted
            checked_ticks.update(list(ticks_to_check))  # Mark attempted as checked
        end_time = time.time()
        elapsed = end_time - start_time
        req_per_sec = len(ticks_to_check) / elapsed if elapsed > 0 else 0

        print(f"\n{'='*50}")
        print(f"✅ Completed in {elapsed:.2f}s")
        print(f"⚡ Speed: {req_per_sec:.2f} req/s")
        print(f"📊 Total checked: {len(checked_ticks)}/{total_requests}")
        print(f"🎯 Valid ticks found: {len(valid_ticks)}")
        # print(f"💾 Progress saved to {PROGRESS_FILE}")
        print(f"{'='*50}")
        if valid_ticks:
            print("Valid ticks:", valid_ticks)
            # break from the for loop if valid ticks found
            break


def find_nearest_tick_by_hour(base_tick, datetime_str, timescale, duration, offset_hours=1):
    """Find the nearest tick for a given datetime."""
    dt = datetime.datetime.strptime(datetime_str, "%Y-%m-%d %H:%M:%S")
    target_ticks = convert_date_to_ticks(dt, timescale, offset_hours)
    diff_ticks = base_tick - target_ticks
    rep_estimate = diff_ticks / duration

    # Determine if we need to go to past or future
    if rep_estimate < 0:
        # Target is in the future from base
        rep = int(round(abs(rep_estimate)))
        nearest_tick = base_tick + rep * duration
    else:
        # Target is in the past from base
        rep = int(round(rep_estimate))
        nearest_tick = base_tick - rep * duration

    nearest_seconds = convert_ticks_to_sec(nearest_tick, timescale)
    target_seconds = convert_ticks_to_sec(target_ticks, timescale)
    delta_seconds = abs(nearest_seconds - target_seconds)

    print(f"Requested datetime: {dt} (offset +{offset_hours}h)")
    print(f"Nearest rep: {rep}")
    print(f"Tick: {nearest_tick}")
    print(f"Date: {convert_sec_to_date(nearest_seconds, offset_hours)}")
    print(f"Difference: {delta_seconds:.2f} seconds")

    return nearest_tick, rep
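A hedged usage sketch of find_nearest_tick_by_hour, reusing video_base1 and the INA broadcast start time recorded in past.py (the result is only meaningful if that base tick is still valid server-side):

from utils.times import find_nearest_tick_by_hour

TIMESCALE = 90000
DURATION = 288000
tick, rep = find_nearest_tick_by_hour(
    153232896150968,        # video_base1 from past.py
    "2023-12-14 23:02:22",  # broadcast start from the INA example in past.py
    TIMESCALE, DURATION,
)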