import base64
import json
import logging
import os
import re
from getpass import getpass
from dataclasses import dataclass
from shutil import copyfileobj
from xml.etree import ElementTree

import ffmpeg
from tqdm import tqdm

from utils.models import *
from utils.utils import sanitise_name, silentremove, download_to_temp, create_temp_filename
from .tidal_api import TidalTvSession, TidalApi, SessionStorage, TidalMobileSession, SessionType

module_information = ModuleInformation(
    service_name='Tidal',
    module_supported_modes=ModuleModes.download | ModuleModes.credits | ModuleModes.covers | ModuleModes.lyrics,
    login_behaviour=ManualEnum.manual,
    global_settings={
        'tv_token': '7m7Ap0JC9j1cOM3n',
        'tv_secret': 'vRAdA108tlvkJpTsGZS8rGZ7xTlbJ0qaZ2K9saEzsgY=',
        'mobile_token': 'dN2N95wCyEBTllu4',
        'enable_mobile': True
    },
    session_storage_variables=[SessionType.TV.name, SessionType.MOBILE.name],
    netlocation_constant='tidal',
    test_url='https://tidal.com/browse/track/92265335'
)


@dataclass
class AudioTrack:
    codec: CodecEnum
    sample_rate: int
    bitrate: int
    urls: list
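    # Illustration only (made-up values): parse_mpd() below returns instances shaped like
    # AudioTrack(codec=CodecEnum.FLAC, sample_rate=44100, bitrate=1411000,
    #            urls=['https://.../init.mp4', 'https://.../segment_1.mp4', ...])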


class ModuleInterface:
    # noinspection PyTypeChecker
    def __init__(self, module_controller: ModuleController):
        self.cover_size = module_controller.orpheus_options.default_cover_options.resolution
        self.oprinter = module_controller.printer_controller
        self.print = module_controller.printer_controller.oprint
        self.disable_subscription_check = module_controller.orpheus_options.disable_subscription_check

        settings = module_controller.module_settings

        # LOW = 96kbit/s AAC, HIGH = 320kbit/s AAC, LOSSLESS = 44.1/16 FLAC, HI_RES <= 48/24 FLAC with MQA
        self.quality_parse = {
            QualityEnum.LOW: 'LOW',
            QualityEnum.MEDIUM: 'HIGH',
            QualityEnum.HIGH: 'HIGH',
            QualityEnum.LOSSLESS: 'LOSSLESS',
            QualityEnum.HIFI: 'HI_RES'
        }
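        # e.g. a user quality of QualityEnum.HIFI is requested from Tidal as 'HI_RES' (MQA); the mapped
        # value is what gets passed to get_stream_url() in get_track_info() below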

        sessions = {}
        self.available_sessions = [SessionType.TV.name, SessionType.MOBILE.name]

        if settings['enable_mobile']:
            storage: SessionStorage = module_controller.temporary_settings_controller.read(SessionType.MOBILE.name)
            if not storage:
                confirm = input(' "enable_mobile" is enabled but no MOBILE session was found. Do you want to create a '
                                'MOBILE session (used for AC-4/360RA) [Y/n]? ')
                if confirm.upper() == 'N':
                    self.available_sessions = [SessionType.TV.name]
        else:
            self.available_sessions = [SessionType.TV.name]

        for session_type in self.available_sessions:
            storage: SessionStorage = module_controller.temporary_settings_controller.read(session_type)

            if session_type == SessionType.TV.name:
                sessions[session_type] = TidalTvSession(settings['tv_token'], settings['tv_secret'])
            else:
                sessions[session_type] = TidalMobileSession(settings['mobile_token'])

            if storage:
                logging.debug(f'Tidal: {session_type} session found, loading')
                sessions[session_type].set_storage(storage)
            else:
                logging.debug(f'Tidal: No {session_type} session found, creating new one')
                if session_type == SessionType.TV.name:
                    sessions[session_type].auth()
                else:
                    self.print('Tidal: Enter your Tidal username and password:')
                    username = input(' Username: ')
                    password = getpass(' Password: ')
                    sessions[session_type].auth(username, password)
                self.print('Successfully logged in!')

                module_controller.temporary_settings_controller.set(session_type, sessions[session_type].get_storage())

            # Always try to refresh session
            if not sessions[session_type].valid():
                sessions[session_type].refresh()
                # Save the refreshed session in the temporary settings
                module_controller.temporary_settings_controller.set(session_type, sessions[session_type].get_storage())

            while True:
                # check for a valid subscription
                subscription = self.check_subscription(sessions[session_type].get_subscription())
                if subscription:
                    break

                confirm = input(' Do you want to create a new session? [Y/n]: ')

                if confirm.upper() == 'N':
                    self.print('Exiting...')
                    exit()

                # create a new session finally
                if session_type == SessionType.TV.name:
                    sessions[session_type].auth()
                else:
                    self.print('Tidal: Enter your Tidal username and password:')
                    username = input('Username: ')
                    password = getpass('Password: ')
                    sessions[session_type].auth(username, password)

                module_controller.temporary_settings_controller.set(session_type,
                                                                    sessions[session_type].get_storage())

        self.session: TidalApi = TidalApi(sessions)

        # Track cache for credits
        self.track_cache = {}
        # Album cache
        self.album_cache = {}

    def check_subscription(self, subscription: str) -> bool:
        # returns True if "disable_subscription_check" is enabled or the subscription is one of the
        # accepted tiers ('HIFI', 'PREMIUM_PLUS')
        if not self.disable_subscription_check and subscription not in {'HIFI', 'PREMIUM_PLUS'}:
            self.print(f'Tidal: Account is not a HiFi Plus account, detected subscription: {subscription}')
            return False
        return True

    @staticmethod
    def generate_artwork_url(cover_id: str, size: int, max_size: int = 1280):
        # not the best idea, but it rounds the requested size to the nearest entry in supported_sizes;
        # 1281 is needed for the "uncompressed" cover
        supported_sizes = [80, 160, 320, 480, 640, 1080, 1280, 1281]
        best_size = min(supported_sizes, key=lambda x: abs(x - size))
        # non-playlist covers only support 80x80, 160x160, 320x320, 480x480, 640x640, 1080x1080 and 1280x1280
        # return the "uncompressed" cover if the requested resolution exceeds max_size
        image_name = '{0}x{0}.jpg'.format(best_size) if best_size <= max_size else 'origin.jpg'
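        # Illustration (hypothetical cover id): a call like
        #   generate_artwork_url('aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee', 1200)
        # rounds 1200 up to 1280 and returns
        #   'https://resources.tidal.com/images/aaaaaaaa/bbbb/cccc/dddd/eeeeeeeeeeee/1280x1280.jpg'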
        return f'https://resources.tidal.com/images/{cover_id.replace("-", "/")}/{image_name}'

    @staticmethod
    def generate_animated_artwork_url(cover_id: str, size=1280):
        return 'https://resources.tidal.com/videos/{0}/{1}x{1}.mp4'.format(cover_id.replace('-', '/'), size)

    def search(self, query_type: DownloadTypeEnum, query: str, track_info: TrackInfo = None, limit: int = 20):
        results = self.session.get_search_data(query, limit=limit)

        items = []
        for i in results[query_type.name + 's']['items']:
            if query_type is DownloadTypeEnum.artist:
                name = i['name']
                artists = None
                year = None
            elif query_type is DownloadTypeEnum.playlist:
                name = i['title']
                artists = [i['creator']['name']]
                year = ""
            elif query_type is DownloadTypeEnum.track:
                name = i['title']
                artists = [j['name'] for j in i['artists']]
                # Getting the year from the album?
                year = i['album']['releaseDate'][:4]
            elif query_type is DownloadTypeEnum.album:
                name = i['title']
                artists = [j['name'] for j in i['artists']]
                year = i['releaseDate'][:4]
            else:
                raise Exception('Query type is invalid')

            additional = None
            if query_type is not DownloadTypeEnum.artist:
                if i['audioModes'] == ['DOLBY_ATMOS']:
                    additional = "Dolby Atmos"
                elif i['audioModes'] == ['SONY_360RA']:
                    additional = "360 Reality Audio"
                elif i['audioQuality'] == 'HI_RES':
                    additional = "MQA"
                else:
                    additional = 'HiFi'

            item = SearchResult(
                name=name,
                artists=artists,
                year=year,
                result_id=str(i['id']),
                explicit=bool(i['explicit']) if 'explicit' in i else None,
                additional=[additional] if additional else None
            )

            items.append(item)

        return items

    def get_track_info(self, track_id: str, quality_tier: QualityEnum, codec_options: CodecOptions) -> TrackInfo:
        track_data = self.session.get_track(track_id)

        album_id = str(track_data['album']['id'])

        # Use the cached album data if available, otherwise fetch it
        if album_id in self.album_cache:
            album_data = self.album_cache[album_id]
        else:
            album_data = self.session.get_album(album_id)

        # Sony 360RA requires the mobile session, so switch to it if available
        if track_data['audioModes'] == ['SONY_360RA'] and SessionType.MOBILE.name in self.available_sessions:
            self.session.default = SessionType.MOBILE
        else:
            self.session.default = SessionType.TV

        stream_data = self.session.get_stream_url(track_id, self.quality_parse[quality_tier])
        # Only needed for MPEG-DASH
        audio_track = None
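
        # The stream manifest comes base64-encoded in stream_data; for DASH it is an MPD document,
        # otherwise it decodes to a small JSON object whose 'codecs' and 'urls' keys are used below
        # (roughly {"codecs": "flac", "urls": ["https://..."]} - illustrative shape only)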
        if stream_data['manifestMimeType'] == 'application/dash+xml':
            manifest = base64.b64decode(stream_data['manifest'])
            audio_track = self.parse_mpd(manifest)[0]  # Only one AudioTrack?
            track_codec = audio_track.codec
        else:
            manifest = json.loads(base64.b64decode(stream_data['manifest']))
            track_codec = CodecEnum['AAC' if 'mp4a' in manifest['codecs'] else manifest['codecs'].upper()]

        if not codec_data[track_codec].spatial:
            if not codec_options.proprietary_codecs and codec_data[track_codec].proprietary:
                self.print(f'Proprietary codecs are disabled, if you want to download {track_codec.name}, '
                           f'set "proprietary_codecs": true', drop_level=1)
                stream_data = self.session.get_stream_url(track_id, 'LOSSLESS')

                if stream_data['manifestMimeType'] == 'application/dash+xml':
                    manifest = base64.b64decode(stream_data['manifest'])
                    audio_track = self.parse_mpd(manifest)[0]  # Only one AudioTrack?
                    track_codec = audio_track.codec
                else:
                    manifest = json.loads(base64.b64decode(stream_data['manifest']))
                    track_codec = CodecEnum['AAC' if 'mp4a' in manifest['codecs'] else manifest['codecs'].upper()]

        track_name = track_data["title"]
        track_name += f' ({track_data["version"]})' if track_data['version'] else ''

        if audio_track:
            download_args = {'audio_track': audio_track}
        else:
            download_args = {'file_url': manifest['urls'][0]}

        track_info = TrackInfo(
            name=track_name,
            album=album_data['title'],
            album_id=album_id,
            artists=[a['name'] for a in track_data['artists']],
            artist_id=track_data['artist']['id'],
            release_year=track_data['streamStartDate'][:4],
            # TODO: Get correct bit_depth and sample_rate for MQA, even possible?
            bit_depth=24 if track_codec in [CodecEnum.MQA, CodecEnum.EAC3, CodecEnum.MHA1] else 16,
            sample_rate=48 if track_codec in [CodecEnum.EAC3, CodecEnum.MHA1] else 44.1,
            cover_url=self.generate_artwork_url(track_data['album']['cover'], size=self.cover_size),
            explicit=track_data['explicit'] if 'explicit' in track_data else None,
            tags=self.convert_tags(track_data, album_data),
            codec=track_codec,
            download_extra_kwargs=download_args,
            lyrics_extra_kwargs={'track_data': track_data}
        )

        if not codec_options.spatial_codecs and codec_data[track_codec].spatial:
            track_info.error = 'Spatial codecs are disabled, if you want to download it, set "spatial_codecs": true'

        return track_info

    @staticmethod
    def parse_mpd(xml: bytes) -> list:
        xml = xml.decode('UTF-8')
        # Removes default namespace definition, don't do that!
        xml = re.sub(r'xmlns="[^"]+"', '', xml, count=1)
        root = ElementTree.fromstring(xml)

        # List of AudioTracks
        tracks = []

        for period in root.findall('Period'):
            for adaptation_set in period.findall('AdaptationSet'):
                for rep in adaptation_set.findall('Representation'):
                    # Check if representation is audio
                    content_type = adaptation_set.get('contentType')
                    if content_type != 'audio':
                        raise ValueError('Only supports audio MPDs!')

                    # Codec checks
                    codec = rep.get('codecs').upper()
                    if codec.startswith('MP4A'):
                        codec = 'AAC'

                    # Segment template
                    seg_template = rep.find('SegmentTemplate')
                    # Add init file to track_urls
                    track_urls = [seg_template.get('initialization')]
                    start_number = int(seg_template.get('startNumber') or 1)

                    # https://dashif-documents.azurewebsites.net/Guidelines-TimingModel/master/Guidelines-TimingModel.html#addressing-explicit
                    # Also see example 9
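                    # Worked example (made-up values): a SegmentTimeline like
                    #   <S t="0" d="176128" r="2"/><S d="160000"/>
                    # expands to segment start times [0, 176128, 352256, 528384]; with startNumber="1"
                    # the $Number$ placeholder in the media template is then filled with 1..4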
                    seg_timeline = seg_template.find('SegmentTimeline')
                    if seg_timeline is not None:
                        seg_time_list = []
                        cur_time = 0

                        for s in seg_timeline.findall('S'):
                            # Media segments start time
                            if s.get('t'):
                                cur_time = int(s.get('t'))

                            # Segment reference
                            for i in range((int(s.get('r') or 0) + 1)):
                                seg_time_list.append(cur_time)
                                # Add duration to current time
                                cur_time += int(s.get('d'))

                        # Create list with $Number$ indices
                        seg_num_list = list(range(start_number, len(seg_time_list) + start_number))
                        # Replace $Number$ with all the seg_num_list indices
                        track_urls += [seg_template.get('media').replace('$Number$', str(n)) for n in seg_num_list]

                    tracks.append(AudioTrack(
                        codec=CodecEnum[codec],
                        sample_rate=int(rep.get('audioSamplingRate') or 0),
                        bitrate=int(rep.get('bandwidth') or 0),
                        urls=track_urls
                    ))

        return tracks

    def get_track_download(self, file_url: str = None, audio_track: AudioTrack = None) -> TrackDownloadInfo:
        # no MPEG-DASH, just a simple file
        if file_url:
            return TrackDownloadInfo(download_type=DownloadEnum.URL, file_url=file_url)

        # MPEG-DASH
        # use the total file size for a better progress bar? Is it even possible to calculate the total size from the MPD?
        try:
            columns = os.get_terminal_size().columns
            if os.name == 'nt':
                bar = tqdm(audio_track.urls, ncols=(columns - self.oprinter.indent_number),
                           bar_format=' ' * self.oprinter.indent_number + '{l_bar}{bar}{r_bar}')
            else:
                raise OSError
        except OSError:
            bar = tqdm(audio_track.urls, bar_format=' ' * self.oprinter.indent_number + '{l_bar}{bar}{r_bar}')

        # download all segments and save the locations inside temp_locations
        temp_locations = []
        for download_url in bar:
            temp_locations.append(download_to_temp(download_url, extension='mp4'))

        # concatenated/merged .mp4 file
        merged_temp_location = create_temp_filename() + '.mp4'
        # actual converted file (usually .flac)
        output_location = create_temp_filename() + '.' + codec_data[audio_track.codec].container.name

        # download is finished, merge the chunks into one file
        with open(merged_temp_location, 'wb') as dest_file:
            for temp_location in temp_locations:
                with open(temp_location, 'rb') as segment_file:
                    copyfileobj(segment_file, dest_file)

        # convert the merged .mp4 back to the target container (usually .flac)
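        # (ffmpeg-python just assembles a command line; the call below is roughly equivalent to running
        #  `ffmpeg -hide_banner -y -i <merged>.mp4 -acodec copy -loglevel error <output>.flac`)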
        try:
            ffmpeg.input(merged_temp_location, hide_banner=None, y=None).output(output_location, acodec='copy',
                                                                                loglevel='error').run()
            # Remove all temporary files
            silentremove(merged_temp_location)
            for temp_location in temp_locations:
                silentremove(temp_location)
        except Exception:
            self.print('FFmpeg is not installed or not working! Using fallback, may have errors')

            # return the MP4 temp file, but tell orpheus to change the container to .m4a (AAC)
            return TrackDownloadInfo(
                download_type=DownloadEnum.TEMP_FILE_PATH,
                temp_file_path=merged_temp_location,
                different_codec=CodecEnum.AAC
            )

        # return the converted file
        return TrackDownloadInfo(
            download_type=DownloadEnum.TEMP_FILE_PATH,
            temp_file_path=output_location,
        )

    def get_track_cover(self, track_id: str, cover_options: CoverOptions, data=None) -> CoverInfo:
        if data is None:
            data = {}

        track_data = data[track_id] if track_id in data else self.session.get_track(track_id)
        cover_id = track_data['album']['cover']

        # Tidal doesn't support PNG, so the cover will always be a JPG
        cover_url = self.generate_artwork_url(cover_id, size=cover_options.resolution)
        return CoverInfo(url=cover_url, file_type=ImageFileTypeEnum.jpg)

    def get_track_lyrics(self, track_id: str, track_data: dict) -> LyricsInfo:
        embedded, synced = None, None

        lyrics_data = self.session.get_lyrics(track_id)

        if 'error' in lyrics_data:
            # search for title and artist to find a matching track (non Atmos)
            results = self.search(
                DownloadTypeEnum.track,
                f'{track_data["title"]} {"".join(a["name"] for a in track_data["artists"])}',
                limit=10)

            # check every result to find a match
            best_tracks = [r.result_id for r in results
                           if r.name == track_data['title'] and
                           r.artists[0] == track_data['artist']['name'] and
                           'Dolby Atmos' not in r.additional]

            # retrieve the lyrics for the first match, otherwise fall back to an empty dict
            lyrics_data = self.session.get_lyrics(best_tracks[0]) if len(best_tracks) > 0 else {}

        if 'lyrics' in lyrics_data:
            embedded = lyrics_data['lyrics']

        if 'subtitles' in lyrics_data:
            synced = lyrics_data['subtitles']

        return LyricsInfo(
            embedded=embedded,
            synced=synced
        )

    def get_playlist_info(self, playlist_id: str) -> PlaylistInfo:
        playlist_data = self.session.get_playlist(playlist_id)
        playlist_tracks = self.session.get_playlist_items(playlist_id)

        tracks = [track['item']['id'] for track in playlist_tracks['items'] if track['type'] == 'track']

        if 'name' in playlist_data['creator']:
            creator_name = playlist_data['creator']['name']
        elif playlist_data['creator']['id'] == 0:
            creator_name = 'TIDAL'
        else:
            creator_name = 'Unknown'

        return PlaylistInfo(
            name=playlist_data['title'],
            creator=creator_name,
            tracks=tracks,
            # TODO: Use playlist creation date or lastUpdated?
            release_year=playlist_data['created'][:4],
            creator_id=playlist_data['creator']['id'],
            cover_url=self.generate_artwork_url(playlist_data['squareImage'], size=self.cover_size, max_size=1080)
        )

    def get_album_info(self, album_id):
        # Use the cached album data if available, otherwise fetch it
        if album_id in self.album_cache:
            album_data = self.album_cache[album_id]
        else:
            album_data = self.session.get_album(album_id)

        # Get all album tracks with corresponding credits
        tracks_data = self.session.get_album_contributors(album_id)

        tracks = [str(track['item']['id']) for track in tracks_data['items']]

        # Cache all tracks (+credits) in track_cache
        self.track_cache.update({str(track['item']['id']): track for track in tracks_data['items']})

        if album_data['audioModes'] == ['DOLBY_ATMOS']:
            quality = 'Dolby Atmos'
        elif album_data['audioModes'] == ['SONY_360RA']:
            quality = '360'
        elif album_data['audioQuality'] == 'HI_RES':
            quality = 'M'
        else:
            quality = None

        return AlbumInfo(
            name=album_data['title'],
            release_year=album_data['releaseDate'][:4],
            explicit=album_data['explicit'],
            quality=quality,
            upc=album_data['upc'],
            all_track_cover_jpg_url=self.generate_artwork_url(album_data['cover'],
                                                              size=self.cover_size) if album_data['cover'] else None,
            animated_cover_url=self.generate_animated_artwork_url(album_data['videoCover']) if album_data[
                'videoCover'] else None,
            artist=album_data['artist']['name'],
            artist_id=album_data['artist']['id'],
            tracks=tracks,
        )

    def get_artist_info(self, artist_id: str, get_credited_albums: bool) -> ArtistInfo:
        artist_data = self.session.get_artist(artist_id)

        artist_albums = self.session.get_artist_albums(artist_id)['items']
        artist_singles = self.session.get_artist_albums_ep_singles(artist_id)['items']

        # Only works with a mobile session, annoying, never do this again
        credit_albums = []
        if get_credited_albums and SessionType.MOBILE.name in self.available_sessions:
            self.session.default = SessionType.MOBILE
            credited_albums_page = self.session.get_page('contributor', params={'artistId': artist_id})

            # The paged list is buried deep inside the page response
            page_list = credited_albums_page['rows'][-1]['modules'][0]['pagedList']
            total_items = page_list['totalNumberOfItems']
            more_items_link = page_list['dataApiPath'][6:]

            # Now fetch all the found total_items
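            # (50 items per request, e.g. total_items = 120 results in requests at offsets 0, 50 and 100)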
            items = []
            for offset in range(0, total_items // 50 + 1):
                print(f'Fetching {offset * 50}/{total_items}', end='\r')
                items += self.session.get_page(more_items_link, params={'limit': 50, 'offset': offset * 50})['items']

            credit_albums = [item['item']['album'] for item in items]
            self.session.default = SessionType.TV

        albums = [str(album['id']) for album in artist_albums + artist_singles + credit_albums]

        return ArtistInfo(
            name=artist_data['name'],
            albums=albums
        )

    def get_track_credits(self, track_id: str) -> Optional[list]:
        credits_dict = {}

        # Fetch the credits from the cache if present, otherwise fetch them from the API
        if track_id in self.track_cache:
            track_contributors = self.track_cache[track_id]['credits']

            for contributor in track_contributors:
                credits_dict[contributor['type']] = [c['name'] for c in contributor['contributors']]
        else:
            track_contributors = self.session.get_track_contributors(track_id)['items']

            if len(track_contributors) > 0:
                for contributor in track_contributors:
                    # If the dict has no list for this role yet, create one
                    if contributor['role'] not in credits_dict:
                        credits_dict[contributor['role']] = []

                    credits_dict[contributor['role']].append(contributor['name'])
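
        # Illustration (made-up names): credits_dict ends up shaped like
        #   {'Producer': ['Person A'], 'Mixer': ['Person B', 'Person C']}
        # and is converted below into a list of CreditsInfo objects, one per role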
        if len(credits_dict) > 0:
            # Convert the dictionary back to a list of CreditsInfo
            return [CreditsInfo(sanitise_name(k), v) for k, v in credits_dict.items()]
        return None

    @staticmethod
    def convert_tags(track_data: dict, album_data: dict) -> Tags:
        track_name = track_data["title"]
        track_name += f' ({track_data["version"]})' if track_data['version'] else ''

        return Tags(
            album_artist=album_data['artist']['name'],
            track_number=track_data['trackNumber'],
            total_tracks=album_data['numberOfTracks'],
            disc_number=track_data['volumeNumber'],
            total_discs=album_data['numberOfVolumes'],
            isrc=track_data['isrc'],
            upc=album_data['upc'],
            release_date=album_data['releaseDate'] if 'releaseDate' in album_data else None,
            copyright=track_data['copyright'],
            replay_gain=track_data['replayGain'],
            replay_peak=track_data['peak']
        )