import base64
import json
import logging
import os
import re
from dataclasses import dataclass
from datetime import datetime
from getpass import getpass
from shutil import copyfileobj
from xml.etree import ElementTree

import ffmpeg
from tqdm import tqdm

from utils.models import *
from utils.utils import sanitise_name, silentremove, download_to_temp, create_temp_filename, create_requests_session

from .mqa_identifier_python.mqa_identifier import MqaIdentifier
from .tidal_api import TidalTvSession, TidalApi, SessionStorage, TidalMobileSession, SessionType, TidalError
# Static metadata describing this module to the Orpheus core.
module_information = ModuleInformation(
    service_name='Tidal',
    module_supported_modes=ModuleModes.download | ModuleModes.credits | ModuleModes.covers | ModuleModes.lyrics,
    # login is driven manually inside __init__ (device/username flows), not by the core
    login_behaviour=ManualEnum.manual,
    global_settings={
        # API tokens for the different Tidal client types
        'tv_token': '7m7Ap0JC9j1cOM3n',
        'tv_secret': 'vRAdA108tlvkJpTsGZS8rGZ7xTlbJ0qaZ2K9saEzsgY=',
        'mobile_atmos_token': 'dN2N95wCyEBTllu4',
        'mobile_default_token': 'WAU9gXp3tHhK4Nns',
        # whether MOBILE sessions should be created (used for AC-4/360RA)
        'enable_mobile': True,
        # always use the non-spatial MOBILE_DEFAULT session when available
        'force_non_spatial': False,
        # prefer the AC-4 (mobile Atmos) stream for DOLBY_ATMOS tracks
        'prefer_ac4': False,
        # analyze MQA FLAC headers to recover original bit depth/sample rate
        'fix_mqa': True
    },
    session_storage_variables=['sessions'],
    netlocation_constant='tidal',
    test_url='https://tidal.com/browse/track/92265335'
)
@dataclass
class AudioTrack:
    """A single audio representation parsed from an MPEG-DASH manifest (see parse_mpd)."""
    codec: CodecEnum  # audio codec of the representation
    sample_rate: int  # sampling rate from the MPD (0 when absent)
    bitrate: int  # bandwidth attribute from the MPD (0 when absent)
    urls: list  # ordered segment URLs, init segment first
class ModuleInterface:
    # noinspection PyTypeChecker
    def __init__(self, module_controller: ModuleController):
        """Create or restore a Tidal session per enabled client type and wrap them in TidalApi.

        Flow per session type: instantiate the session with its API token, restore it
        from temporary settings if previously saved (otherwise authenticate
        interactively), refresh it when invalid, then loop until the account's
        subscription passes check_subscription (re-authenticating on demand).
        """
        self.cover_size = module_controller.orpheus_options.default_cover_options.resolution
        self.oprinter = module_controller.printer_controller
        self.print = module_controller.printer_controller.oprint
        self.disable_subscription_check = module_controller.orpheus_options.disable_subscription_check
        self.settings = module_controller.module_settings

        # LOW = 96kbit/s AAC, HIGH = 320kbit/s AAC, LOSSLESS = 44.1/16 FLAC, HI_RES <= 48/24 FLAC with MQA
        self.quality_parse = {
            QualityEnum.LOW: 'LOW',
            QualityEnum.MEDIUM: 'HIGH',
            QualityEnum.HIGH: 'HIGH',
            QualityEnum.LOSSLESS: 'LOSSLESS',
            QualityEnum.HIFI: 'HI_RES'
        }

        # save all the TidalSession objects
        sessions = {}
        self.available_sessions = [SessionType.TV.name, SessionType.MOBILE_DEFAULT.name, SessionType.MOBILE_ATMOS.name]

        # load all saved sessions (TV, Mobile Atmos, Mobile Default)
        saved_sessions = module_controller.temporary_settings_controller.read('sessions')
        if not saved_sessions:
            saved_sessions = {}

        if self.settings['enable_mobile']:
            # check all saved sessions for a session type starting with "MOBILE"
            if not any(session for session in saved_sessions.keys() if session[:6] == 'MOBILE'):
                confirm = input(' "enable_mobile" is enabled but no MOBILE session was found. Do you want to create a '
                                'MOBILE session (used for AC-4/360RA) [Y/n]? ')
                if confirm.upper() == 'N':
                    # user declined: stick to a TV-only session
                    self.available_sessions = [SessionType.TV.name]
        else:
            # mobile disabled entirely: only the TV session is used
            self.available_sessions = [SessionType.TV.name]

        username, password = None, None
        for session_type in self.available_sessions:
            # create all sessions with the needed API keys
            if session_type == SessionType.TV.name:
                sessions[session_type] = TidalTvSession(self.settings['tv_token'], self.settings['tv_secret'])
            elif session_type == SessionType.MOBILE_ATMOS.name:
                sessions[session_type] = TidalMobileSession(self.settings['mobile_atmos_token'])
            else:
                sessions[session_type] = TidalMobileSession(self.settings['mobile_default_token'])

            if session_type in saved_sessions:
                logging.debug(f'Tidal: {session_type} session found, loading')
                # load the dictionary from the temporary_settings_controller inside the TidalSession class
                sessions[session_type].set_storage(saved_sessions[session_type])
            else:
                logging.debug(f'Tidal: No {session_type} session found, creating new one')
                if session_type == SessionType.TV.name:
                    self.print('Tidal: Creating a TV session')
                    sessions[session_type].auth()
                else:
                    # reuse credentials across both MOBILE session types, ask only once
                    if not username or not password:
                        self.print('Tidal: Creating a Mobile session')
                        self.print('Tidal: Enter your Tidal username and password:')
                        username = input(' Username: ')
                        password = getpass(' Password: ')
                    sessions[session_type].auth(username, password)
                self.print(f'Successfully logged in, using {session_type} token!')

                # get the dict representation from the TidalSession object and save it into saved_sessions
                saved_sessions[session_type] = sessions[session_type].get_storage()
                module_controller.temporary_settings_controller.set('sessions', saved_sessions)

            # always try to refresh an invalid session
            if not sessions[session_type].valid():
                sessions[session_type].refresh()
                # save the refreshed session in the temporary settings
                saved_sessions[session_type] = sessions[session_type].get_storage()
                module_controller.temporary_settings_controller.set('sessions', saved_sessions)

            while True:
                # check for a valid subscription
                subscription = self.check_subscription(sessions[session_type].get_subscription())
                if subscription:
                    break

                confirm = input(' Do you want to create a new session? [Y/n]: ')

                if confirm.upper() == 'N':
                    self.print('Exiting...')
                    exit()

                # create a new session for this session type
                if session_type == SessionType.TV.name:
                    self.print('Tidal: Recreating a TV session')
                    sessions[session_type].auth()
                else:
                    self.print('Tidal: Recreating a Mobile session')
                    self.print('Tidal: Enter your Tidal username and password:')
                    username = input('Username: ')
                    password = getpass('Password: ')
                    sessions[session_type].auth(username, password)

                # persist the freshly created session
                saved_sessions[session_type] = sessions[session_type].get_storage()
                module_controller.temporary_settings_controller.set('sessions', saved_sessions)

            # reset username and password so the next account isn't reused accidentally
            username, password = None, None

        # load the Tidal API wrapper with all sessions (TV, Mobile Atmos, Mobile Default)
        self.session: TidalApi = TidalApi(sessions)
def check_subscription(self, subscription: str) -> bool:
|
|
|
|
|
# returns true if "disable_subscription_checks" is enabled or subscription is HIFI Plus
|
|
|
|
|
if not self.disable_subscription_check and subscription not in {'HIFI', 'PREMIUM_PLUS'}:
|
|
|
|
|
self.print(f'Tidal: Account is not a HiFi Plus account, detected subscription: {subscription}')
|
|
|
|
|
return False
|
|
|
|
|
return True
@staticmethod
|
|
|
|
|
def generate_artwork_url(cover_id: str, size: int, max_size: int = 1280):
|
2021-11-07 18:39:21 +01:00
|
|
|
# not the best idea, but it rounds the self.cover_size to the nearest number in supported_sizes, 1281 is needed
|
|
|
|
|
# for the "uncompressed" cover
|
|
|
|
|
supported_sizes = [80, 160, 320, 480, 640, 1080, 1280, 1281]
|
2022-01-20 01:05:30 +01:00
|
|
|
best_size = min(supported_sizes, key=lambda x: abs(x - size))
|
2021-11-07 18:39:21 +01:00
|
|
|
# only supports 80x80, 160x160, 320x320, 480x480, 640x640, 1080x1080 and 1280x1280 only for non playlists
|
|
|
|
|
# return "uncompressed" cover if self.cover_resolution > max_size
|
|
|
|
|
image_name = '{0}x{0}.jpg'.format(best_size) if best_size <= max_size else 'origin.jpg'
|
|
|
|
|
return f'https://resources.tidal.com/images/{cover_id.replace("-", "/")}/{image_name}'
@staticmethod
|
2022-01-20 01:05:30 +01:00
|
|
|
def generate_animated_artwork_url(cover_id: str, size=1280):
|
2021-09-17 18:21:26 +02:00
|
|
|
return 'https://resources.tidal.com/videos/{0}/{1}x{1}.mp4'.format(cover_id.replace('-', '/'), size)
def search(self, query_type: DownloadTypeEnum, query: str, track_info: TrackInfo = None, limit: int = 20):
|
|
|
|
|
results = self.session.get_search_data(query, limit=limit)
|
2021-08-28 15:31:36 +02:00
|
|
|
|
|
|
|
|
items = []
|
2022-02-06 02:50:05 +01:00
|
|
|
for i in results[query_type.name + 's'].get('items'):
|
2021-08-28 15:31:36 +02:00
|
|
|
if query_type is DownloadTypeEnum.artist:
|
2022-02-06 02:50:05 +01:00
|
|
|
name = i.get('name')
|
2021-08-28 15:31:36 +02:00
|
|
|
artists = None
|
2021-09-10 03:07:11 +02:00
|
|
|
year = None
|
2021-08-28 15:31:36 +02:00
|
|
|
elif query_type is DownloadTypeEnum.playlist:
|
2022-02-06 02:50:05 +01:00
|
|
|
name = i.get('title')
|
|
|
|
|
artists = [i.get('creator').get('name')]
|
2021-09-10 03:07:11 +02:00
|
|
|
year = ""
|
2021-08-28 15:31:36 +02:00
|
|
|
elif query_type is DownloadTypeEnum.track:
|
2022-02-06 02:50:05 +01:00
|
|
|
name = i.get('title')
|
|
|
|
|
artists = [j.get('name') for j in i.get('artists')]
|
2021-09-10 03:07:11 +02:00
|
|
|
# Getting the year from the album?
|
2022-02-06 02:50:05 +01:00
|
|
|
year = i.get('album').get('releaseDate')[:4]
|
2021-08-28 15:31:36 +02:00
|
|
|
elif query_type is DownloadTypeEnum.album:
|
2022-02-06 02:50:05 +01:00
|
|
|
name = i.get('title')
|
|
|
|
|
artists = [j.get('name') for j in i.get('artists')]
|
|
|
|
|
year = i.get('releaseDate')[:4]
|
2021-08-28 15:31:36 +02:00
|
|
|
else:
|
|
|
|
|
raise Exception('Query type is invalid')
|
|
|
|
|
|
2021-09-05 22:17:01 +02:00
|
|
|
additional = None
|
|
|
|
|
if query_type is not DownloadTypeEnum.artist:
|
2022-02-06 02:50:05 +01:00
|
|
|
if i.get('audioModes') == ['DOLBY_ATMOS']:
|
2021-08-28 15:31:36 +02:00
|
|
|
additional = "Dolby Atmos"
|
2022-02-06 02:50:05 +01:00
|
|
|
elif i.get('audioModes') == ['SONY_360RA']:
|
2021-08-28 15:31:36 +02:00
|
|
|
additional = "360 Reality Audio"
|
2022-02-06 02:50:05 +01:00
|
|
|
elif i.get('audioQuality') == 'HI_RES':
|
2021-08-28 15:31:36 +02:00
|
|
|
additional = "MQA"
|
|
|
|
|
else:
|
|
|
|
|
additional = 'HiFi'
|
|
|
|
|
|
|
|
|
|
item = SearchResult(
|
|
|
|
|
name=name,
|
|
|
|
|
artists=artists,
|
2021-09-10 03:07:11 +02:00
|
|
|
year=year,
|
2022-02-06 02:50:05 +01:00
|
|
|
result_id=str(i.get('id')),
|
|
|
|
|
explicit=i.get('explicit'),
|
2021-09-05 22:17:01 +02:00
|
|
|
additional=[additional] if additional else None
|
2021-08-28 15:31:36 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
|
|
items.append(item)
|
|
|
|
|
|
|
|
|
|
return items
def get_playlist_info(self, playlist_id: str) -> PlaylistInfo:
|
|
|
|
|
playlist_data = self.session.get_playlist(playlist_id)
|
|
|
|
|
playlist_tracks = self.session.get_playlist_items(playlist_id)
|
2021-08-28 15:31:36 +02:00
|
|
|
|
2022-02-06 02:50:05 +01:00
|
|
|
tracks = [track.get('item').get('id') for track in playlist_tracks.get('items') if track.get('type') == 'track']
|
2021-09-17 00:35:22 +02:00
|
|
|
|
2022-02-06 02:50:05 +01:00
|
|
|
if 'name' in playlist_data.get('creator'):
|
|
|
|
|
creator_name = playlist_data.get('creator').get('name')
|
|
|
|
|
elif playlist_data.get('creator').get('id') == 0:
|
2022-01-20 21:27:00 +01:00
|
|
|
creator_name = 'TIDAL'
|
2021-09-17 00:35:22 +02:00
|
|
|
else:
|
2022-01-20 21:27:00 +01:00
|
|
|
creator_name = 'Unknown'
|
|
|
|
|
|
|
|
|
|
return PlaylistInfo(
|
2022-02-06 02:50:05 +01:00
|
|
|
name=playlist_data.get('title'),
|
2022-01-20 21:27:00 +01:00
|
|
|
creator=creator_name,
|
|
|
|
|
tracks=tracks,
|
|
|
|
|
# TODO: Use playlist creation date or lastUpdated?
|
2022-02-06 02:50:05 +01:00
|
|
|
release_year=playlist_data.get('created')[:4],
|
|
|
|
|
creator_id=playlist_data['creator'].get('id'),
|
|
|
|
|
cover_url=self.generate_artwork_url(playlist_data['squareImage'], size=self.cover_size,
|
|
|
|
|
max_size=1080) if playlist_data['squareImage'] else None,
|
|
|
|
|
track_extra_kwargs={
|
|
|
|
|
'data': {track.get('item').get('id'): track.get('item') for track in playlist_tracks.get('items')}
|
|
|
|
|
}
|
2022-01-20 21:27:00 +01:00
|
|
|
)
    def get_artist_info(self, artist_id: str, get_credited_albums: bool) -> ArtistInfo:
        """Collect an artist's albums, EPs/singles and (optionally) credited albums.

        Credited albums can only be fetched through the dynamic-page endpoint, which
        requires a MOBILE session; self.session.default is switched to
        MOBILE_DEFAULT for that part and restored to TV afterwards.
        """
        artist_data = self.session.get_artist(artist_id)

        artist_albums = self.session.get_artist_albums(artist_id).get('items')
        artist_singles = self.session.get_artist_albums_ep_singles(artist_id).get('items')

        # Only works with a mobile session, annoying, never do this again
        credit_albums = []
        if get_credited_albums and SessionType.MOBILE_DEFAULT.name in self.available_sessions:
            self.session.default = SessionType.MOBILE_DEFAULT
            credited_albums_page = self.session.get_page('contributor', params={'artistId': artist_id})

            # dig the paged list out of the deeply nested dynamic-page response;
            # [6:] strips a fixed URL prefix from the "more items" API path
            page_list = credited_albums_page['rows'][-1]['modules'][0]['pagedList']
            total_items = page_list['totalNumberOfItems']
            more_items_link = page_list['dataApiPath'][6:]

            # now fetch all the found total_items in pages of 50
            items = []
            for offset in range(0, total_items // 50 + 1):
                # NOTE(review): uses bare print (not self.print) for the transient progress line
                print(f'Fetching {offset * 50}/{total_items}', end='\r')
                items += self.session.get_page(more_items_link, params={'limit': 50, 'offset': offset * 50})['items']

            credit_albums = [item.get('item').get('album') for item in items]
            # restore the default session for subsequent calls
            self.session.default = SessionType.TV

        # use a set to filter out duplicate album ids
        albums = {str(album.get('id')) for album in artist_albums + artist_singles + credit_albums}

        return ArtistInfo(
            name=artist_data.get('name'),
            albums=list(albums),
            # cache only own albums/singles; credited albums get re-fetched on demand
            album_extra_kwargs={'data': {str(album.get('id')): album for album in artist_albums + artist_singles}}
        )
def get_album_info(self, album_id: str, data=None) -> AlbumInfo:
|
|
|
|
|
# check if album is already in album cache, add it
|
|
|
|
|
if data is None:
|
|
|
|
|
data = {}
|
|
|
|
|
|
|
|
|
|
album_data = data[album_id] if album_id in data else self.session.get_album(album_id)
|
|
|
|
|
|
2022-02-07 19:11:10 +01:00
|
|
|
# get all album tracks with corresponding credits with a limit of 100
|
|
|
|
|
limit = 100
|
|
|
|
|
tracks_data = self.session.get_album_contributors(album_id, limit=limit)
|
|
|
|
|
total_tracks = tracks_data.get('totalNumberOfItems')
|
|
|
|
|
|
|
|
|
|
# round total_tracks to the next 100 and loop over the offset, that's hideous
|
|
|
|
|
for offset in range(limit, ((total_tracks // limit) + 1) * limit, limit):
|
|
|
|
|
# fetch the new album tracks with the given offset
|
|
|
|
|
track_items = self.session.get_album_contributors(album_id, offset=offset, limit=limit)
|
|
|
|
|
# append those tracks to the album_data
|
|
|
|
|
tracks_data['items'] += track_items
|
2022-01-20 21:27:00 +01:00
|
|
|
|
|
|
|
|
# add the track contributors to a new list called 'credits'
|
|
|
|
|
cache = {'data': {}}
|
2022-02-06 02:50:05 +01:00
|
|
|
for track in tracks_data.get('items'):
|
|
|
|
|
track.get('item').update({'credits': track.get('credits')})
|
|
|
|
|
cache.get('data')[str(track.get('item').get('id'))] = track.get('item')
|
2022-01-20 21:27:00 +01:00
|
|
|
|
2022-02-02 16:52:30 +01:00
|
|
|
# filter out video clips
|
2022-02-06 02:50:05 +01:00
|
|
|
tracks = [str(track['item']['id']) for track in tracks_data.get('items') if track.get('type') == 'track']
|
2022-01-20 21:27:00 +01:00
|
|
|
|
2022-02-02 16:16:38 +01:00
|
|
|
quality = None
|
|
|
|
|
if 'audioModes' in album_data:
|
|
|
|
|
if album_data['audioModes'] == ['DOLBY_ATMOS']:
|
|
|
|
|
quality = 'Dolby Atmos'
|
|
|
|
|
elif album_data['audioModes'] == ['SONY_360RA']:
|
|
|
|
|
quality = '360'
|
|
|
|
|
elif album_data['audioQuality'] == 'HI_RES':
|
|
|
|
|
quality = 'M'
|
2022-01-20 21:27:00 +01:00
|
|
|
|
|
|
|
|
return AlbumInfo(
|
2022-02-06 02:50:05 +01:00
|
|
|
name=album_data.get('title'),
|
|
|
|
|
release_year=album_data.get('releaseDate')[:4],
|
|
|
|
|
explicit=album_data.get('explicit'),
|
2022-01-20 21:27:00 +01:00
|
|
|
quality=quality,
|
2022-02-06 02:50:05 +01:00
|
|
|
upc=album_data.get('upc'),
|
|
|
|
|
cover_url=self.generate_artwork_url(album_data.get('cover'),
|
|
|
|
|
size=self.cover_size) if album_data.get('cover') else None,
|
|
|
|
|
animated_cover_url=self.generate_animated_artwork_url(album_data.get('videoCover')) if album_data.get(
|
|
|
|
|
'videoCover') else None,
|
|
|
|
|
artist=album_data.get('artist').get('name'),
|
|
|
|
|
artist_id=album_data.get('artist').get('id'),
|
2022-01-20 21:27:00 +01:00
|
|
|
tracks=tracks,
|
|
|
|
|
track_extra_kwargs=cache
|
|
|
|
|
)
    def get_track_info(self, track_id: str, quality_tier: QualityEnum, codec_options: CodecOptions,
                       data=None) -> TrackInfo:
        """Resolve a track's metadata and stream manifest into a TrackInfo.

        Picks the session type best suited to the content (MOBILE_DEFAULT for plain
        lossless stereo, MOBILE_ATMOS for 360RA/AC-4, TV otherwise), fetches the
        stream manifest, optionally falls back to LOSSLESS when proprietary codecs
        are disabled, and analyses the FLAC header for MQA when 'fix_mqa' is set.
        """
        if data is None:
            data = {}

        track_data = data[track_id] if track_id in data else self.session.get_track(track_id)

        album_id = str(track_data.get('album').get('id'))
        # check if the album is already in the album cache, otherwise fetch it
        try:
            album_data = data[album_id] if album_id in data else self.session.get_album(album_id)
        except TidalError as e:
            # if an error occurs, set album_data to an empty dict and continue with track data only
            self.print(f'Tidal: {e}, trying anyway', drop_level=1)
            album_data = {}

        # check if album is only available in LOSSLESS and STEREO, so it switches to MOBILE_DEFAULT which will
        # get FLACs faster
        if (self.settings['force_non_spatial'] or (
                album_data.get('audioQuality') == 'LOSSLESS' and album_data.get('audioModes') == ['STEREO'])) and \
                SessionType.MOBILE_DEFAULT.name in self.available_sessions:
            self.session.default = SessionType.MOBILE_DEFAULT
        elif (track_data.get('audioModes') == ['SONY_360RA']
              or (track_data.get('audioModes') == ['DOLBY_ATMOS'] and self.settings['prefer_ac4'])) \
                and SessionType.MOBILE_ATMOS.name in self.available_sessions:
            self.session.default = SessionType.MOBILE_ATMOS
        else:
            self.session.default = SessionType.TV

        stream_data = self.session.get_stream_url(track_id, self.quality_parse[quality_tier])
        # only needed for MPEG-DASH
        audio_track = None

        if stream_data['manifestMimeType'] == 'application/dash+xml':
            manifest = base64.b64decode(stream_data['manifest'])
            audio_track = self.parse_mpd(manifest)[0]  # Only one AudioTrack?
            track_codec = audio_track.codec
        else:
            # BTS manifest: a base64-encoded JSON document with 'codecs' and 'urls'
            manifest = json.loads(base64.b64decode(stream_data['manifest']))
            track_codec = CodecEnum['AAC' if 'mp4a' in manifest['codecs'] else manifest['codecs'].upper()]

        if not codec_data[track_codec].spatial:
            # re-request a LOSSLESS stream when the codec is proprietary but disallowed
            if not codec_options.proprietary_codecs and codec_data[track_codec].proprietary:
                self.print(f'Proprietary codecs are disabled, if you want to download {track_codec.name}, '
                           f'set "proprietary_codecs": true', drop_level=1)
                stream_data = self.session.get_stream_url(track_id, 'LOSSLESS')

                if stream_data['manifestMimeType'] == 'application/dash+xml':
                    manifest = base64.b64decode(stream_data['manifest'])
                    audio_track = self.parse_mpd(manifest)[0]  # Only one AudioTrack?
                    track_codec = audio_track.codec
                else:
                    manifest = json.loads(base64.b64decode(stream_data['manifest']))
                    track_codec = CodecEnum['AAC' if 'mp4a' in manifest['codecs'] else manifest['codecs'].upper()]

        track_name = track_data.get('title')
        track_name += f' ({track_data.get("version")})' if track_data.get("version") else ''

        mqa_file = None
        if audio_track:
            # MPEG-DASH download path
            download_args = {'audio_track': audio_track}
        else:
            # check if MQA
            if track_codec is CodecEnum.MQA and self.settings['fix_mqa']:
                # download the first chunk of the FLAC file to analyze it
                temp_file_path = self.download_temp_header(manifest['urls'][0])

                # detect MQA file
                mqa_file = MqaIdentifier(temp_file_path)

            # add the direct file URL to download_args
            download_args = {'file_url': manifest['urls'][0]}

        # defaults by codec; overridden below when the file is a verified MQA
        bit_depth = 24 if track_codec in [CodecEnum.EAC3, CodecEnum.MHA1] else 16
        sample_rate = 48 if track_codec in [CodecEnum.EAC3, CodecEnum.MHA1, CodecEnum.AC4] else 44.1

        # now set everything for MQA
        if mqa_file is not None and mqa_file.is_mqa:
            bit_depth = mqa_file.bit_depth
            sample_rate = mqa_file.get_original_sample_rate()

        track_info = TrackInfo(
            name=track_name,
            album=album_data.get('title'),
            album_id=album_id,
            artists=[a.get('name') for a in track_data.get('artists')],
            artist_id=track_data['artist'].get('id'),
            release_year=track_data.get('streamStartDate')[:4] if track_data[
                'streamStartDate'] else track_data.get('dateAdded')[:4],
            bit_depth=bit_depth,
            sample_rate=sample_rate,
            cover_url=self.generate_artwork_url(track_data['album'].get('cover'),
                                                size=self.cover_size) if track_data['album'].get('cover') else None,
            explicit=track_data.get('explicit'),
            tags=self.convert_tags(track_data, album_data, mqa_file),
            codec=track_codec,
            download_extra_kwargs=download_args,
            lyrics_extra_kwargs={'track_data': track_data},
            # check if 'credits' are present (only from get_album_info's cache)
            credits_extra_kwargs={'data': {track_id: track_data['credits']} if 'credits' in track_data else {}}
        )

        if not codec_options.spatial_codecs and codec_data[track_codec].spatial:
            track_info.error = 'Spatial codecs are disabled, if you want to download it, set "spatial_codecs": true'

        return track_info
@staticmethod
|
|
|
|
|
def download_temp_header(file_url: str, chunk_size: int = 16384) -> str:
|
|
|
|
|
# create flac temp_location
|
|
|
|
|
temp_location = create_temp_filename() + '.flac'
|
|
|
|
|
|
|
|
|
|
# create session and download the file to the temp_location
|
|
|
|
|
r_session = create_requests_session()
|
|
|
|
|
|
|
|
|
|
r = r_session.get(file_url, stream=True, verify=False)
|
|
|
|
|
with open(temp_location, 'wb') as f:
|
|
|
|
|
# only download the first chunk_size bytes
|
|
|
|
|
for chunk in r.iter_content(chunk_size=chunk_size):
|
|
|
|
|
if chunk: # filter out keep-alive new chunks
|
|
|
|
|
f.write(chunk)
|
|
|
|
|
break
|
|
|
|
|
|
|
|
|
|
return temp_location
    @staticmethod
    def parse_mpd(xml: bytes) -> list:
        """Parse an audio-only MPEG-DASH MPD into a list of AudioTrack objects.

        :param xml: raw MPD document bytes.
        :raises ValueError: if a non-audio adaptation set is encountered.
        """
        xml = xml.decode('UTF-8')
        # Removes default namespace definition, don't do that!
        xml = re.sub(r'xmlns="[^"]+"', '', xml, count=1)
        root = ElementTree.fromstring(xml)

        # List of AudioTracks
        tracks = []

        for period in root.findall('Period'):
            for adaptation_set in period.findall('AdaptationSet'):
                for rep in adaptation_set.findall('Representation'):
                    # Check if representation is audio
                    content_type = adaptation_set.get('contentType')
                    if content_type != 'audio':
                        raise ValueError('Only supports audio MPDs!')

                    # Codec checks: any mp4a.* variant maps to AAC
                    codec = rep.get('codecs').upper()
                    if codec.startswith('MP4A'):
                        codec = 'AAC'

                    # Segment template
                    seg_template = rep.find('SegmentTemplate')
                    # Add init file to track_urls
                    track_urls = [seg_template.get('initialization')]
                    start_number = int(seg_template.get('startNumber') or 1)

                    # https://dashif-documents.azurewebsites.net/Guidelines-TimingModel/master/Guidelines-TimingModel.html#addressing-explicit
                    # Also see example 9
                    seg_timeline = seg_template.find('SegmentTimeline')
                    if seg_timeline is not None:
                        seg_time_list = []
                        cur_time = 0

                        for s in seg_timeline.findall('S'):
                            # Media segment's explicit start time (t attribute)
                            if s.get('t'):
                                cur_time = int(s.get('t'))

                            # Segment reference: r is the repeat count (defaults to 0)
                            for i in range((int(s.get('r') or 0) + 1)):
                                seg_time_list.append(cur_time)
                                # Add duration to current time
                                cur_time += int(s.get('d'))

                        # Create list with $Number$ indices
                        seg_num_list = list(range(start_number, len(seg_time_list) + start_number))
                        # Replace $Number$ with all the seg_num_list indices
                        track_urls += [seg_template.get('media').replace('$Number$', str(n)) for n in seg_num_list]

                    tracks.append(AudioTrack(
                        codec=CodecEnum[codec],
                        sample_rate=int(rep.get('audioSamplingRate') or 0),
                        bitrate=int(rep.get('bandwidth') or 0),
                        urls=track_urls
                    ))

        return tracks
    def get_track_download(self, file_url: str = None, audio_track: AudioTrack = None) \
            -> TrackDownloadInfo:
        """Produce the download instructions for a track.

        Exactly one of file_url (direct download: MHA1, E-AC-3 or MQA) or
        audio_track (MPEG-DASH: download all segments, merge, remux via FFmpeg)
        is expected to be provided.
        """
        # direct file: hand the URL straight back to the core downloader
        if file_url:
            return TrackDownloadInfo(download_type=DownloadEnum.URL, file_url=file_url)

        # MPEG-DASH
        # use the total file size for a better progress bar? Is it even possible to calculate the total size from MPD?
        try:
            columns = os.get_terminal_size().columns
            if os.name == 'nt':
                # on Windows, shrink the bar so the indent doesn't wrap the line
                bar = tqdm(audio_track.urls, ncols=(columns - self.oprinter.indent_number),
                           bar_format=' ' * self.oprinter.indent_number + '{l_bar}{bar}{r_bar}')
            else:
                raise OSError
        except OSError:
            bar = tqdm(audio_track.urls, bar_format=' ' * self.oprinter.indent_number + '{l_bar}{bar}{r_bar}')

        # download all segments and save the locations inside temp_locations
        temp_locations = []
        for download_url in bar:
            temp_locations.append(download_to_temp(download_url, extension='mp4'))

        # needed for bar indent
        bar.close()

        # concatenated/merged .mp4 file
        merged_temp_location = create_temp_filename() + '.mp4'
        # actual converted output file (extension from the codec's container)
        output_location = create_temp_filename() + '.' + codec_data[audio_track.codec].container.name

        # download is finished, merge chunks into one file
        with open(merged_temp_location, 'wb') as dest_file:
            for temp_location in temp_locations:
                with open(temp_location, 'rb') as segment_file:
                    copyfileobj(segment_file, dest_file)

        # remux the merged .mp4 into the target container (stream copy, no re-encode)
        try:
            ffmpeg.input(merged_temp_location, hide_banner=None, y=None).output(output_location, acodec='copy',
                                                                                loglevel='error').run()
            # remove all intermediate files
            silentremove(merged_temp_location)
            for temp_location in temp_locations:
                silentremove(temp_location)
        except Exception:
            self.print('FFmpeg is not installed or working! Using fallback, may have errors')

            # return the MP4 temp file, but tell orpheus to change the container to .m4a (AAC)
            return TrackDownloadInfo(
                download_type=DownloadEnum.TEMP_FILE_PATH,
                temp_file_path=merged_temp_location,
                different_codec=CodecEnum.AAC
            )

        # return the remuxed file
        return TrackDownloadInfo(
            download_type=DownloadEnum.TEMP_FILE_PATH,
            temp_file_path=output_location,
        )
def get_track_cover(self, track_id: str, cover_options: CoverOptions, data=None) -> CoverInfo:
    """Build a CoverInfo for a track, reusing cached track metadata when available.

    :param track_id: Tidal track id
    :param cover_options: requested cover settings (only ``resolution`` is used here)
    :param data: optional cache mapping track ids to already-fetched track dicts
    :return: CoverInfo with the artwork URL (always JPG)
    """
    data = {} if data is None else data

    # prefer the cached track dict; fall back to an API call on a cache miss
    try:
        track_data = data[track_id]
    except KeyError:
        track_data = self.session.get_track(track_id)

    cover_id = track_data['album'].get('cover')

    # Tidal doesn't support PNG, so the artwork is always fetched as JPG
    cover_url = self.generate_artwork_url(cover_id, size=cover_options.resolution)
    return CoverInfo(url=cover_url, file_type=ImageFileTypeEnum.jpg)
|
|
|
|
|
|
|
|
|
|
def get_track_lyrics(self, track_id: str, track_data: dict) -> LyricsInfo:
    """Fetch embedded and synced lyrics for a track.

    Some tracks (e.g. Dolby Atmos versions) have no lyrics entry; in that case a
    search is made for a non-Atmos track with the same title/artist and its
    lyrics are used instead.

    :param track_id: Tidal track id
    :param track_data: track JSON from the Tidal API (title/artist/artists are read)
    :return: LyricsInfo with plain ('lyrics') and synced ('subtitles') text, either may be None
    """
    # get lyrics data for current track id
    lyrics_data = self.session.get_lyrics(track_id)

    if 'error' in lyrics_data:
        # guard against missing/null keys so a sparse track_data dict can't raise
        # AttributeError ('artist') or TypeError ('artists') during the fallback search
        artist_name = (track_data.get('artist') or {}).get('name')
        artist_names = ''.join(a.get('name') for a in track_data.get('artists') or [])

        # search for title and artist to find a matching track (non Atmos)
        results = self.search(
            DownloadTypeEnum.track,
            f'{track_data.get("title")} {artist_names}',
            limit=10)

        # check every result to find a matching result
        best_tracks = [r.result_id for r in results
                       if r.name == track_data.get('title') and
                       r.artists[0] == artist_name and
                       'Dolby Atmos' not in r.additional]

        # retrieve the lyrics for the first one, otherwise return empty dict
        lyrics_data = self.session.get_lyrics(best_tracks[0]) if len(best_tracks) > 0 else {}

    embedded = lyrics_data.get('lyrics')
    synced = lyrics_data.get('subtitles')

    return LyricsInfo(
        embedded=embedded,
        synced=synced
    )
|
|
|
|
|
|
2022-01-20 21:27:00 +01:00
|
|
|
def get_track_credits(self, track_id: str, data=None) -> Optional[list]:
    """Collect contributor names grouped by role for a track.

    :param track_id: Tidal track id
    :param data: optional cache mapping track ids to contributor lists
    :return: list of CreditsInfo, or None when no credits exist
    """
    data = data if data is not None else {}

    credits_dict = {}

    # fetch credits from cache if not fetch those credits
    if track_id in data:
        for contributor in data[track_id]:
            credits_dict[contributor.get('type')] = [
                c.get('name') for c in contributor.get('contributors')
            ]
    else:
        track_contributors = self.session.get_track_contributors(track_id).get('items')
        if len(track_contributors) > 0:
            for contributor in track_contributors:
                # group each name under its role, creating the list on first use
                credits_dict.setdefault(contributor.get('role'), []).append(contributor.get('name'))

    if len(credits_dict) > 0:
        # convert the dictionary back to a list of CreditsInfo
        return [CreditsInfo(sanitise_name(k), v) for k, v in credits_dict.items()]
    return None
|
|
|
|
|
|
|
|
|
|
@staticmethod
def convert_tags(track_data: dict, album_data: dict, mqa_file: MqaIdentifier = None) -> Tags:
    """Map Tidal track/album API dicts onto an Orpheus Tags object.

    :param track_data: track JSON from the Tidal API
    :param album_data: album JSON from the Tidal API
    :param mqa_file: parsed MQA metadata; when given, MQA encoder tags are added
    :return: populated Tags instance
    """
    # NOTE(review): the previous version built a version-suffixed track title here,
    # but it was never used by this function, so the dead code was removed.

    extra_tags = {}
    if mqa_file is not None:
        # mimic the tags a real MQAEncode run would write so players detect the file as MQA
        encoder_time = datetime.now().strftime("%b %d %Y %H:%M:%S")
        mqa_encoder = f'MQAEncode v1.1, 2.4.0+0 (278f5dd), E24F1DE5-32F1-4930-8197-24954EB9D6F4, {encoder_time}'
        extra_tags = {
            'ENCODER': mqa_encoder,
            'MQAENCODER': mqa_encoder,
            'ORIGINALSAMPLERATE': str(mqa_file.original_sample_rate)
        }

    return Tags(
        # guard against an explicit null "artist" value, not just a missing key
        album_artist=album_data['artist'].get('name') if album_data.get('artist') else None,
        track_number=track_data.get('trackNumber'),
        total_tracks=album_data.get('numberOfTracks'),
        disc_number=track_data.get('volumeNumber'),
        total_discs=album_data.get('numberOfVolumes'),
        isrc=track_data.get('isrc'),
        upc=album_data.get('upc'),
        release_date=album_data.get('releaseDate'),
        copyright=track_data.get('copyright'),
        replay_gain=track_data.get('replayGain'),
        replay_peak=track_data.get('peak'),
        extra_tags=extra_tags
    )
|