Based off JPS-AU

# Standard library packages
import re
import os
import sys
import shutil
import string
import argparse
import html
from urllib.parse import urlparse
import json
import ftplib

# Third-party packages
import requests
from bs4 import BeautifulSoup
from mutagen.flac import FLAC
from mutagen.mp3 import MP3
from torf import Torrent
from tqdm import tqdm
from langdetect import detect

# JPS-AU files
import smpy

def asciiart():
    print("""
███████╗███╗   ███╗       █████╗ ██╗   ██╗
██╔════╝████╗ ████║      ██╔══██╗██║   ██║
███████╗██╔████╔██║█████╗███████║██║   ██║
╚════██║██║╚██╔╝██║╚════╝██╔══██║██║   ██║
███████║██║ ╚═╝ ██║      ██║  ██║╚██████╔╝
╚══════╝╚═╝     ╚═╝      ╚═╝  ╚═╝ ╚═════╝
""")

# Get arguments using argparse
def getargs():
    parser = argparse.ArgumentParser()
    parser.add_argument('-dir', '--directory', help='Initiate upload on directory', nargs='?', required=True)
    parser.add_argument("-f", "--freeleech", help="Enables freeleech", action="store_true")
    parser.add_argument("-t", "--tags", help="Add additional tags to the upload", nargs='?')
    parser.add_argument('-d', '--debug', help='Enable debug mode', action='store_true')
    parser.add_argument("-dry", "--dryrun", help="Dryrun will carry out all actions other than the actual upload to JPS.", action="store_true")
    return parser.parse_args()

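# Example invocation (illustrative; the script filename below is a placeholder for
# however this file is named locally):
#   python upload.py -dir "/path/to/Artist - Album" -t "favourite,2020" -f -d -dry
# This would gather metadata from the directory, add two extra tags, mark the upload
# as freeleech, print debug output, and stop short of the actual POST because of -dry.
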
# Acquire the authkey used for torrent files from upload.php
def getauthkey():
    """
    Get SM session authkey for use by uploadtorrent() data dict.
    Uses SM login data
    :return: authkey
    """
    smpage = sm.retrieveContent("https://sugoimusic.me/torrents.php?id=118")  # Arbitrary page on JPS that has authkey
    soup = BeautifulSoup(smpage.text, 'html5lib')
    rel2 = str(soup.select('#content .thin .main_column .torrent_table tbody'))
    authkey = re.findall('authkey=(.*)&torrent_pass=', rel2)
    return authkey

def copytree(src, dst, symlinks=False, ignore=None):
    for item in os.listdir(src):
        s = os.path.join(src, item)
        d = os.path.join(dst, item)
        if os.path.isdir(s):
            shutil.copytree(s, d, symlinks, ignore)
        else:
            shutil.copy2(s, d)

# Creates torrent file using torf module.
def createtorrent(authkey, directory, filename, releasedata):
    t = Torrent(path=directory,
                trackers=[authkey])  # Torf requires we store authkeys in a list object. This makes it easier to add multiple announce urls.
    # Set torrent to private as standard practice for private trackers
    t.private = True
    t.source = "SugoiMusic"
    t.generate()
    ## Format releasedata to build a suitable torrent name.
    # The reason we don't just use the directory name is because of an error in POSTING.
    # POSTS do not seem to POST hangul/jp characters alongside files.
    filename = f"{releasedata['artist']} - {releasedata['title']} [{releasedata['media']}-{releasedata['format']}].torrent"
    # filename = filename.replace("\\", "")
    try:
        t.write(filename)
        print("_" * 100)
        print("Torrent creation:\n")
        print(f"{filename} has been created.")
    except:
        print("_" * 100)
        print("Torrent creation:\n")
        os.remove(filename)
        print(f"{filename} already exists, existing torrent will be replaced.")
        t.write(filename)
        print(f"{filename} has been created.")
    return filename

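# For reference, the torrent filename built above follows the pattern
# "{artist} - {title} [{media}-{format}].torrent"; for example, a WEB FLAC rip of
# "Example Album" by "Example Artist" would produce
# "Example Artist - Example Album [WEB-FLAC].torrent" (names here are illustrative).
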
# Reads FLAC file and returns metadata.
def readflac(filename):
    read = FLAC(filename)
    # Create dict containing all meta fields we'll be using.
    tags = {
        "ALBUM": read.get('album'),
        "ALBUMARTIST": read.get('albumartist'),
        "ARTIST": read.get('artist'),
        "DATE": read.get('date')[0],
        "GENRE": "",  # read.get('genre'),
        "TITLE": read.get('title'),
        "COMMENT": read.get('comment'),
        "TRACKNUMBER": read.get('tracknumber')[0].zfill(2),
        "DISCNUMBER": read.get('discnumber')}
    # Some FLACs hold the grouping key in contentgroup instead of grouping.
    tags['GROUPING'] = read.get('grouping')
    ## If grouping returns None we check contentgroup.
    # If that also returns None we ignore it and handle it during the final checks.
    if tags['GROUPING'] is None:
        tags['GROUPING'] = read.get('contentgroup')
    required_tags = ['ALBUM', 'ALBUMARTIST', 'DATE', 'TRACKNUMBER']
    for k, v in tags.items():
        if v is None:
            if k in required_tags:
                print(f"{k} has returned {v}, this is a required tag")
                sys.exit()
    return tags

# Reads MP3 file and returns metadata.
def readmp3(filename):
    read = MP3(filename)
    # Create dict containing all meta fields we'll be using.
    tags = {
        "ALBUM": read.get('TALB'),  # Album Title
        "ALBUMARTIST": read.get('TPE2'),  # Album Artist
        "ARTIST": read.get('TPE1'),  # Track Artist
        "DATE": str(read.get('TDRC')),  # Date YYYYMMDD (Will need to add a try/except for other possible identifiers)
        "GENRE": read.get('TCON').text,  # Genre
        "TITLE": read.get('TIT2'),  # Track Title
        "COMMENT": read.get('COMM::eng'),  # Track Comment
        "GROUPING": read.get('TIT1'),  # Grouping
        "TRACKNUMBER": re.sub(r"\/.*", "", str(read.get('TRCK'))).zfill(2),  # Tracknumber (Format #/Total) Re.sub removes /#
        "DISCNUMBER": re.sub(r"\/.*", "", str(read.get('TPOS')))}  # Discnumber (Format #/Total) Re.sub removes /#
    required_tags = ['ALBUM', 'ALBUMARTIST', 'DATE', 'TRACKNUMBER']
    for k, v in tags.items():
        if v is None:
            if k in required_tags:
                print(f"{k} has returned {v}, this is a required tag")
                sys.exit()
    return tags

# Generates new log file based on directory contents
def generatelog(track_titles, log_filename, log_directory):
    # Separate each tracklist entry in the list with a newline
    track_titles = '\n'.join([str(x) for x in track_titles])
    # Format tracklist layout
    log_contents = f"""[size=5][b]Tracklist[/b][/size]\n{track_titles}
"""
    # If we have chosen to save the tracklist then we write log_contents to a .log file within the log directory specified
    if cfg['local_prefs']['save_tracklist']:
        # Write to {album_name}.log
        with open(f"{log_directory}/{log_filename}.log", "w+") as f:
            f.write(log_contents)
            # Reset position to first line and read
            f.seek(0)
            log_contents = f.read()
            f.close()
    # If debug mode is enabled we will print the log contents.
    if debug:
        print("_" * 100)
        print(f"Log Contents/Tracklisting: {log_contents}")
    return log_contents

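# A generated tracklist ends up as BBCode along the lines of the following (track data
# is illustrative; the individual entries are built in gatherdata()):
#   [size=5][b]Tracklist[/b][/size]
#   [b]01[/b]. First Track Title
#   [b]02[/b]. Second Track Title
# or, for multi-disc releases, entries such as [b]1-01[/b]. First Track Title.
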
def readlog(log_name, log_directory):
    with open(f"{log_directory}/{log_name}.log", "r+") as f:
        log_contents = f.read()
        f.close()
    return log_contents

def add_to_hangul_dict(hangul, english, category):
    hangul = str(hangul)
    english = str(english)
    categories = ['version', 'general', 'artist', 'genres', 'label', 'distr']
    file = "json_data/dictionary.json"
    json_file = open(file, 'r', encoding='utf-8', errors='ignore')
    dictionary = json.load(json_file)
    json_file.close()
    new = dict()
    for cats in dictionary:
        #== Create the categories in the new temp file
        new[cats] = dict()
        for key, value in dictionary[cats].items():
            #== List all the old items into the new dict
            new[cats][key] = value
    if hangul in new[category].keys():
        if new[category].get(hangul) is None:
            if english != 'None':
                new[category][hangul] = english
        else:
            #== Only update if English word has been supplied ==#
            if english != 'None':
                new[category][hangul] = english
    else:
        if english == 'None':
            new[category][hangul] = None
        else:
            new[category][hangul] = english
    json_write = open(file, 'w+', encoding='utf-8')
    json_write.write(json.dumps(new, indent=4, ensure_ascii=False))
    json_write.close()

def translate(string, category, result=None, output=None):
    file = "json_data/dictionary.json"
    with open(file, encoding='utf-8', errors='ignore') as f:
        dictionary = json.load(f, strict=False)
    category = str(category)
    string = str(string)
    search = dictionary[category]
    string = string.strip()
    if string == 'Various Artists':
        output = ['Various Artists', None]
    else:
        #== NO NEED TO SEARCH - STRING HAS HANGUL+ENGLISH or HANGUL+HANGUL ==#
        if re.search(r"\((?P<inside>.*)\)", string):
            #== Complete translation, add to dictionary with both values ==#
            #== Contains parentheses, need to split
            parenthesis = string.split("(")
            pre_parenthesis = parenthesis[0].strip()
            in_parenthesis = parenthesis[1].replace(")", "").strip()
            #== Check the order of the parentheses ==#
            if re.search(r"[^\u0000-\u007F]+", pre_parenthesis) and re.search(r"[^\u0000-\u007F]+", in_parenthesis):
                #== Both hangul
                first = 'kr'
                second = 'kr'
            else:
                if re.search(r"[^\u0000-\u007F]+", pre_parenthesis):
                    first = 'kr'
                    second = 'eng'
                else:
                    first = 'eng'
                    second = 'kr'
            if first == 'kr' and second == 'eng':
                #== Hangul first ==#
                hangul = pre_parenthesis
                english = in_parenthesis
                add_to_hangul_dict(hangul, english, category)
            elif first == 'eng' and second == 'kr':
                #== English first ==#
                hangul = in_parenthesis
                english = pre_parenthesis
                add_to_hangul_dict(hangul, english, category)
            elif first == 'kr' and second == 'kr':
                #== Both Hangul ==#
                hangul = pre_parenthesis
                english = None
                add_to_hangul_dict(pre_parenthesis, None, category)
                add_to_hangul_dict(hangul, None, category)
            else:
                #== Both English
                hangul = None
                english = pre_parenthesis
            output = [hangul, english]
        #== No parentheses - HANGUL
        else:
            #== If the input string is a full Hangul word - check the dictionary and then add it if necessary
            if re.search(r"[^\u0000-\u007F]+", string):
                if string in search.keys():
                    #== yes
                    if search.get(string) is None:
                        #== If the keyword does not have a translation, add it to the dictionary ==#
                        output = [string, None]
                    else:
                        #== Translation already exists, output the result in a list ==#
                        output = [string, search.get(string)]
                else:
                    output = [string, None]
                    add_to_hangul_dict(string, None, category)
            #== Full English name -- leave it
            else:
                for key, value in search.items():
                    if key == string:
                        output = [value, string]
                        break
                    else:
                        output = [string, string]
    return output

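# json_data/dictionary.json is assumed to look roughly like the following: one object
# per category (the categories listed in add_to_hangul_dict() plus the "release_types"
# category referenced in gatherdata()), each mapping a hangul key to its English value,
# or to null when no translation has been supplied yet. Example layout (the entries
# themselves are illustrative):
# {
#     "version": {},
#     "general": {},
#     "artist": {"아이유": "IU"},
#     "genres": {"발라드": "Ballad"},
#     "label": {},
#     "distr": {},
#     "release_types": {"정규": "Album", "싱글": "Single"}
# }
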
def gatherdata(directory):
    # Lists for storing the tag data gathered from each file.
    list_album_artists = []
    list_track_artists = []
    list_album = []
    list_genre = []
    translated_genre = []
    translated_album_artists = []
    tracklist_entries = []
    # Creation of releasedata dict, this will store formatted meta used for the POST.
    releasedata = {}
    ## Set no log as the default value.
    # This will be set to True if a .log file is found, in turn this will allow us to determine if WEB or CD.
    log_available = False
    flac_present = False
    mp3_present = False
    # Read directory contents, grab metadata of .FLAC files.
    for file in os.listdir(directory):
        file_location = os.path.join(directory, file)
        if file.endswith(".flac"):
            # Read FLAC file to grab meta
            tags = readflac(file_location)
            flac_present = True
            # If Discnumber isn't present then we omit it from the tracklist entry
            if tags['DISCNUMBER'] is None:
                tracklist_entry = f"[b]{tags['TRACKNUMBER']}[/b]. {tags['TITLE'][0]}"
            else:
                tracklist_entry = f"[b]{tags['DISCNUMBER'][0]}-{tags['TRACKNUMBER']}[/b]. {tags['TITLE'][0]}"
            tracklist_entries.append(tracklist_entry)
            if debug:
                print("_" * 100)
                print(f"Tags for {file}:\n{tags}")
        if file.endswith(".mp3"):
            # Read MP3 file to grab meta
            tags = readmp3(file_location)
            mp3_present = True
            # If Discnumber isn't present then we omit it from the tracklist entry
            if tags['DISCNUMBER'] == "None":
                tracklist_entry = f"[b]{tags['TRACKNUMBER']}[/b]. {tags['TITLE'][0]}"
            else:
                tracklist_entry = f"[b]{tags['DISCNUMBER']}-{tags['TRACKNUMBER']}[/b]. {tags['TITLE'][0]}"
            tracklist_entries.append(tracklist_entry)
            if debug:
                print("_" * 100)
                print(f"Tags for {file}:\n{tags}")
        # If there is only one genre in the list, attempt to split it as there are likely more.
        if len(tags['GENRE']) == 1:
            tags['GENRE'] = tags['GENRE'][0].split(";")
        for aa in tags['ALBUMARTIST']:
            list_album_artists.append(aa)
        for a in tags['ARTIST']:
            list_track_artists.append(a)
        list_album.append(tags['ALBUM'][0])
        # for g in tags['GENRE']:
        #     list_genre.append(g)
        # Check files to make sure there's no multi-format.
        if flac_present:
            format = 'FLAC'
            bitrate = 'Lossless'
        if mp3_present:
            format = 'MP3'
            bitrate = '320'
        if flac_present and mp3_present:
            print("Mutt detected, exiting.")
            sys.exit()
        if file.endswith(".log"):
            log_available = True
    if log_available == True:
        media = 'CD'
    else:
        media = 'WEB'
    # Load dictionary.json for translations
    file = "json_data/dictionary.json"
    with open(file, encoding='utf-8', errors='ignore') as f:
        dictionary = json.load(f, strict=False)
    # Split additional genres at commas and append them to the existing genre tags
    if additional_tags is not None:
        split_tags = additional_tags.split(",")
        for s in split_tags:
            list_genre.append(s)
    # Translate genres using the dictionary and append them to translated_genre
    for g in set(list_genre):
        translation = translate(g, "genres")[0]
        translated_genre.append(translation)
    # Translate artists using the dictionary and append them to translated_album_artists
    for a in set(list_album_artists):
        if tags['ALBUMARTIST'][0] == 'Various Artists':
            translated_artist_name = 'V.A.'
            translated_album_artists.append("V.A.")
        else:
            translated_artist_name = translate(string=tags['ALBUMARTIST'][0], category="artist")
            translated_album_artists.append(translated_artist_name[0])
    ## Identify unique values using sets.
    unique_album_artists = ','.join(set(translated_album_artists))
    unique_track_artists = ','.join(set(list_track_artists))
    unique_genre = ','.join(set(translated_genre))
    unique_album = set(list_album)
    ## Acquire contents of our log file to be used for the album description
    # Comments store the album id which matches our log names, so we can use the comment tag to find our album descriptions.
    log_directory = cfg['local_prefs']['log_directory']
    # Album description taken from log file.
    if cfg['local_prefs']['generate_tracklist']:
        log_filename = f"{unique_album_artists} - {tags['ALBUM'][0]}"
        album_description = generatelog(tracklist_entries, log_filename, log_directory)
    else:
        log_filename = tags['COMMENT'][0]
        album_description = readlog(log_filename, log_directory)
    ## If release description is enabled we apply comments to the Bugs album url
    # Note that this is dependent on the album being sourced from Bugs, so it should be changed per user.
    if cfg['local_prefs']['enable_release_description']:
        try:
            release_description = f"Sourced from [url=https://music.bugs.co.kr/album/{tags['COMMENT'][0]}]Bugs[/url]"
        # If any exceptions occur we will fall back to no release description
        except:
            release_description = ""
    # If release description is not enabled we will use no release description
    else:
        release_description = ""
    ## Assign all our unique values into releasedata{}. We'll use this later down the line for POSTING.
    # POST values can be found by inspecting JPS HTML
    releasedata['submit'] = 'true'
    # List of accepted upload types
    accepted_types = ['Album', 'Single']
    # If type errors then we ask for user input
    try:
        releasedata['type'] = translate(tags['GROUPING'][0], "release_types")[0]
    except TypeError:
        releasedata['type'] = input("\n" + "_" * 100 + "\nGrouping is empty or has received an error, please enter manually (Album/Single)\n")
    # If type is still not in accepted_types we ask for user input again and do not break the loop until it is correct
    if releasedata['type'] not in accepted_types:
        while True:
            releasedata['type'] = input("\n" + "_" * 100 + "\nGrouping tag did not return an album type, please enter manually (Album/Single)\n")
            if releasedata['type'] not in accepted_types:
                continue
            else:
                break
    releasedata['title'] = tags['ALBUM'][0]
    releasedata['artist'] = unique_album_artists
    # If the value of album artist and artist is the same, we don't need to POST original artist.
    if unique_album_artists != unique_track_artists:
        releasedata['artistjp'] = unique_track_artists
    # re.sub removes any date separators, JPS doesn't accept them
    releasedata['releasedate'] = re.sub(r"[^0-9]", "", tags['DATE'])
    releasedata['format'] = format
    releasedata['bitrate'] = bitrate
    releasedata['media'] = media
    releasedata['album_desc'] = album_description
    releasedata['release_desc'] = release_description
    releasedata['tags'] = unique_genre
    # Enable freeleech if arg is passed
    if freeleech:
        releasedata['freeleech'] = "true"
    ## Language Checks
    # This is a required check as we don't want to enter non-english/romaji characters into the title/artist field.
    en = detectlanguage(releasedata['title'])
    if debug:
        print("_" * 100)
        print("Title/Artist Language:\n")
        print(f"{releasedata['title']} < English = {en}")
    if en == False:
        input_english_title = input("\n" + "_" * 100 + "\nKorean/Japanese Detected. Please enter the romaji/english title:\n")
        # Create a new key called titlejp and assign the old title to it
        releasedata['titlejp'] = releasedata['title']
        # Replace title with the user input.
        releasedata['title'] = input_english_title
    en = detectlanguage(releasedata['artist'])
    if debug:
        print(f"{releasedata['artist']} < English = {en}")
    if en == False:
        input_english_artist = input("\n" + "_" * 100 + "\nKorean/Japanese Detected. Please enter the romaji/english artist name:\n")
        # Replace artist with the user input.
        releasedata['artist'] = input_english_artist
    return releasedata

# Simple function to split a string up into characters
def split(word):
    return [char for char in word]


def detectlanguage(string):
    ## Language Detect
    # This is a required check as we don't want to enter non-english/romaji characters into the title field.
    characters = split(string)
    language_list = []
    for c in characters:
        try:
            language = detect(c)
            language_list.append(language)
        except:
            language = "error"
    if 'ko' in language_list or 'ja' in language_list:
        en = False
    else:
        en = True
    return en

def uploadtorrent(torrent, cover, releasedata):
    languages = ('Japanese', 'English', 'Korean', 'Chinese', 'Vietnamese')
    # POST url.
    uploadurl = "https://sugoimusic.me/upload.php"
    # Dataset containing all of the information obtained from our FLAC files.
    data = releasedata
    if debug:
        print('_' * 100)
        print('Release Data:\n')
        print(releasedata)
    try:
        postDataFiles = {
            'file_input': open(torrent, 'rb'),
            'userfile': open(cover, 'rb')
        }
    except FileNotFoundError:
        print("_" * 100)
        print('File not found!\nPlease confirm file locations and names. Cover image or .torrent file could not be found')
        sys.exit()
    # If the dryrun argument has not been passed we will POST the results to SugoiMusic.
    if dryrun != True:
        JPSres = sm.retrieveContent(uploadurl, "post", data, postDataFiles)
        print('\nUpload POSTED')
    ## TODO Filter through JPSres.text and create error handling based on responses
    # print(JPSres.text)

# Function for transferring the contents of the torrent as well as the torrent.
def ftp_transfer(fileSource, fileDestination, directory, folder_name, watch_folder):
    # Create session
    session = ftplib.FTP(cfg['ftp_prefs']['ftp_server'], cfg['ftp_prefs']['ftp_username'], cfg['ftp_prefs']['ftp_password'])
    # Set session encoding to utf-8 so we can properly handle hangul/other special characters
    session.encoding = 'utf-8'
    # Successful FTP Login Print
    print("_" * 100)
    print("FTP Login Successful")
    print(f"Server Name: {cfg['ftp_prefs']['ftp_server']} : Username: {cfg['ftp_prefs']['ftp_username']}\n")
    if cfg['ftp_prefs']['add_to_downloads_folder']:
        # Create folder based on the directory name of the folder within the torrent.
        try:
            session.mkd(f"{fileDestination}/{folder_name}")
            print(f'Created directory {fileDestination}/{folder_name}')
        except ftplib.error_perm:
            pass
        # Notify user we are beginning the transfer.
        print("Beginning transfer...")
        # Set current folder to the user's preferred destination
        session.cwd(f"{fileDestination}/{folder_name}")
        # Transfer each file in the chosen directory
        for file in os.listdir(directory):
            with open(f"{directory}/{file}", 'rb') as f:
                filesize = os.path.getsize(f"{directory}/{file}")
                ## Transfer file
                # tqdm used for better user feedback.
                with tqdm(unit='blocks', unit_scale=True, leave=False, miniters=1, desc=f'Uploading [{file}]', total=filesize) as tqdm_instance:
                    session.storbinary('STOR ' + file, f, 2048, callback=lambda sent: tqdm_instance.update(len(sent)))
                print(f"{file} | Complete!")
            f.close()
    if cfg['ftp_prefs']['add_to_watch_folder']:
        with open(fileSource, 'rb') as t:
            # Set current folder to the watch directory
            session.cwd(watch_folder)
            ## Transfer file
            # We avoid tqdm here due to the filesize of torrent files.
            # Most connections will upload these within 1-3s, resulting in near useless progress bars.
            session.storbinary(f"STOR {torrentfile}", t)
            print(f"{torrentfile} | Sent to watch folder!")
        t.close()
    # Quit session when complete.
    session.quit()

def localfileorganization(torrent, directory, watch_folder, downloads_folder):
    # Move torrent directory to downloads_folder
    if cfg['local_prefs']['add_to_downloads_folder']:
        try:
            os.mkdir(os.path.join(downloads_folder, os.path.basename(directory)))
        except FileExistsError:
            pass
        copytree(directory, os.path.join(downloads_folder, os.path.basename(directory)))
        shutil.rmtree(directory)
    if cfg['local_prefs']['add_to_watch_folder']:
        os.rename(torrent, f"{watch_folder}/{torrent}")

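# The main block below expects json_data/config.json to provide every key referenced
# in this script. A minimal sketch of the assumed layout (all values are placeholders):
# {
#     "credentials": {"username": "user", "password": "pass"},
#     "local_prefs": {
#         "log_directory": "logs",
#         "save_tracklist": true,
#         "generate_tracklist": true,
#         "enable_release_description": false,
#         "cover_name": "cover.jpg",
#         "add_to_watch_folder": false,
#         "add_to_downloads_folder": false,
#         "local_watch_folder": "/path/to/watch",
#         "local_downloads_folder": "/path/to/downloads"
#     },
#     "ftp_prefs": {
#         "enable_ftp": false,
#         "add_to_watch_folder": false,
#         "add_to_downloads_folder": false,
#         "ftp_server": "ftp.example.com",
#         "ftp_username": "user",
#         "ftp_password": "pass",
#         "ftp_watch_folder": "/watch",
#         "ftp_downloads_folder": "/downloads"
#     }
# }
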
if __name__ == "__main__":
    asciiart()
    args = getargs()
    # TODO consider calling args[] directly, we would then not need this line
    dryrun = freeleech = tags = directory = debug = None
    directory = args.directory
    additional_tags = args.tags
    if args.dryrun:
        dryrun = True
    if args.debug:
        debug = True
    if args.freeleech:
        freeleech = True
    # Load login credentials from JSON and use them to create a login session.
    with open('json_data/config.json') as f:
        cfg = json.load(f)
    loginData = {'username': cfg['credentials']['username'], 'password': cfg['credentials']['password']}
    loginUrl = "https://sugoimusic.me/login.php"
    loginTestUrl = "https://sugoimusic.me"
    successStr = "Enabled users"
    # sm is an object which can be used to make requests with respect to the login session
    sm = smpy.MyLoginSession(loginUrl, loginData, loginTestUrl, successStr, debug=args.debug)
    # Acquire authkey
    authkey = getauthkey()
    # Gather data from the FLAC/MP3 files
    releasedata = gatherdata(directory)
    # folder_name equals the last folder in the path, this is used to rename .torrent files to something relevant.
    folder_name = os.path.basename(os.path.normpath(directory))
    # Identifying cover.jpg path
    # cover_path = directory + "/" + cfg['local_prefs']['cover_name']
    # Create torrent file.
    torrentfile = createtorrent(authkey, directory, folder_name, releasedata)
    # Upload torrent to SugoiMusic
    # uploadtorrent(torrentfile, cover_path, releasedata)
    # Setting variables for watch/download folders
    ftp_watch_folder = cfg['ftp_prefs']['ftp_watch_folder']
    ftp_downloads_folder = cfg['ftp_prefs']['ftp_downloads_folder']
    local_watch_folder = cfg['local_prefs']['local_watch_folder']
    local_downloads_folder = cfg['local_prefs']['local_downloads_folder']
    if cfg['ftp_prefs']['enable_ftp']:
        ftp_transfer(fileSource=torrentfile, fileDestination=ftp_downloads_folder, directory=directory, folder_name=folder_name, watch_folder=ftp_watch_folder)
    if cfg['local_prefs']['add_to_watch_folder'] or cfg['local_prefs']['add_to_downloads_folder']:
        localfileorganization(torrent=torrentfile, directory=directory, watch_folder=local_watch_folder, downloads_folder=local_downloads_folder)