Originally created by slyyxp


# Standard library packages
import re
import os
import sys
import shutil
import string
import argparse
import html
from urllib.parse import urlparse
import json
import ftplib
# Third-party packages
import requests
from bs4 import BeautifulSoup
from mutagen.flac import FLAC
from mutagen.mp3 import MP3
from torf import Torrent
from tqdm import tqdm
from langdetect import detect
# JPS-AU files
import jpspy


def asciiart():
    print("""
     ██╗██████╗ ███████╗       █████╗ ██╗   ██╗
     ██║██╔══██╗██╔════╝      ██╔══██╗██║   ██║
     ██║██████╔╝███████╗█████╗███████║██║   ██║
██   ██║██╔═══╝ ╚════██║╚════╝██╔══██║██║   ██║
╚█████╔╝██║     ███████║      ██║  ██║╚██████╔╝
 ╚════╝ ╚═╝     ╚══════╝      ╚═╝  ╚═╝ ╚═════╝
""")


# Get arguments using argparse
def getargs():
    parser = argparse.ArgumentParser()
    parser.add_argument('-dir', '--directory', help='Initiate upload on directory', nargs='?', required=True)
    parser.add_argument("-f", "--freeleech", help="Enables freeleech", action="store_true")
    parser.add_argument("-t", "--tags", help="Add additional tags to the upload", nargs='?')
    parser.add_argument('-d', '--debug', help='Enable debug mode', action='store_true')
    parser.add_argument("-dry", "--dryrun", help="Dryrun will carry out all actions other than the actual upload to JPS.", action="store_true")
    return parser.parse_args()


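# Example invocations, sketched from the flags defined above. The script filename
# and the album path are placeholders, not part of this repository:
#   python jps-au.py -dir "/path/to/Artist - Album" -dry                 # dry run, no POST to JPS
#   python jps-au.py -dir "/path/to/Artist - Album" -f -t "dance,ballad" # freeleech + extra tags
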
# Acquire the authkey used for torrent files from upload.php
def getauthkey():
    uploadpage = j.retrieveContent("https://jpopsuki.eu/upload.php")
    soup = BeautifulSoup(uploadpage.text, 'html5lib')
    rel2 = str(soup.select('#wrapper #content .thin'))
    # Regex returns multiple matches, could be optimized.
    authkey = re.findall("(?<=value=\")(.*)(?=\")", rel2)[0]
    return authkey


def copytree(src, dst, symlinks=False, ignore=None):
    for item in os.listdir(src):
        s = os.path.join(src, item)
        d = os.path.join(dst, item)
        if os.path.isdir(s):
            shutil.copytree(s, d, symlinks, ignore)
        else:
            shutil.copy2(s, d)


# Creates torrent file using torf module.
def createtorrent(authkey, directory, filename, releasedata):
    # Torf requires the announce URL to be stored in a list, which also makes it easier to add multiple announce URLs.
    t = Torrent(path=directory, trackers=[authkey])
    # Set torrent to private as standard practice for private trackers
    t.private = True
    t.generate()
    ## Format releasedata to build a suitable torrent name.
    # The reason we don't just use the directory name is because of an error in POSTING.
    # POSTS do not seem to POST hangul/jp characters alongside files.
    filename = f"{releasedata['artist']} - {releasedata['title']} [{releasedata['media']}-{releasedata['format']}].torrent"
    # filename = filename.replace("\\", "")
    try:
        t.write(filename)
        print("_" * 100)
        print("Torrent creation:\n")
        print(f"{filename} has been created.")
    except:
        # torf refuses to overwrite an existing file, so remove it and write again.
        print("_" * 100)
        print("Torrent creation:\n")
        os.remove(filename)
        print(f"{filename} already exists, existing torrent will be replaced.")
        t.write(filename)
        print(f"{filename} has been created.")
    return filename


# Reads FLAC file and returns metadata.
def readflac(filename):
    read = FLAC(filename)
    # Create dict containing all meta fields we'll be using.
    tags = {
        "ALBUM": read.get('album'),
        "ALBUMARTIST": read.get('albumartist'),
        "ARTIST": read.get('artist'),
        "DATE": read.get('date')[0],
        "GENRE": "",  # read.get('genre'),
        "TITLE": read.get('title'),
        "COMMENT": read.get('comment'),
        "TRACKNUMBER": read.get('tracknumber')[0].zfill(2),
        "DISCNUMBER": read.get('discnumber')}
    # Not further looked into this but some FLACs hold a grouping key of contentgroup instead of grouping.
    tags['GROUPING'] = read.get('grouping')
    ## If grouping returns None we check contentgroup.
    # If it still returns None we ignore it and handle it on final checks.
    if tags['GROUPING'] is None:
        tags['GROUPING'] = read.get('contentgroup')
    required_tags = ['ALBUM', 'ALBUMARTIST', 'DATE', 'TRACKNUMBER']
    for k, v in tags.items():
        if v is None:
            if k in required_tags:
                print(f"{k} has returned {v}, this is a required tag")
                sys.exit()
    return tags


# Reads MP3 file and returns metadata.
def readmp3(filename):
    read = MP3(filename)
    # Create dict containing all meta fields we'll be using.
    tags = {
        "ALBUM": read.get('TALB'),  # Album Title
        "ALBUMARTIST": read.get('TPE2'),  # Album Artist
        "ARTIST": read.get('TPE1'),  # Track Artist
        "DATE": str(read.get('TDRC')),  # Date YYYYMMDD (Will need to add a try/except for other possible identifiers)
        "GENRE": read.get('TCON').text,  # Genre
        "TITLE": read.get('TIT2'),  # Track Title
        "COMMENT": read.get('COMM::eng'),  # Track Comment
        "GROUPING": read.get('TIT1'),  # Grouping
        "TRACKNUMBER": re.sub(r"\/.*", "", str(read.get('TRCK'))).zfill(2),  # Tracknumber (Format #/Total) re.sub removes /#
        "DISCNUMBER": re.sub(r"\/.*", "", str(read.get('TPOS')))}  # Discnumber (Format #/Total) re.sub removes /#
    required_tags = ['ALBUM', 'ALBUMARTIST', 'DATE', 'TRACKNUMBER']
    for k, v in tags.items():
        if v is None:
            if k in required_tags:
                print(f"{k} has returned {v}, this is a required tag")
                sys.exit()
    return tags


# Generates new log file based on directory contents
def generatelog(track_titles, log_filename, log_directory):
    # Separate each tracklist entry in the list with a newline
    track_titles = '\n'.join([str(x) for x in track_titles])
    # Format tracklist layout
    log_contents = f"""[size=5][b]Tracklist[/b][/size]\n{track_titles}
"""
    # If we have chosen to save the tracklist we write log_contents to a .log file within the specified log directory.
    if cfg['local_prefs']['save_tracklist']:
        # Write to {album_name}.log
        with open(f"{log_directory}/{log_filename}.log", "w+") as f:
            f.write(log_contents)
            # Reset position to first line and read
            f.seek(0)
            log_contents = f.read()
    # If debug mode is enabled we will print the log contents.
    if debug:
        print("_" * 100)
        print(f"Log Contents/Tracklisting: {log_contents}")
    return log_contents


def readlog(log_name, log_directory):
    with open(f"{log_directory}/{log_name}.log", "r+") as f:
        log_contents = f.read()
    return log_contents


def add_to_hangul_dict(hangul, english, category):
    hangul = str(hangul)
    english = str(english)
    categories = ['version', 'general', 'artist', 'genres', 'label', 'distr']
    file = "json_data/dictionary.json"
    json_file = open(file, 'r', encoding='utf-8', errors='ignore')
    dictionary = json.load(json_file)
    json_file.close()
    new = dict()
    for cats in dictionary:
        #== Create the categories in the new temp file
        new[cats] = dict()
        for key, value in dictionary[cats].items():
            #== List all the old items into the new dict
            new[cats][key] = value
    if hangul in new[category].keys():
        if new[category].get(hangul) is None:
            if english != 'None':
                new[category][hangul] = english
        else:
            #== Only update if English word has been supplied ==#
            if english != 'None':
                new[category][hangul] = english
    else:
        if english == 'None':
            new[category][hangul] = None
        else:
            new[category][hangul] = english
    json_write = open(file, 'w+', encoding='utf-8')
    json_write.write(json.dumps(new, indent=4, ensure_ascii=False))
    json_write.close()


def translate(string, category, result=None, output=None):
    file = "json_data/dictionary.json"
    with open(file, encoding='utf-8', errors='ignore') as f:
        dictionary = json.load(f, strict=False)
    category = str(category)
    string = str(string)
    search = dictionary[category]
    string = string.strip()
    if string == 'Various Artists':
        output = ['Various Artists', None]
    else:
        #== NO NEED TO SEARCH - STRING HAS HANGUL+ENGLISH or HANGUL+HANGUL ==#
        if re.search(r"\((?P<inside>.*)\)", string):
            #== Complete translation, add to dictionary with both values ==#
            #== Contains parentheses, need to split
            parenthesis = string.split("(")
            pre_parenthesis = parenthesis[0].strip()
            in_parenthesis = parenthesis[1].replace(")", "").strip()
            #== Check the order of the parentheses ==#
            if re.search(r"[^\u0000-\u007F]+", pre_parenthesis) and re.search(r"[^\u0000-\u007F]+", in_parenthesis):
                #== Both hangul
                first = 'kr'
                second = 'kr'
            else:
                if re.search(r"[^\u0000-\u007F]+", pre_parenthesis):
                    first = 'kr'
                    second = 'eng'
                else:
                    first = 'eng'
                    second = 'kr'
            if first == 'kr' and second == 'eng':
                #== Hangul first ==#
                hangul = pre_parenthesis
                english = in_parenthesis
                add_to_hangul_dict(hangul, english, category)
            elif first == 'eng' and second == 'kr':
                #== English first ==#
                hangul = in_parenthesis
                english = pre_parenthesis
                add_to_hangul_dict(hangul, english, category)
            elif first == 'kr' and second == 'kr':
                #== Both Hangul ==#
                hangul = pre_parenthesis
                english = None
                add_to_hangul_dict(pre_parenthesis, None, category)
                add_to_hangul_dict(hangul, None, category)
            else:
                #== Both English
                hangul = None
                english = pre_parenthesis
            output = [hangul, english]
        #== No parentheses - HANGUL
        else:
            #== If the input string is a full Hangul word - check dictionary and then add if necessary
            if re.search(r"[^\u0000-\u007F]+", string):
                if string in search.keys():
                    #== yes
                    if search.get(string) is None:
                        #== If the keyword does not have a translation, add it to the dictionary ==#
                        output = [string, None]
                    else:
                        #== Translation already exists, output the result in a list ==#
                        output = [string, search.get(string)]
                else:
                    output = [string, None]
                    add_to_hangul_dict(string, None, category)
            #== Full English name -- leave it
            else:
                for key, value in search.items():
                    if key == string:
                        output = [value, string]
                        break
                    else:
                        output = [string, string]
    return output


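# The translate()/add_to_hangul_dict() helpers above assume json_data/dictionary.json
# holds one object per category, each mapping a hangul key to its English value (or
# null when no translation is known yet). A minimal sketch of that layout; the entries
# shown are purely illustrative, the real file ships with the project:
#
# {
#     "version": {},
#     "general": {},
#     "artist": {"아이유": "IU"},
#     "genres": {"발라드": "Ballad"},
#     "label": {},
#     "distr": {},
#     "release_types": {}
# }
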
def gatherdata(directory):
    # Lists for storing the tag data gathered from each track.
    list_album_artists = []
    list_track_artists = []
    list_album = []
    list_genre = []
    translated_genre = []
    translated_album_artists = []
    tracklist_entries = []
    # Creation of releasedata dict, this will store formatted meta used for the POST.
    releasedata = {}
    ## Set no log as the default value.
    # This will be set to True if a .log file is found, in turn this will allow us to determine if WEB or CD.
    log_available = False
    flac_present = False
    mp3_present = False
    # Read directory contents, grab metadata of .flac/.mp3 files.
    for file in os.listdir(directory):
        file_location = os.path.join(directory, file)
        if file.endswith(".flac"):
            # Read FLAC file to grab meta
            tags = readflac(file_location)
            flac_present = True
            # If Discnumber isn't present then we omit it from the tracklist entry
            if tags['DISCNUMBER'] is None:
                tracklist_entry = f"[b]{tags['TRACKNUMBER']}[/b]. {tags['TITLE'][0]}"
            else:
                tracklist_entry = f"[b]{tags['DISCNUMBER'][0]}-{tags['TRACKNUMBER']}[/b]. {tags['TITLE'][0]}"
            tracklist_entries.append(tracklist_entry)
            if debug:
                print("_" * 100)
                print(f"Tags for {file}:\n{tags}")
        if file.endswith(".mp3"):
            # Read MP3 file to grab meta
            tags = readmp3(file_location)
            mp3_present = True
            # If Discnumber isn't present then we omit it from the tracklist entry
            if tags['DISCNUMBER'] == "None":
                tracklist_entry = f"[b]{tags['TRACKNUMBER']}[/b]. {tags['TITLE'][0]}"
            else:
                tracklist_entry = f"[b]{tags['DISCNUMBER']}-{tags['TRACKNUMBER']}[/b]. {tags['TITLE'][0]}"
            tracklist_entries.append(tracklist_entry)
            if debug:
                print("_" * 100)
                print(f"Tags for {file}:\n{tags}")
        # Only gather tag-based data for audio files (skips cover art, logs, etc.).
        if file.endswith((".flac", ".mp3")):
            # If only one genre in list attempt to split as there's likely more.
            if len(tags['GENRE']) == 1:
                tags['GENRE'] = tags['GENRE'][0].split(";")
            for aa in tags['ALBUMARTIST']:
                list_album_artists.append(aa)
            for a in tags['ARTIST']:
                list_track_artists.append(a)
            list_album.append(tags['ALBUM'][0])
            # for g in tags['GENRE']:
            #     list_genre.append(g)
        # Check files to make sure there's no multi-format.
        if flac_present:
            format = 'FLAC'
            bitrate = 'Lossless'
        if mp3_present:
            format = 'MP3'
            bitrate = '320'
        if flac_present and mp3_present:
            print("Mutt detected, exiting.")
            sys.exit()
        if file.endswith(".log"):
            log_available = True
    # A .log file indicates a CD rip, otherwise we assume WEB.
    if log_available == True:
        media = 'CD'
    else:
        media = 'WEB'
    # Load dictionary.json for translations
    file = "json_data/dictionary.json"
    with open(file, encoding='utf-8', errors='ignore') as f:
        dictionary = json.load(f, strict=False)
    # Split additional genres at commas and append them to the existing genre tags
    if additional_tags is not None:
        split_tags = additional_tags.split(",")
        for s in split_tags:
            list_genre.append(s)
    # Translate genres using the dictionary and append to translated_genre
    for g in set(list_genre):
        translation = translate(g, "genres")[0]
        translated_genre.append(translation)
    # Translate album artists using the dictionary and append to translated_album_artists
    for a in set(list_album_artists):
        if tags['ALBUMARTIST'][0] == 'Various Artists':
            translated_artist_name = 'V.A.'
            translated_album_artists.append("V.A.")
        else:
            translated_artist_name = translate(string=tags['ALBUMARTIST'][0], category="artist")
            translated_album_artists.append(translated_artist_name[0])
    ## Identify unique values using sets.
    unique_album_artists = ','.join(set(translated_album_artists))
    unique_track_artists = ','.join(set(list_track_artists))
    unique_genre = ','.join(set(translated_genre))
    unique_album = set(list_album)
    ## Acquire the contents of our log file to be used for the album description.
    # Comments store the album id which matches our log names, so we can use the comment tag to find our album descriptions.
    log_directory = cfg['local_prefs']['log_directory']
    # Album description taken from the log file.
    if cfg['local_prefs']['generate_tracklist']:
        log_filename = f"{unique_album_artists} - {tags['ALBUM'][0]}"
        album_description = generatelog(tracklist_entries, log_filename, log_directory)
    else:
        log_filename = tags['COMMENT'][0]
        album_description = readlog(log_filename, log_directory)
    ## If release description is enabled we link the comment tag to the Bugs album url.
    # Note that this is dependent on the album being sourced from Bugs, so it should be changed per user.
    if cfg['local_prefs']['enable_release_description']:
        try:
            release_description = f"Sourced from [url=https://music.bugs.co.kr/album/{tags['COMMENT'][0]}]Bugs[/url]"
        # If any exceptions occur we fall back to no release description
        except:
            release_description = ""
    # If release description is not enabled we use no release description
    else:
        release_description = ""
    ## Assign all our unique values into releasedata{}. We'll use this later down the line for POSTING.
    # POST values can be found by inspecting the JPS HTML.
    releasedata['submit'] = 'true'
    # List of accepted upload types
    accepted_types = ['Album', 'Single']
    # If the type lookup errors we ask for user input
    try:
        releasedata['type'] = translate(tags['GROUPING'][0], "release_types")[0]
    except TypeError:
        releasedata['type'] = input("\n" + "_" * 100 + "\nGrouping is empty or has received an error, please enter manually (Album/Single)\n")
    # If type is still not in accepted_types we ask for user input again and do not break the loop until it is correct
    if releasedata['type'] not in accepted_types:
        while True:
            releasedata['type'] = input("\n" + "_" * 100 + "\nGrouping tag did not return an album type, please enter manually (Album/Single)\n")
            if releasedata['type'] not in accepted_types:
                continue
            else:
                break
    releasedata['title'] = tags['ALBUM'][0]
    releasedata['artist'] = unique_album_artists
    # If the value of album artist and artist is the same, we don't need to POST the original artist.
    if unique_album_artists != unique_track_artists:
        releasedata['artistjp'] = unique_track_artists
    # re.sub removes any date separators, JPS doesn't accept them
    releasedata['releasedate'] = re.sub(r"[^0-9]", "", tags['DATE'])
    releasedata['format'] = format
    releasedata['bitrate'] = bitrate
    releasedata['media'] = media
    releasedata['album_desc'] = album_description
    releasedata['release_desc'] = release_description
    releasedata['tags'] = unique_genre
    # Enable freeleech if the arg is passed
    if freeleech:
        releasedata['freeleech'] = "true"
    ## Language Checks
    # This is a required check as we don't want to enter non-english/romaji characters into the title/artist fields.
    en = detectlanguage(releasedata['title'])
    if debug:
        print("_" * 100)
        print("Title/Artist Language:\n")
        print(f"{releasedata['title']} < English = {en}")
    if en == False:
        input_english_title = input("\n" + "_" * 100 + "\nKorean/Japanese Detected. Please enter the romaji/english title:\n")
        # Create a new key called titlejp and assign the old title to it
        releasedata['titlejp'] = releasedata['title']
        # Replace title with the user input.
        releasedata['title'] = input_english_title
    en = detectlanguage(releasedata['artist'])
    if debug:
        print(f"{releasedata['artist']} < English = {en}")
    if en == False:
        input_english_artist = input("\n" + "_" * 100 + "\nKorean/Japanese Detected. Please enter the romaji/english artist name:\n")
        # Replace artist with the user input.
        releasedata['artist'] = input_english_artist
    return releasedata


# Simple function to split a string up into characters
def split(word):
    return [char for char in word]


def detectlanguage(string):
    ## Language Detect
    # This is a required check as we don't want to enter non-english/romaji characters into the title field.
    characters = split(string)
    language_list = []
    for c in characters:
        try:
            language = detect(c)
            language_list.append(language)
        except:
            language = "error"
    # Flag the string as non-English if any character was detected as Korean or Japanese.
    if 'ko' in language_list or 'ja' in language_list:
        en = False
    else:
        en = True
    return en


def uploadtorrent(torrent, cover, releasedata):
    # POST url.
    uploadurl = "https://jpopsuki.eu/upload.php"
    # Dataset containing all of the information obtained from our FLAC files.
    data = releasedata
    if debug:
        print('_' * 100)
        print('Release Data:\n')
        print(releasedata)
    try:
        postDataFiles = {
            'file_input': open(torrent, 'rb'),
            'userfile': open(cover, 'rb')
        }
    except FileNotFoundError:
        print("_" * 100)
        print('File not found!\nPlease confirm file locations and names. Cover image or .torrent file could not be found')
        sys.exit()
    # If the dryrun argument has not been passed we POST the results to JPopSuki.
    if dryrun != True:
        JPSres = j.retrieveContent(uploadurl, "post", data, postDataFiles)
        print('\nUpload POSTED')
    ## TODO Filter through JPSres.text and create error handling based on responses
    # print(JPSres.text)


# Function for transferring the contents of the torrent as well as the torrent itself.
def ftp_transfer(fileSource, fileDestination, directory, folder_name, watch_folder):
    # Create session
    session = ftplib.FTP(cfg['ftp_prefs']['ftp_server'], cfg['ftp_prefs']['ftp_username'], cfg['ftp_prefs']['ftp_password'])
    # Set session encoding to utf-8 so we can properly handle hangul/other special characters
    session.encoding = 'utf-8'
    # Successful FTP login print
    print("_" * 100)
    print("FTP Login Successful")
    print(f"Server Name: {cfg['ftp_prefs']['ftp_server']} : Username: {cfg['ftp_prefs']['ftp_username']}\n")
    if cfg['ftp_prefs']['add_to_downloads_folder']:
        # Create folder based on the directory name of the folder within the torrent.
        try:
            session.mkd(f"{fileDestination}/{folder_name}")
            print(f'Created directory {fileDestination}/{folder_name}')
        except ftplib.error_perm:
            pass
        # Notify user we are beginning the transfer.
        print("Beginning transfer...")
        # Set current folder to the user's preferred destination
        session.cwd(f"{fileDestination}/{folder_name}")
        # Transfer each file in the chosen directory
        for file in os.listdir(directory):
            with open(f"{directory}/{file}", 'rb') as f:
                filesize = os.path.getsize(f"{directory}/{file}")
                ## Transfer file
                # tqdm used for better user feedback.
                with tqdm(unit='blocks', unit_scale=True, leave=False, miniters=1, desc=f'Uploading [{file}]', total=filesize) as tqdm_instance:
                    session.storbinary('STOR ' + file, f, 2048, callback=lambda sent: tqdm_instance.update(len(sent)))
                print(f"{file} | Complete!")
    if cfg['ftp_prefs']['add_to_watch_folder']:
        with open(fileSource, 'rb') as t:
            # Set current folder to the watch directory
            session.cwd(watch_folder)
            ## Transfer file
            # We avoid tqdm here due to the filesize of torrent files.
            # Most connections will upload these within 1-3s, resulting in near useless progress bars.
            session.storbinary(f"STOR {torrentfile}", t)
            print(f"{torrentfile} | Sent to watch folder!")
    # Quit session when complete.
    session.quit()


def localfileorganization(torrent, directory, watch_folder, downloads_folder):
    # Move torrent directory to downloads_folder
    if cfg['local_prefs']['add_to_downloads_folder']:
        try:
            os.mkdir(os.path.join(downloads_folder, os.path.basename(directory)))
        except FileExistsError:
            pass
        copytree(directory, os.path.join(downloads_folder, os.path.basename(directory)))
        shutil.rmtree(directory)
    if cfg['local_prefs']['add_to_watch_folder']:
        os.rename(torrent, f"{watch_folder}/{torrent}")


if __name__ == "__main__":
    asciiart()
    args = getargs()
    # TODO consider calling args[] directly, we will then not need this line
    dryrun = freeleech = tags = directory = debug = None
    directory = args.directory
    additional_tags = args.tags
    if args.dryrun:
        dryrun = True
    if args.debug:
        debug = True
    if args.freeleech:
        freeleech = True
    # Load login credentials from JSON and use them to create a login session.
    with open('json_data/config.json') as f:
        cfg = json.load(f)
    loginData = {'username': cfg['credentials']['username'], 'password': cfg['credentials']['password']}
    loginUrl = "https://jpopsuki.eu/login.php"
    loginTestUrl = "https://jpopsuki.eu"
    successStr = "Latest 5 Torrents"
    # j is an object which can be used to make requests with respect to the login session
    j = jpspy.MyLoginSession(loginUrl, loginData, loginTestUrl, successStr, debug=args.debug)
    # Acquire authkey
    authkey = getauthkey()
    # Gather release data from the chosen directory
    releasedata = gatherdata(directory)
    # folder_name equals the last folder in the path, this is used to rename .torrent files to something relevant.
    folder_name = os.path.basename(os.path.normpath(directory))
    # Identify the cover image path
    cover_path = directory + "/" + cfg['local_prefs']['cover_name']
    # Create torrent file.
    torrentfile = createtorrent(authkey, directory, folder_name, releasedata)
    # Upload torrent to JPopSuki
    uploadtorrent(torrentfile, cover_path, releasedata)
    # Set variables for the watch/download folders
    ftp_watch_folder = cfg['ftp_prefs']['ftp_watch_folder']
    ftp_downloads_folder = cfg['ftp_prefs']['ftp_downloads_folder']
    local_watch_folder = cfg['local_prefs']['local_watch_folder']
    local_downloads_folder = cfg['local_prefs']['local_downloads_folder']
    if cfg['ftp_prefs']['enable_ftp']:
        ftp_transfer(fileSource=torrentfile, fileDestination=ftp_downloads_folder, directory=directory, folder_name=folder_name, watch_folder=ftp_watch_folder)
    if cfg['local_prefs']['add_to_watch_folder'] or cfg['local_prefs']['add_to_downloads_folder']:
        localfileorganization(torrent=torrentfile, directory=directory, watch_folder=local_watch_folder, downloads_folder=local_downloads_folder)
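
# The script expects a json_data/config.json next to json_data/dictionary.json.
# A minimal sketch covering the keys referenced above; every value shown here is a
# placeholder, not a real credential or path:
#
# {
#     "credentials": {"username": "your-username", "password": "your-password"},
#     "local_prefs": {
#         "cover_name": "cover.jpg",
#         "log_directory": "logs",
#         "save_tracklist": true,
#         "generate_tracklist": true,
#         "enable_release_description": false,
#         "add_to_watch_folder": false,
#         "add_to_downloads_folder": false,
#         "local_watch_folder": "/path/to/watch",
#         "local_downloads_folder": "/path/to/downloads"
#     },
#     "ftp_prefs": {
#         "enable_ftp": false,
#         "add_to_watch_folder": false,
#         "add_to_downloads_folder": false,
#         "ftp_server": "ftp.example.com",
#         "ftp_username": "user",
#         "ftp_password": "password",
#         "ftp_watch_folder": "/watch",
#         "ftp_downloads_folder": "/downloads"
#     }
# }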