Based off JPS-AU

# Standard library packages
import re
import os
import sys
import shutil
import string
import argparse
import html
from urllib.parse import urlparse
import json
import ftplib
# Third-party packages
import requests
from bs4 import BeautifulSoup
from mutagen.flac import FLAC
from mutagen.mp3 import MP3
from torf import Torrent
from tqdm import tqdm
from langdetect import detect
# JPS-AU files
import smpy
def asciiart():
    print("""
███████╗███╗ ███╗ █████╗ ██╗ ██╗
██╔════╝████╗ ████║ ██╔══██╗██║ ██║
███████╗██╔████╔██║█████╗███████║██║ ██║
╚════██║██║╚██╔╝██║╚════╝██╔══██║██║ ██║
███████║██║ ╚═╝ ██║ ██║ ██║╚██████╔╝
╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝
""")
# Get arguments using argparse
def getargs():
    parser = argparse.ArgumentParser()
    parser.add_argument('-dir', '--directory', help='Initiate upload on directory', nargs='?', required=True)
    parser.add_argument("-f", "--freeleech", help="Enables freeleech", action="store_true")
    parser.add_argument("-t", "--tags", help="Add additional tags to the upload", nargs='?')
    parser.add_argument('-d', '--debug', help='Enable debug mode', action='store_true')
    parser.add_argument("-dry", "--dryrun", help="Dryrun will carry out all actions other than the actual upload to SugoiMusic.", action="store_true")
    parser.add_argument("-i", "--imageURL", help='Set the torrent cover URL', nargs='?')
    return parser.parse_args()
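# Example invocation (illustrative only; the script filename below is an assumption,
# substitute whatever this file is saved as):
#   python sm-au.py -dir "/path/to/Artist - Album" -t "tag1,tag2" -f
#   python sm-au.py -dir "/path/to/Artist - Album" -dry -d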
# Acquire the authkey used for torrent files from upload.php
def getauthkey():
    """
    Get SM session authkey for use by uploadtorrent() data dict.
    Uses SM login data.
    :return: authkey
    """
    smpage = sm.retrieveContent("https://sugoimusic.me/torrents.php?id=118")  # Arbitrary page on SugoiMusic that contains an authkey
    soup = BeautifulSoup(smpage.text, 'html5lib')
    rel2 = str(soup.select('#content .thin .main_column .torrent_table tbody'))
    authkey = re.findall('authkey=(.*)&torrent_pass=', rel2)
    return authkey
def copytree(src, dst, symlinks=False, ignore=None):
    for item in os.listdir(src):
        s = os.path.join(src, item)
        d = os.path.join(dst, item)
        if os.path.isdir(s):
            shutil.copytree(s, d, symlinks, ignore)
        else:
            shutil.copy2(s, d)
# Creates torrent file using torf module.
def createtorrent(authkey, directory, filename, releasedata):
    t = Torrent(path=directory,
                trackers=[authkey])  # Torf requires the announce URLs in a list object; this makes it easier to add multiple announce URLs.
    # Set torrent to private as standard practice for private trackers
    t.private = True
    t.source = "SugoiMusic"
    t.generate()
    ## Format releasedata to build a suitable torrent name.
    # The reason we don't just use the directory name is because of an error in POSTING.
    # POSTs do not seem to handle hangul/JP characters alongside files.
    # filename = f"{releasedata['idols[]']} - {releasedata['title']} [{releasedata['media']}-{releasedata['audioformat']}].torrent"
    filename = f"{releasedata['title']} [{releasedata['media']}-{releasedata['audioformat']}].torrent"
    filename = filename.replace("/", "")
    try:
        t.write(filename)
        print("_" * 100)
        print("Torrent creation:\n")
        print(f"{filename} has been created.")
    except:
        print("_" * 100)
        print("Torrent creation:\n")
        os.remove(filename)
        print(f"{filename} already exists, existing torrent will be replaced.")
        t.write(filename)
        print(f"{filename} has been created.")
    return filename
# Reads FLAC file and returns metadata.
def readflac(filename):
    read = FLAC(filename)
    # Create dict containing all meta fields we'll be using.
    tags = {
        "ALBUM": read.get('album'),
        "ALBUMARTIST": read.get('albumartist'),
        "ARTIST": read.get('artist'),
        "DATE": read.get('date')[0],
        "GENRE": "",  # read.get('genre'),
        "TITLE": read.get('title'),
        "COMMENT": read.get('comment'),
        "TRACKNUMBER": read.get('tracknumber')[0].zfill(2),
        "DISCNUMBER": read.get('discnumber')}
    # Some FLACs hold the grouping in a 'contentgroup' key instead of 'grouping'.
    tags['GROUPING'] = read.get('grouping')
    ## If grouping returns None we check contentgroup.
    # If it still returns None we ignore it and handle it during final checks.
    if tags['GROUPING'] is None:
        tags['GROUPING'] = read.get('contentgroup')
    required_tags = ['ALBUM', 'ALBUMARTIST', 'DATE', 'TRACKNUMBER']
    for k, v in tags.items():
        if v is None:
            if k in required_tags:
                print(f"{k} has returned {v}, this is a required tag")
                sys.exit()
    return tags
# Reads MP3 file and returns metadata.
def readmp3(filename):
    read = MP3(filename)
    # Create dict containing all meta fields we'll be using.
    tags = {
        "ALBUM": read.get('TALB'),  # Album Title
        "ALBUMARTIST": read.get('TPE2'),  # Album Artist
        "ARTIST": read.get('TPE1'),  # Track Artist
        "DATE": str(read.get('TDRC')),  # Date YYYYMMDD (will need a try/except for other possible identifiers)
        "GENRE": read.get('TCON').text,  # Genre
        "TITLE": read.get('TIT2'),  # Track Title
        "COMMENT": read.get('COMM::eng'),  # Track Comment
        "GROUPING": read.get('TIT1'),  # Grouping
        "TRACKNUMBER": re.sub(r"\/.*", "", str(read.get('TRCK'))).zfill(2),  # Tracknumber (format #/total); re.sub removes /#
        "DISCNUMBER": re.sub(r"\/.*", "", str(read.get('TPOS')))}  # Discnumber (format #/total); re.sub removes /#
    required_tags = ['ALBUM', 'ALBUMARTIST', 'DATE', 'TRACKNUMBER']
    for k, v in tags.items():
        if v is None:
            if k in required_tags:
                print(f"{k} has returned {v}, this is a required tag")
                sys.exit()
    return tags
# Generates new log file based on directory contents
def generatelog(track_titles, log_filename, log_directory):
    # Separate each tracklist entry in the list with a newline
    track_titles = '\n'.join([str(x) for x in track_titles])
    # Format tracklist layout
    log_contents = f"""[size=5][b]Tracklist[/b][/size]\n{track_titles}
"""
    # If we have chosen to save the tracklist then we write log_contents to a .log file within the specified log directory
    if cfg['local_prefs']['save_tracklist']:
        # Write to {album_name}.log
        with open(f"{log_directory}/{log_filename}.log", "w+") as f:
            f.write(log_contents)
            # Reset position to the first line and read
            f.seek(0)
            log_contents = f.read()
            f.close()
    # If debug mode is enabled we print the log contents.
    if debug:
        print("_" * 100)
        print(f"Log Contents/Tracklisting: {log_contents}")
    return log_contents
def readlog(log_name, log_directory):
    with open(f"{log_directory}/{log_name}.log", "r+") as f:
        log_contents = f.read()
        f.close()
    return log_contents
def add_to_hangul_dict(hangul, english, category):
    hangul = str(hangul)
    english = str(english)
    categories = ['version', 'general', 'artist', 'genres', 'label', 'distr']
    file = "json_data/dictionary.json"
    json_file = open(file, 'r', encoding='utf-8', errors='ignore')
    dictionary = json.load(json_file)
    json_file.close()
    new = dict()
    for cats in dictionary:
        #== Create the categories in the new temp file
        new[cats] = dict()
        for key, value in dictionary[cats].items():
            #== List all the old items into the new dict
            new[cats][key] = value
    if hangul in new[category].keys():
        if new[category].get(hangul) is None:
            if english != 'None':
                new[category][hangul] = english
        else:
            #== Only update if an English word has been supplied ==#
            if english != 'None':
                new[category][hangul] = english
    else:
        if english == 'None':
            new[category][hangul] = None
        else:
            new[category][hangul] = english
    json_write = open(file, 'w+', encoding='utf-8')
    json_write.write(json.dumps(new, indent=4, ensure_ascii=False))
    json_write.close()
def translate(string, category, result=None, output=None):
    file = "json_data/dictionary.json"
    with open(file, encoding='utf-8', errors='ignore') as f:
        dictionary = json.load(f, strict=False)
    category = str(category)
    string = str(string)
    search = dictionary[category]
    string = string.strip()
    if string == 'Various Artists':
        output = ['Various Artists', None]
    else:
        #== NO NEED TO SEARCH - STRING HAS HANGUL+ENGLISH or HANGUL+HANGUL ==#
        if re.search(r"\((?P<inside>.*)\)", string):
            #== Complete translation, add to dictionary with both values ==#
            #== Contains parentheses, need to split
            parenthesis = string.split("(")
            pre_parenthesis = parenthesis[0].strip()
            in_parenthesis = parenthesis[1].replace(")", "").strip()
            #== Check the order of the parentheses ==#
            if re.search(r"[^\u0000-\u007F]+", pre_parenthesis) and re.search(r"[^\u0000-\u007F]+", in_parenthesis):
                #== Both hangul
                first = 'kr'
                second = 'kr'
            else:
                if re.search(r"[^\u0000-\u007F]+", pre_parenthesis):
                    first = 'kr'
                    second = 'eng'
                else:
                    first = 'eng'
                    second = 'kr'
            if first == 'kr' and second == 'eng':
                #== Hangul first ==#
                hangul = pre_parenthesis
                english = in_parenthesis
                add_to_hangul_dict(hangul, english, category)
            elif first == 'eng' and second == 'kr':
                #== English first ==#
                hangul = in_parenthesis
                english = pre_parenthesis
                add_to_hangul_dict(hangul, english, category)
            elif first == 'kr' and second == 'kr':
                #== Both Hangul ==#
                hangul = pre_parenthesis
                english = None
                add_to_hangul_dict(pre_parenthesis, None, category)
                add_to_hangul_dict(hangul, None, category)
            else:
                #== Both English
                hangul = None
                english = pre_parenthesis
            output = [hangul, english]
        #== No parentheses - HANGUL
        else:
            #== If the input string is a full Hangul word - check the dictionary and then add if necessary
            if re.search(r"[^\u0000-\u007F]+", string):
                if string in search.keys():
                    #== yes
                    if search.get(string) is None:
                        #== If the keyword does not have a translation, output the Hangul on its own ==#
                        output = [string, None]
                    else:
                        #== Translation already exists, output the result in a list ==#
                        output = [string, search.get(string)]
                else:
                    output = [string, None]
                    add_to_hangul_dict(string, None, category)
            #== Full English name -- leave it
            else:
                for key, value in search.items():
                    if key == string:
                        output = [value, string]
                        break
                    else:
                        output = [string, string]
    return output
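# The translation helpers above assume json_data/dictionary.json is a single JSON object
# with one sub-object per category, mapping Hangul keys to an English translation or null
# when none is known yet. A minimal sketch (category names are taken from the code above,
# plus "release_types" used by gatherdata(); the entries themselves are illustrative only):
# {
#     "version": {},
#     "general": {},
#     "artist": {"아이유": "IU"},
#     "genres": {"발라드": "Ballad"},
#     "label": {},
#     "distr": {},
#     "release_types": {}
# }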
def gatherdata(directory):
    # Lists for storing tag data gathered from the audio files
    list_album_artists = []
    list_track_artists = []
    list_album = []
    list_genre = []
    translated_genre = []
    translated_album_artists = []
    tracklist_entries = []
    # Creation of releasedata dict, this will store formatted meta used for the POST.
    releasedata = {}
    ## Set no log as the default value.
    # This will be set to True if a .log file is found, in turn this will allow us to determine if WEB or CD.
    log_available = False
    flac_present = False
    mp3_present = False
    # Read directory contents, grab metadata of .flac/.mp3 files.
    for file in os.listdir(directory):
        file_location = os.path.join(directory, file)
        if file.endswith(".flac"):
            # Read FLAC file to grab meta
            tags = readflac(file_location)
            flac_present = True
            # If Discnumber isn't present then we omit it from the tracklist entry
            if tags['DISCNUMBER'] is None:
                tracklist_entry = f"[b]{tags['TRACKNUMBER']}[/b]. {tags['TITLE'][0]}"
            else:
                tracklist_entry = f"[b]{tags['DISCNUMBER'][0]}-{tags['TRACKNUMBER']}[/b]. {tags['TITLE'][0]}"
            tracklist_entries.append(tracklist_entry)
            if debug:
                print("_" * 100)
                print(f"Tags for {file}:\n{tags}")
        if file.endswith(".mp3"):
            # Read MP3 file to grab meta
            tags = readmp3(file_location)
            mp3_present = True
            # If Discnumber isn't present then we omit it from the tracklist entry
            if tags['DISCNUMBER'] == "None":
                tracklist_entry = f"[b]{tags['TRACKNUMBER']}[/b]. {tags['TITLE'][0]}"
            else:
                tracklist_entry = f"[b]{tags['DISCNUMBER']}-{tags['TRACKNUMBER']}[/b]. {tags['TITLE'][0]}"
            tracklist_entries.append(tracklist_entry)
            if debug:
                print("_" * 100)
                print(f"Tags for {file}:\n{tags}")
        # If only one genre is in the list, attempt to split it as there's likely more.
        if len(tags['GENRE']) == 1:
            tags['GENRE'] = tags['GENRE'][0].split(";")
        for aa in tags['ALBUMARTIST']:
            list_album_artists.append(aa)
        for a in tags['ARTIST']:
            list_track_artists.append(a)
        list_album.append(tags['ALBUM'][0])
        # for g in tags['GENRE']:
        #     list_genre.append(g)
        # Check files to make sure there's no multi-format.
        if flac_present:
            format = 'FLAC'
            bitrate = 'Lossless'
        if mp3_present:
            format = 'MP3'
            bitrate = '320'
        if flac_present and mp3_present:
            print("Mutt detected, exiting.")
            sys.exit()
        if file.endswith(".log"):
            log_available = True
    if log_available == True:
        media = 'CD'
    else:
        media = 'Web'
    # Load dictionary.json for translations
    file = "json_data/dictionary.json"
    with open(file, encoding='utf-8', errors='ignore') as f:
        dictionary = json.load(f, strict=False)
    # Split additional genres at commas and append to the existing genre tags
    if additional_tags is not None:
        split_tags = additional_tags.split(",")
        for s in split_tags:
            list_genre.append(s)
    # Translate genres using the dict and append to translated_genre
    for g in set(list_genre):
        translation = translate(g, "genres")[0]
        translated_genre.append(translation)
    # Translate artists using the dict and append to translated_album_artists
    for a in set(list_album_artists):
        if tags['ALBUMARTIST'][0] == 'Various Artists':
            translated_artist_name = 'V.A.'
            translated_album_artists.append("V.A.")
        else:
            translated_artist_name = translate(string=tags['ALBUMARTIST'][0], category="artist")
            translated_album_artists.append(translated_artist_name[0])
    ## Identify unique values using sets.
    unique_album_artists = ','.join(set(translated_album_artists))
    unique_track_artists = ','.join(set(list_track_artists))
    unique_genre = ','.join(set(translated_genre))
    unique_album = set(list_album)
    ## Acquire contents of our log file to be used for the album description
    # Comments store the album id which matches our log names, so we can use the comment tag to find our album descriptions.
    log_directory = cfg['local_prefs']['log_directory']
    # Album description taken from the log file.
    if cfg['local_prefs']['generate_tracklist']:
        log_filename = f"{unique_album_artists} - {tags['ALBUM'][0]}"
        album_description = generatelog(tracklist_entries, log_filename, log_directory)
    else:
        log_filename = tags['COMMENT'][0]
        album_description = readlog(log_filename, log_directory)
    ## If release description is enabled we apply comments to the Bugs album URL
    # Note that this is dependent on the album being sourced from Bugs, so it should be changed per user.
    if cfg['local_prefs']['enable_release_description']:
        try:
            release_description = f"Sourced from [url=https://music.bugs.co.kr/album/{tags['COMMENT'][0]}]Bugs[/url]"
        # If any exceptions occur we fall back to no release description
        except:
            release_description = ""
    # If release description is not enabled we use no release description
    else:
        release_description = ""
    ## Assign all our unique values into releasedata{}. We'll use this later down the line for POSTING.
    # POST values can be found by inspecting the SugoiMusic upload form HTML.
    releasedata['submit'] = 'true'
    # List of accepted upload types
    accepted_types = ['Album', 'Single', 'EP']
    # If the type lookup errors we ask for user input
    try:
        releasedata['type'] = translate(tags['GROUPING'][0], "release_types")[0]
    except TypeError:
        releasedata['type'] = input("\n" + "_" * 100 + "\nGrouping is empty or has received an error, please enter manually (Album/Single/EP)\n")
    # If type is still not in accepted_types we ask for user input again and do not break the loop until correct
    if releasedata['type'] not in accepted_types:
        while True:
            releasedata['type'] = input("\n" + "_" * 100 + "\nGrouping tag did not return an album type, please enter manually (Album/Single/EP)\n")
            if releasedata['type'] not in accepted_types:
                continue
            else:
                break
    # SM uses numbers for its types
    if releasedata['type'] == "Album":
        releasedata['type'] = 0
    elif releasedata['type'] == "Single":
        releasedata['type'] = 2
    else:  # EP type
        releasedata['type'] = 1
    releasedata['title'] = tags['ALBUM'][0]
    releasedata['idols[]'] = unique_album_artists
    # If the value of album artist and artist is the same, we don't need to POST the original artist.
    if unique_album_artists != unique_track_artists:
        releasedata['artist_jp'] = unique_track_artists
    # re.sub removes any date separators, SM doesn't accept them
    releasedata['year'] = re.sub(r"[^0-9]", "", tags['DATE'])
    releasedata['audioformat'] = format
    releasedata['bitrate'] = bitrate
    releasedata['media'] = media
    releasedata['album_desc'] = album_description
    releasedata['release_desc'] = release_description
    releasedata['tags'] = unique_genre
    # Enable freeleech if the arg is passed
    if freeleech:
        releasedata['freeleech'] = "true"
    ## Language checks
    # This is a required check as we don't want to enter non-English/romaji characters into the title/artist fields.
    en = detectlanguage(releasedata['title'])
    if debug:
        print("_" * 100)
        print("Title/Artist Language:\n")
        print(f"{releasedata['title']} < English = {en}")
    if en == False:
        input_english_title = input("\n" + "_" * 100 + "\nKorean/Japanese detected. Please enter the romaji/English title:\n")
        # Create a new key called title_jp and assign the old title to it
        releasedata['title_jp'] = releasedata['title']
        # Replace title with the user input.
        releasedata['title'] = input_english_title
    en = detectlanguage(releasedata['idols[]'])
    if debug:
        print(f"{releasedata['idols[]']} < English = {en}")
    if en == False:
        input_english_artist = input("\n" + "_" * 100 + "\nKorean/Japanese detected. Separate multiple main artists with \",\". Please enter the romaji/English artist name:\n")
        input_english_artist = [x.strip() for x in input_english_artist.split(',')]
        releasedata['idols[]'] = input_english_artist
        input_english_contributing_artist = input("\n" + "_" * 100 + "\nSeparate multiple contributing artists with \",\". Press enter to skip. Please enter the romaji/English artist name:\n")
        if input_english_contributing_artist != "":
            input_english_contributing_artist = [x.strip() for x in input_english_contributing_artist.split(',')]
            releasedata['contrib_artists[]'] = input_english_contributing_artist
    return releasedata
# Simple function to split a string up into characters
def split(word):
    return [char for char in word]
def detectlanguage(string):
    ## Language detection
    # This is a required check as we don't want to enter non-English/romaji characters into the title field.
    characters = split(string)
    language_list = []
    for c in characters:
        try:
            language = detect(c)
            language_list.append(language)
        except:
            language = "error"
    if 'ko' in language_list or 'ja' in language_list:
        en = False
    else:
        en = True
    return en
def uploadtorrent(torrent, imageURL, releasedata):
    # POST url.
    uploadurl = "https://sugoimusic.me/upload.php"
    # Dataset containing all of the information obtained from our audio files.
    data = releasedata
    data['image'] = imageURL
    if not dryrun:
        data['auth'] = authkey
    if debug:
        print('_' * 100)
        print('Release Data:\n')
        print(releasedata)
    try:
        postDataFiles = {
            'file_input': open(torrent, 'rb')
            # 'userfile': open(cover, 'rb')
        }
    except FileNotFoundError:
        print("_" * 100)
        print('File not found!\nPlease confirm file locations and names. Cover image or .torrent file could not be found')
        sys.exit()
    # If the dryrun argument has not been passed we POST the results to SugoiMusic.
    if dryrun != True:
        JPSres = sm.retrieveContent(uploadurl, "post", data, postDataFiles)
        print('\nUpload POSTED. It may take a moment for this upload to appear on SugoiMusic.')
    ## TODO Filter through JPSres.text and create error handling based on responses
    # print(JPSres.text)
# Function for transferring the contents of the torrent as well as the torrent file itself.
def ftp_transfer(fileSource, fileDestination, directory, folder_name, watch_folder):
    # Create session
    session = ftplib.FTP(cfg['ftp_prefs']['ftp_server'], cfg['ftp_prefs']['ftp_username'], cfg['ftp_prefs']['ftp_password'])
    # Set session encoding to utf-8 so we can properly handle hangul/other special characters
    session.encoding = 'utf-8'
    # Successful FTP login print
    print("_" * 100)
    print("FTP Login Successful")
    print(f"Server Name: {cfg['ftp_prefs']['ftp_server']} : Username: {cfg['ftp_prefs']['ftp_username']}\n")
    if cfg['ftp_prefs']['add_to_downloads_folder']:
        # Create folder based on the directory name of the folder within the torrent.
        try:
            session.mkd(f"{fileDestination}/{folder_name}")
            print(f'Created directory {fileDestination}/{folder_name}')
        except ftplib.error_perm:
            pass
        # Notify user we are beginning the transfer.
        print("Beginning transfer...")
        # Set current folder to the user's preferred destination
        session.cwd(f"{fileDestination}/{folder_name}")
        # Transfer each file in the chosen directory
        for file in os.listdir(directory):
            with open(f"{directory}/{file}", 'rb') as f:
                filesize = os.path.getsize(f"{directory}/{file}")
                ## Transfer file
                # tqdm used for better user feedback.
                with tqdm(unit='blocks', unit_scale=True, leave=False, miniters=1, desc=f'Uploading [{file}]', total=filesize) as tqdm_instance:
                    session.storbinary('STOR ' + file, f, 2048, callback=lambda sent: tqdm_instance.update(len(sent)))
                print(f"{file} | Complete!")
                f.close()
    if cfg['ftp_prefs']['add_to_watch_folder']:
        with open(fileSource, 'rb') as t:
            # Set current folder to the watch directory
            session.cwd(watch_folder)
            ## Transfer file
            # We avoid tqdm here due to the filesize of torrent files.
            # Most connections will upload these within 1-3s, resulting in near useless progress bars.
            session.storbinary(f"STOR {torrentfile}", t)
            print(f"{torrentfile} | Sent to watch folder!")
            t.close()
    # Quit session when complete.
    session.quit()
def localfileorganization(torrent, directory, watch_folder, downloads_folder):
    # Move the torrent directory to the downloads folder
    if cfg['local_prefs']['add_to_downloads_folder']:
        try:
            os.mkdir(os.path.join(downloads_folder, os.path.basename(directory)))
        except FileExistsError:
            pass
        copytree(directory, os.path.join(downloads_folder, os.path.basename(directory)))
        shutil.rmtree(directory)
    if cfg['local_prefs']['add_to_watch_folder']:
        os.rename(torrent, f"{watch_folder}/{torrent}")
if __name__ == "__main__":
    asciiart()
    args = getargs()
    # TODO consider calling args[] directly, we would then not need this line
    dryrun = freeleech = tags = directory = debug = imageURL = None
    directory = args.directory
    additional_tags = args.tags
    if args.dryrun:
        dryrun = True
    if args.debug:
        debug = True
    if args.freeleech:
        freeleech = True
    if args.imageURL:
        imageURL = args.imageURL
    # Load login credentials from JSON and use them to create a login session.
    with open('json_data/config.json') as f:
        cfg = json.load(f)
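    # The cfg lookups throughout this script expect json_data/config.json to provide at
    # least the keys below (a sketch assembled from those lookups; the values shown are
    # placeholders, not real defaults):
    # {
    #     "credentials": {"username": "...", "password": "...", "passkey": "..."},
    #     "local_prefs": {
    #         "log_directory": "...", "save_tracklist": false, "generate_tracklist": true,
    #         "enable_release_description": false, "add_to_watch_folder": false,
    #         "add_to_downloads_folder": false, "local_watch_folder": "...",
    #         "local_downloads_folder": "..."
    #     },
    #     "ftp_prefs": {
    #         "enable_ftp": false, "ftp_server": "...", "ftp_username": "...", "ftp_password": "...",
    #         "add_to_watch_folder": false, "add_to_downloads_folder": false,
    #         "ftp_watch_folder": "...", "ftp_downloads_folder": "..."
    #     }
    # }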
    loginData = {'username': cfg['credentials']['username'], 'password': cfg['credentials']['password']}
    loginUrl = "https://sugoimusic.me/login.php"
    loginTestUrl = "https://sugoimusic.me"
    successStr = "Enabled users"
    passkey = cfg['credentials']['passkey']
    announceurl = "https://tracker.sugoimusic.me:24601/" + passkey + "/announce"
    # sm is an object which can be used to make requests with respect to the login session
    sm = smpy.MyLoginSession(loginUrl, loginData, loginTestUrl, successStr, debug=args.debug)
    # Acquire authkey
    authkey = getauthkey()
    # Gather data from the audio files
    releasedata = gatherdata(directory)
    # folder_name equals the last folder in the path, this is used to rename .torrent files to something relevant.
    folder_name = os.path.basename(os.path.normpath(directory))
    # Identifying cover.jpg path
    # cover_path = directory + "/" + cfg['local_prefs']['cover_name']
    # Create torrent file.
    # torrentfile = createtorrent(authkey, directory, folder_name, releasedata)
    torrentfile = createtorrent(announceurl, directory, folder_name, releasedata)
    # Upload torrent to SugoiMusic
    uploadtorrent(torrentfile, imageURL, releasedata)
    # Set variables for the watch/download folders
    ftp_watch_folder = cfg['ftp_prefs']['ftp_watch_folder']
    ftp_downloads_folder = cfg['ftp_prefs']['ftp_downloads_folder']
    local_watch_folder = cfg['local_prefs']['local_watch_folder']
    local_downloads_folder = cfg['local_prefs']['local_downloads_folder']
    if not dryrun:
        if cfg['ftp_prefs']['enable_ftp']:
            ftp_transfer(fileSource=torrentfile, fileDestination=ftp_downloads_folder, directory=directory, folder_name=folder_name, watch_folder=ftp_watch_folder)
        if cfg['local_prefs']['add_to_watch_folder'] or cfg['local_prefs']['add_to_downloads_folder']:
            localfileorganization(torrent=torrentfile, directory=directory, watch_folder=local_watch_folder, downloads_folder=local_downloads_folder)