initial commit

sharky555 committed 3 years ago · commit 91aa04df3f (pull/2/head)
15 changed files with 2023 additions and 0 deletions
  1. .gitignore (+4, -0)
  2. README.md (+82, -0)
  3. copycommand.txt (+1, -0)
  4. (+0, -0)
  5. jps2sm/constants.py (+63, -0)
  6. jps2sm/get_data.py (+290, -0)
  7. jps2sm/mediainfo.py (+199, -0)
  8. jps2sm/myloginsession.py (+178, -0)
  9. jps2sm/utils.py (+124, -0)
  10. jps2sm/validation.py (+223, -0)
  11. json_data/config.json.example (+27, -0)
  12. json_data/dictionary.json (+244, -0)
  13. requirements.txt (+9, -0)
  14. smpy.py (+117, -0)
  15. upload.py (+462, -0)

.gitignore (+4, -0)

@@ -0,0 +1,4 @@
__pycache__
config.json
*session.dat
*.torrent

README.md (+82, -0)

@@ -0,0 +1,82 @@
This project is being rewritten for uploading video torrents to SugoiMusic.
## Overview
**SM-AU-TV** is a tool for automating the upload process on sugoimusic.me.
**Features:**
- SM client
- FTP support
**Installation:**
- Install requirements
```
pip install -r requirements.txt
```
## Command Usage
```
python upload.py [options]
```
Use the copycommand.txt file to easily copy and paste your command.
Command | Description
------------- | -------------
-h, --help | show help message and exit
-i [INPUT], --input [INPUT] | Initiate upload on input file. This must be added as an argument.
-d, --debug | Enable debug mode.
-dry, --dryrun | Dryrun will carry out all actions other than the actual upload to SM.
-a [ARTISTS], --artists [ARTISTS] | Set the artists. (Romaji/English). Split multiple with ","
-oa [ORIGINALARTIST], --originalartist [ORIGINALARTIST] | Set the artist. (Original Language)
-ca [CONTRIBUTINGARTISTS], --contributingartists [CONTRIBUTINGARTISTS] | Set the contributing artists. (Romaji/English). Split multiple with ","
-ti [TITLE], --title [TITLE] | Set the title. (Romaji/English)
-oti [ORIGINALTITLE], --originaltitle [ORIGINALTITLE] | Set the title. (Original Language)
-des [DESCRIPTION], --description [DESCRIPTION] | Add a torrent description. This must be added as an argument.
-t [TAGS], --tags [TAGS] | Add additional tags to the upload. At least 2 tags are required
-im [IMAGEURL], --imageURL [IMAGEURL] | Set the torrent cover URL.
-ms [MEDIASOURCE], --mediasource [MEDIASOURCE] | Set the media source.
-rt [RELEASETYPE], --releasetype [RELEASETYPE] | Set the release type.
-s [SUB], --sub [SUB] | Set the subtitle type.
-l [LANGUAGE], --language [LANGUAGE] | Set the language
-y [YEAR], --year [YEAR] | Set the torrent year (YYYYMMDD or YYYY).
-f, --freeleech | Enables freeleech.
## Config.json
- It's not recommended to use both local and FTP watch/download folders at the same time, as this will result in seeding from two locations.
- If generate_tracklist is set to false, the script will try to find BugsPy logs within the selected log folder, using the comments tag to locate the log. For example, if 204815 is in your comments tag, it will search your log folder for 204815.log and use the contents of that log for the album description (see the sketch below).
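
A minimal sketch of that log lookup, assuming the release's comments tag has already been read into `comment` and `log_directory` comes from config.json (the function name is illustrative, not the script's actual code):
```
from pathlib import Path

def find_bugspy_log(comment: str, log_directory: str):
    """Return the album description from <id>.log if the comments tag holds a Bugs id."""
    for token in comment.split():
        candidate = Path(log_directory) / f"{token}.log"
        if candidate.is_file():
            return candidate.read_text(encoding="utf-8")
    return None  # no log found - fall back to generating a tracklist
```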
**credentials:**
Config | Description | Example
------------- | ------------- | -------------
Username | SugoiMusic Username | Slyy
Password | SugoiMusic Password | Password
Passkey | SugoiMusic Passkey | Passkey
**local_prefs**
Config | Description | Example
------------- | ------------- | -------------
log_directory | directory containing BugsPy log files | `Z:/Bugs/Logs`
generate_tracklist | enable tracklist generation | `true/false`
save_tracklist | write tracklist to .log and save in log folder | `true/false`
enable_release_description | post comments tag to release description | `true/false`
cover_name | name of cover with extension | `cover.jpg`
add_to_watch_folder | moves .torrent file to local watch folder | `true/false`
add_to_downloads_folder | moves torrent data to local downloads folder | `true/false`
local_watch_folder | directory of local watch folder | `Z:/watch/Transmission`
local_downloads_folder | directory of local downloads folder | `Z:/downloads`
**ftp_prefs:**
Config | Description | Example
------------- | ------------- | -------------
enable_ftp | enable FTP mode; if enabled, it's suggested to disable the local watch and downloads folders | `true/false`
add_to_watch_folder | transfer .torrent file to watch folder on FTP server | `true/false`
add_to_downloads_folder | transfer torrent data to downloads folder on FTP server | `true/false`
ftp_server | url of ftp server | bestboxever.seedhost.eu
ftp_username | username of ftp account | slyy
ftp_password | password of ftp account | password
ftp_watch_folder | directory of ftp watch folder | `/downloads/watch/transmission`
ftp_downloads_folder | directory of ftp downloads folder | `/downloads`
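
How the FTP hand-off might look, as a minimal sketch built on Python's standard ftplib and the ftp_* keys above (the helper name is illustrative, not this repo's actual implementation):
```
import os
from ftplib import FTP

def send_to_watch_folder(torrent_path, cfg):
    """Upload a .torrent file into the FTP watch folder from config.json."""
    ftp = FTP(cfg["ftp_server"])
    ftp.login(cfg["ftp_username"], cfg["ftp_password"])
    ftp.cwd(cfg["ftp_watch_folder"])
    with open(torrent_path, "rb") as f:
        ftp.storbinary(f"STOR {os.path.basename(torrent_path)}", f)
    ftp.quit()
```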

copycommand.txt (+1, -0)

@@ -0,0 +1 @@
python upload.py -i "C:\path to file" --artists "" --title "" --tags "" --mediasource "HDTV" --releasetype "PV" --sub "NoSubs" --language "Japanese" --year "" --description "a long description!!!!"

(+0, -0)


jps2sm/constants.py (+63, -0)

@@ -0,0 +1,63 @@
class VideoOptions:
"""
Store Video option constants
"""
VideoMedias = ('DVD', 'Blu-Ray', 'VHS', 'VCD', 'TV', 'HDTV', 'WEB')
badcontainers = ('ISO', 'VOB', 'MPEG', 'AVI', 'MKV', 'WMV', 'MP4')
badcodecs = ('MPEG2', 'h264')
badformats = badcontainers + badcodecs
resolutions = ('720p', '1080i', '1080p')
class Categories:
"""
Store category constants
"""
# Store JPS to SM Category translation, defines which JPS Cat gets uploaded to which SM Cat
# key: JPS category name
# value: SM category ID
JPStoSM = {
'Album': 0,
'EP': 1, # Does not exist on JPS
'Single': 2,
'Bluray': 3, # Does not exist on JPS
'DVD': 4,
'PV': 5,
'Music Performance': 6, # Does not exist on JPS
'TV-Music': 7, # Music Show
'TV-Variety': 8, # Talk Show
'TV-Drama': 9, # TV Drama
'Pictures': 10,
'Misc': 11,
}
SM = {
'Album': 0,
'EP': 1, # Does not exist on JPS
'Single': 2,
'Bluray': 3, # Does not exist on JPS
'DVD': 4,
'PV': 5,
'Music Performance': 6, # Does not exist on JPS
'TV Music': 7, # TV-Music
'TV Variety': 8, # TV-Variety
'TV Drama': 9, # TV-Drama
'Pictures': 10,
'Misc': 11,
}
Video = ('Bluray', 'DVD', 'PV', 'TV-Music', 'TV-Variety', 'TV-Drama', 'Music Performance', 'Fansubs')
Music = ('Album', 'Single')
# JPS Categories where release date cannot be entered and therefore need to be processed differently
NonDate = ('TV-Music', 'TV-Variety', 'TV-Drama', 'Fansubs', 'Pictures', 'Misc')
# JPS Categories where no release data is present and therefore need to be processed differently
NonReleaseData = ('Pictures', 'Misc')
# Music and Music Video Torrents, for category validation. This must match the category headers on JPS for an artist, hence they are plural
NonTVCategories = ('Albums', 'Singles', 'DVDs', 'PVs')
# Categories that should have some of their mediainfo stripped if present, must match indices in Categories.SM
SM_StripAllMediainfo = (0, 1, 2, 11) # Album, EP, Single, Misc - still useful to have the duration added to the description if we have it
SM_StripAllMediainfoExcResolution = 10 # Pictures - useful to have resolution if we have it
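
For orientation, a hypothetical lookup showing how the translation table above is meant to be read:
```
from jps2sm.constants import Categories

# A JPS 'PV' group maps to SM category ID 5
assert Categories.JPStoSM['PV'] == 5

# TV categories carry no release date on JPS, so the upload date is used instead
assert 'TV-Variety' in Categories.NonDate
```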

jps2sm/get_data.py (+290, -0)

@@ -0,0 +1,290 @@
# Standard library packages
import logging
import re
import itertools
import time
import json
from jps2sm.myloginsession import jpopsuki, sugoimusic
from jps2sm.constants import Categories
from jps2sm.utils import remove_html_tags
# Third-party packages
from bs4 import BeautifulSoup
logger = logging.getLogger('main.' + __name__)
class GetGroupData:
"""
Retrieve group data of the group supplied from args.parsed.urls
Group data is defined as data that is constant for every release, e.g. category, artist, title, groupdescription, tags, etc.
Each property is gathered by calling a method of the class
"""
def __init__(self, jpsurl):
self.jpsurl = jpsurl
logger.debug(f'Processing JPS URL: {jpsurl}')
self.groupid: int = int()
self.category: str = str()
self.artist: str = str()
self.date: str = str()
self.title: str = str()
self.originalartist: str = str()
self.originaltitle: str = str()
self.rel2: str = str()
self.groupdescription: str = str()
self.imagelink: str = str()
self.tagsall: str = str()
self.contribartists: str = str()
self.getdata()
def getdata(self):
date_regex = r'[12]\d{3}\.(?:0[1-9]|1[0-2])\.(?:0[1-9]|[12]\d|3[01])' # YYYY.MM.DD format
# YYYY.MM.DD OR YYYY format, for Pictures only
date_regex2 = r'(?:[12]\d{3}\.(?:0[1-9]|1[0-2])\.(?:0[1-9]|[12]\d|3[01])|(?:19|20)\d\d)'
res = jpopsuki(self.jpsurl.split()[0]) # If there are multiple urls only the first url needs to be parsed
self.groupid = re.findall(r"(?!id=)\d+", self.jpsurl)[0]
soup = BeautifulSoup(res.text, 'html5lib')
artistlinelink = soup.select('.thin h2 a')
originaltitleline = soup.select('.thin h3')
logger.debug(torrent_description_page_h2_line := str(soup.select('.thin h2')[0]))
self.category = re.findall(r'\[(.*?)\]', torrent_description_page_h2_line)[0]
logger.info(f'Category: {self.category}')
try:
artist_raw = re.findall(r'<a[^>]+>(.*)<', str(artistlinelink[0]))[0]
self.artist = split_bad_multiple_artists(artist_raw)
except IndexError: # Cannot find artist
if self.category == "Pictures":
# JPS allows Picture torrents to have no artist set, in this scenario try to infer the artist by examining the text
# immediately after the category string up to a YYYY.MM.DD string if available as this should be the magazine title
try:
self.artist = re.findall(fr'\[Pictures\] ([A-Za-z\. ]+) (?:{date_regex2})', torrent_description_page_h2_line)[0]
except IndexError:
logger.exception('Cannot find artist')
raise
elif self.category == "Misc":
# JPS has some older groups with no artists set, uploaders still used the "Artist - Group name" syntax though
try:
artist_raw = re.findall(r'\[Misc\] ([A-Za-z\, ]+) - ', torrent_description_page_h2_line)[0]
except IndexError:
logger.exception('Cannot find artist')
raise
self.artist = split_bad_multiple_artists(artist_raw)
else:
logger.exception('JPS upload appears to have no artist set and artist cannot be autodetected')
raise
logger.info(f'Artist(s): {self.artist}')
# Extract date without using '[]' as it allows '[]' elsewhere in the title and it works with JPS TV-* categories
try:
self.date = re.findall(date_regex, torrent_description_page_h2_line)[0].replace(".", "")
except IndexError: # Handle YYYY dates, creating extra regex as I cannot get it working without causing issue #33
try:
self.date = re.findall(r'[^\d]((?:19|20)\d{2})[^\d]', torrent_description_page_h2_line)[0]
# Handle if cannot find date in the title, use upload date instead from getreleasedata() but error if the category should have it
except IndexError:
if self.category not in Categories.NonDate:
logger.exception(f'Group release date not found and not using upload date instead as {self.category} torrents should have it set')
else:
logger.warning('Date not found from group data, will use upload date as the release date')
self.date = None
pass
logger.info(f'Release date: {self.date}')
if self.category not in Categories.NonDate:
self.title = re.findall(r'<a.*> - (.*) \[', torrent_description_page_h2_line)[0]
else:
# Using two sets of findall() as I cannot get the OR regex operator "|" to work
title1 = re.findall(r'<a.*> - (?:[12]\d{3}\.(?:0[1-9]|1[0-2])\.(?:0[1-9]|[12]\d|3[01])) - (.*)</h2>', torrent_description_page_h2_line)
title2 = re.findall(r'<a.*> - (.*) \((.*) (?:[12]\d{3}\.(?:0[1-9]|1[0-2])\.(?:0[1-9]|[12]\d|3[01]))', torrent_description_page_h2_line)
# title1 has 1 matching group, title2 has 2
titlemergedpre = [title1, " ".join(itertools.chain(*title2))]
titlemerged = "".join(itertools.chain(*titlemergedpre))
if len(titlemerged) == 0: # Non standard title, fallback on the whole string after the "-"
try:
self.title = re.findall(r'<a.*> - (.*)</h2>', torrent_description_page_h2_line)[0]
except IndexError:
if self.category == "Pictures": # Pictures non-artist upload - for magazines
# Fallback to all the text after the category, we need to include the date stamp as magazines are often titled
# with the same numbers each year - the first magazine each year appears to always be 'No. 1' for example
try:
self.title = re.findall(fr'\[Pictures\] (?:[A-Za-z\. ]+) ({date_regex2}(?:.*))</h2>', torrent_description_page_h2_line)[0]
except IndexError:
logger.exception('Cannot find title from the JPS upload')
raise
elif self.category == "Misc":
try:
self.title = re.findall(r'\[Misc\] (?:[A-Za-z\, ]+) - (.+)</h2>', torrent_description_page_h2_line)[0]
except IndexError:
logger.exception('Cannot find title from the JPS upload')
raise
else:
logger.exception('Cannot find title from the JPS upload')
raise
else:
self.title = titlemerged
logger.info(f'Title: {self.title}')
try:
originalchars = re.findall(r'<a href="artist.php\?id=(?:[0-9]+)">(.+)</a> - (.+)\)</h3>', str(originaltitleline))[0]
self.originalartist = originalchars[0]
self.originaltitle = originalchars[1]
logger.info(f"Original artist: {self.originalartist} Original title: {self.originaltitle}")
except IndexError: # Do nothing if group has no original artist/title
pass
self.rel2 = str(soup.select('#content .thin .main_column .torrent_table tbody')[0])
# Get description with BB Code if user has group edit permissions on JPS, if not just use stripped html text.
try:
self.groupdescription = get_group_description_bbcode(self.groupid) # Requires PU+ at JPS
except Exception:
logger.exception('Could not get group description BBCode. Are you a Power User+ at JPS?')
self.groupdescription = remove_html_tags(str(soup.select('#content .thin .main_column .box .body')[0]))
logger.info(f"Group description:\n{self.groupdescription}")
image = str(soup.select('#content .thin .sidebar .box p a'))
try:
self.imagelink = "https://jpopsuki.eu/" + re.findall('<a\s+(?:[^>]*?\s+)?href=\"([^\"]*)\"', image)[0]
logger.info(f'Image link: {self.imagelink}')
except IndexError: # No image for the group
self.imagelink = None
tagsget = str(soup.select('#content .thin .sidebar .box ul.stats.nobullet li'))
tags = re.findall('searchtags=([^\"]+)', tagsget)
logger.info(f'Tags: {tags}')
self.tagsall = ",".join(tags)
try:
contribartistsget = str(soup.select('#content .thin .sidebar .box .body ul.stats.nobullet li'))
contribartistslist = re.findall(r'<li><a href="artist\.php\?id=(?:[0-9]+?)" title="([^"]*?)">([\w .-]+)</a>', contribartistsget)
self.contribartists = {}
for artistpair in contribartistslist:
self.contribartists[artistpair[1]] = artistpair[0] # Creates contribartists[artist] = origartist
logger.info(f'Contributing artists: {self.contribartists}')
except IndexError: # Do nothing if group has no contrib artists
pass
def originalchars(self):
return self.originalartist, self.originaltitle
def __getattr__(self, item):
raise AttributeError(item)  # returning self.item here would recurse forever
def split_bad_multiple_artists(artists):
return re.split(', | x | & ', artists)
def get_release_data(torrentids, release_data, date):
"""
Retrieve all torrent id and release data (slash separated data and upload date) whilst coping with 'noise' from FL torrents,
and either return all data if using a group URL or only return the relevant releases if release url(s) were used
:param torrentids: list of torrentids to be processed, NULL if group is used
:return: releasedata: 2d dict of release data in the format of torrentid: { "slashdata" : [ slashdatalist ] , "uploaddate": uploaddate } .
"""
freeleechtext = '<strong>Freeleech!</strong>'
releasedatapre = re.findall(r"swapTorrent\('([0-9]+)'\);\">» (.*?)</a>.*?<blockquote>(?:\s*)Uploaded by <a href=\"user.php\?id=(?:[0-9]+)\">(?:[\S]+)</a> on <span title=\"(?:[^\"]+)\">([^<]+)</span>", release_data, re.DOTALL)
# if args.parsed.debug:
# print(f'Pre-processed releasedata: {json.dumps(releasedatapre, indent=2)}')
releasedata = {}
for release in releasedatapre:
torrentid = release[0]
slashlist = ([i.split(' / ') for i in [release[1]]])[0]
uploadeddate = release[2]
releasedata[torrentid] = {}
releasedata[torrentid]['slashdata'] = slashlist
releasedata[torrentid]['uploaddate'] = uploadeddate
logger.debug(f'Entire group contains: {json.dumps(releasedata, indent=2)}')
removetorrents = []
for torrentid, release in releasedata.items(): # Now release is a dict!
if len(torrentids) != 0 and torrentid not in torrentids:
# If torrentids is empty the user supplied a group url and every release is processed,
# otherwise iterate through releasedata{} and remove the releases that were not requested
removetorrents.append(torrentid)
if freeleechtext in release['slashdata']:
release['slashdata'].remove(freeleechtext) # Remove Freeleech whole match so it does not interfere with Remastered
for index, slashreleaseitem in enumerate(release['slashdata']):
if remaster_freeleech_removed := re.findall(r'(.*) - <strong>Freeleech!<\/strong>', slashreleaseitem): # Handle Freeleech remastered torrents, issue #43
release['slashdata'][index] = f'{remaster_freeleech_removed[0]} - {date[:4]}' # Use the extracted value and append group JPS release year
logger.debug(f"Torrent {torrentid} is freeleech remastered, validated remasterdata to {release['slashdata'][index]}")
for torrentid in removetorrents:
del (releasedata[torrentid])
logger.info(f'Selected for upload: {releasedata}')
return releasedata
def get_group_description_bbcode(groupid):
"""
Retrieve original bbcode from edit group url and reformat any JPS style bbcode
:param: groupid: JPS groupid to get group description with bbcode
:return: bbcode: group description with bbcode
"""
edit_group_page = jpopsuki(f"https://jpopsuki.eu/torrents.php?action=editgroup&groupid={groupid}")
soup = BeautifulSoup(edit_group_page.text, 'html5lib')
bbcode = soup.find("textarea", {"name": "body"}).string
bbcode_sanitised = re.sub(r'\[youtube=([^\]]+)]', r'[youtube]\1[/youtube]', bbcode)
return bbcode_sanitised
def get_jps_user_id():
"""
Returns the JPopSuki user id
:return: int: user id
"""
res = jpopsuki("https://jpopsuki.eu/", True)
soup = BeautifulSoup(res.text, 'html5lib')
href = soup.select('.username')[0]['href']
jps_user_id = re.match(r"user\.php\?id=(\d+)", href).group(1)
time.sleep(5) # Sleep as otherwise we hit JPS browse quota
return int(str(jps_user_id))
def get_user_keys():
"""
Get SM session authkey and torrent_password_key for use by uploadtorrent()|download_sm_torrent() data dict.
Uses SM login data
"""
smpage = sugoimusic("https://sugoimusic.me/torrents.php?id=118", test_login=True) # Arbitrary page on SM that has authkey
soup = BeautifulSoup(smpage.text, 'html5lib')
rel2 = str(soup.select_one('#torrent_details .group_torrent > td > span > .tooltip'))
return {
'authkey': re.findall('authkey=(.*)&amp;torrent_pass=', rel2)[0],
'torrent_password_key': re.findall(r"torrent_pass=(.+)\" title", rel2)[0]
}
def get_torrent_link(torrentid, release_data):
"""
Extract a torrent link for a given torrentid
:param torrentid: id of the torrent to extract
:param release_data: html of the release table to search
:return: torrentlink: URI of torrent link
"""
torrentlink = re.findall(rf'torrents\.php\?action=download&amp;id={torrentid}&amp;authkey=(?:[^&]+)&amp;torrent_pass=(?:[^"]+)', release_data)[0]
return torrentlink
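
A sketch of how this module is consumed (the group URL is a placeholder; the attribute names are those set by GetGroupData above):
```
from jps2sm.get_data import GetGroupData

group = GetGroupData('https://jpopsuki.eu/torrents.php?id=12345')
print(group.category, group.artist, group.title, group.date)
print(group.tagsall)  # comma-separated tags ready for the SM upload form
```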

jps2sm/mediainfo.py (+199, -0)

@@ -0,0 +1,199 @@
import logging
import os
# Third-party modules
from pymediainfo import MediaInfo
import torrent_parser as tp
from pyunpack import Archive
from pathlib import Path
import tempfile
logger = logging.getLogger('main.' + __name__)
def get_mediainfo(torrentfilename, media, media_roots):
"""
Get filename(s) of video files in the torrent and run mediainfo and capture the output, extract if DVD found (Blurays not yet supported)
then set the appropriate fields for the upload
:param torrentfilename: str filename of torrent to parse from collate()
:param media: str Validated media from collate()
:param media_roots: Sanitised MediaDirectories from cfg for use by get_media_location()
:return: mediainfo, releasedataout
mediainfo: Mediainfo text output of the file(s)
releasedataout: Fields gathered from mediainfo for SM upload
"""
torrentmetadata = tp.parse_torrent_file(torrentfilename)
torrentname = torrentmetadata['info']['name'] # Directory if >1 file, otherwise it is filename
# print(torrentmetadata)
mediainfosall = ""
releasedataout = {}
releasedataout['duration'] = 0
# TODO Need to cleanup the logic to create an overall filename list to parse instead of the 3-way duplication we currently have
if 'files' in torrentmetadata['info'].keys(): # Multiple files
directory = torrentname
logger.info(f'According to the torrent metadata the dir is {directory}')
file_path = get_media_location(directory, True, media_roots)
logger.info(f'Path to dir: {file_path}')
for file in torrentmetadata['info']['files']:
if len(torrentmetadata['info']['files']) == 1: # This might never happen, it could be just info.name if so
filename = os.path.join(*file['path'])
else:
releasedataout['multiplefiles'] = True
filename = os.path.join(*[file_path, *file['path']]) # Each file in the directory of source data for the torrent
mediainfosall += str(MediaInfo.parse(filename, text=True))
releasedataout['duration'] += get_mediainfo_duration(filename)
# Get biggest file and mediainfo on this to set the fields for the release
maxfile = max(torrentmetadata['info']['files'], key=lambda x: x['length']) # returns {'length': int, 'path': [str]} of largest file
fileforsmfields = Path(*[file_path, *maxfile['path']]) # Assume the largest file is the main file that should populate SM upload fields
else: # Single file
releasedataout['multiplefiles'] = False
filename = torrentname
file_path = get_media_location(filename, False, media_roots)
logger.debug(f'Filename for mediainfo: {file_path}')
mediainfosall += str(MediaInfo.parse(file_path, text=True))
releasedataout['duration'] += get_mediainfo_duration(file_path)
fileforsmfields = file_path
if fileforsmfields.suffix == '.iso' and media == 'DVD':
# If DVD, extract the ISO and run mediainfo against appropriate files, if BR we skip as pyunpack (patool/7z) cannot extract them
releasedataout['container'] = 'ISO'
logger.info(f'Extracting ISO {fileforsmfields} to obtain mediainfo on it...')
isovideoextensions = ('.vob', '.m2ts')
tempdir = tempfile.TemporaryDirectory()
Archive(fileforsmfields).extractall(tempdir.name)
dir_files = []
for root, subFolder, files in os.walk(tempdir.name):
for item in files:
filenamewithpath = os.path.join(root, item)
dir_files.append(filenamewithpath)
if list(filter(filenamewithpath.lower().endswith,
isovideoextensions)): # Only gather mediainfo for DVD video files (BR when supported)
mediainfosall += str(MediaInfo.parse(filenamewithpath, text=True))
releasedataout['duration'] += get_mediainfo_duration(filenamewithpath)
filesize = lambda f: os.path.getsize(f)
fileforsmfields = sorted(dir_files, key=filesize)[-1] # Assume the largest file is the main file that should populate SM upload fields
# Now we have decided which file will have its mediainfo parsed for SM fields, parse its mediainfo
mediainforeleasedata = MediaInfo.parse(fileforsmfields)
# Remove path to file in case it reveals usernames etc.
replacement = str(Path(file_path).parent)
mediainfosall = mediainfosall.replace(replacement, '')
if Path(fileforsmfields).suffix == '.iso' and media == 'DVD':
tempdir.cleanup()
for track in mediainforeleasedata.tracks:
if track.track_type == 'General':
# releasedataout['language'] = track.audio_language_list # Will need to check if this is reliable
if 'container' not in releasedataout: # Not an ISO - only set container if we do not already know it's an ISO
releasedataout['container'] = track.file_extension.upper()
else: # We have an ISO - get category data based on Mediainfo if we have it
if track.file_extension.upper() == 'VOB':
releasedataout['category'] = 'DVD'
elif track.file_extension.upper() == 'M2TS': # Not used yet as we cannot handle Bluray / UDF
releasedataout['category'] = 'Bluray'
if track.track_type == 'Video':
validatecodec = {
"MPEG Video": "MPEG-2",
"AVC": "h264",
"HEVC": "h265",
"MPEG-4 Visual": "DivX", # MPEG-4 Part 2 / h263 , usually xvid / divx
}
for old, new in validatecodec.items():
if track.format == old:
releasedataout['codec'] = new
standardresolutions = {
"3840": "1920",
"1920": "1080",
"1280": "720",
"720": "480",
}
for width, height in standardresolutions.items():
if str(track.width) == width and str(track.height) == height:
releasedataout['ressel'] = height
if 'ressel' in releasedataout.keys(): # Known resolution type, try to determine if interlaced
if track.scan_type == "Interlaced" or track.scan_type == "MBAFF":
releasedataout['ressel'] += "i"
else:
releasedataout['ressel'] += "p" # Sometimes a Progressive encode has no field set
else: # Custom resolution
releasedataout['ressel'] = 'Other'
releasedataout['resolution'] = str(track.width) + "x" + str(track.height)
if track.track_type == 'Audio' or track.track_type == 'Audio #1': # Handle multiple audio streams, we just get data from the first for now
if track.format in ["AAC", "DTS", "PCM", "AC3"]:
releasedataout['audioformat'] = track.format
elif track.format == "AC-3":
releasedataout['audioformat'] = "AC3"
elif track.format == "MPEG Audio" and track.format_profile == "Layer 3":
releasedataout['audioformat'] = "MP3"
elif track.format == "MPEG Audio" and track.format_profile == "Layer 2":
releasedataout['audioformat'] = "MP2"
logger.debug(f'Mediainfo interpreted data: {releasedataout}')
return mediainfosall, releasedataout
def get_mediainfo_duration(filename):
"""
Get duration in mediainfo for filename
:param filename:
:return: float ms
"""
mediainfo_for_duration = MediaInfo.parse(filename)
for track in mediainfo_for_duration.tracks:
if track.track_type == 'General':
if track.duration is None:
return 0
else:
logger.info(f'Mediainfo duration: {filename} {track.duration}')
return float(track.duration) # time in ms
def get_media_location(media_name, directory, media_roots):
"""
Find the location of the directory or file of the source data for getmediainfo()
:param media_name: str name of the file or directory
:param directory: boolean true if dir, false if file
:param media_roots: Sanitised MediaDirectories from cfg
:return: full path to file/dir
"""
# Find the file/dir and stop on the first hit, hopefully OS-side disk cache will mean this will not take too long
media_location = None
logger.info(f'Searching for {media_name}...')
for media_dir_search in media_roots:
for dirname, dirnames, filenames in os.walk(media_dir_search):
if directory is True:
for subdirname in dirnames:
if subdirname == media_name:
media_location = os.path.join(dirname, subdirname)
return Path(media_dir_search, media_location)
else:
for filename in filenames:
if filename == media_name:
media_location = os.path.join(dirname, filename)
return Path(media_dir_search, media_location)
if media_location is None:
media_not_found_error_msg = f'Mediainfo error - file/directory not found: {media_name} in any of the MediaDirectories specified: {media_roots}'
logger.error(media_not_found_error_msg)
raise RuntimeError(media_not_found_error_msg)
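
Typical use of get_mediainfo(), sketched with placeholder paths (media_roots mirrors the MediaDirectories value from jps2sm.cfg):
```
from jps2sm.mediainfo import get_mediainfo

mediainfo_text, release_fields = get_mediainfo(
    'example.torrent', media='HDTV', media_roots=['/data/media'])
# release_fields carries e.g. duration, container, codec, ressel, audioformat
print(release_fields)
```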

jps2sm/myloginsession.py (+178, -0)

@@ -0,0 +1,178 @@
# Standard library packages
import logging
import datetime
import os
import pickle
from urllib.parse import urlparse
import requests
from jps2sm.utils import GetConfig
logger = logging.getLogger('main.' + __name__)
def jpopsuki(url, test_login=False):
"""
Get content from JPS
:param url:
:param test_login: Test the session by fetching a test URL and checking for the logged-in string
:return: data
"""
config = GetConfig()
jps_login_url = "https://jpopsuki.eu/login.php"
jps_test_url = "https://jpopsuki.eu"
jps_success = '<div id="extra1"><span></span></div>'
login_data = {'username': config.jps_user, 'password': config.jps_pass}
jps_session = MyLoginSession(jps_login_url, login_data, jps_test_url, jps_success, test_login)
return jps_session.retrieveContent(url)
def sugoimusic(url, method="get", post_data=None, post_data_files=None, test_login=False):
"""
Get/Post content to SM
:param post_data_files: Files to send in POST
:param post_data: Parameters to send in POST
:param method: HTML method
:param url: URL to parse
:param test_login: Test the session by fetching a test URL and checking for the logged-in string
:return: data
"""
config = GetConfig()
sm_login_url = "https://sugoimusic.me/login.php"
sm_test_url = "https://sugoimusic.me/"
sm_success = "Enabled users"
login_data = {'username': config.sm_user, 'password': config.sm_pass}
sm_session = MyLoginSession(sm_login_url, login_data, sm_test_url, sm_success, test_login)
return sm_session.retrieveContent(url, method, post_data, post_data_files)
class MyLoginSession:
"""
Taken from: https://stackoverflow.com/a/37118451/2115140
New features added in jps2sm
Originally by: https://stackoverflow.com/users/1150303/domtomcat
A class which handles and saves login sessions. It also keeps track of proxy settings,
and maintains a cache file for restoring session data from earlier
script executions.
"""
def __init__(self,
loginUrl,
loginData,
loginTestUrl,
loginTestString,
test_login=False,
sessionFileAppendix='_session.dat',
maxSessionTimeSeconds=30 * 60,
proxies=None,
userAgent='Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1',
forceLogin=False,
**kwargs):
"""
save some information needed to login the session
you'll have to provide 'loginTestString' which will be looked for in the
response's html to make sure you've properly been logged in
'proxies' is of format { 'https' : 'https://user:pass@server:port', 'http' : ...
'loginData' will be sent as post data (dictionary of id : value).
'maxSessionTimeSeconds' will be used to determine when to re-login.
"""
urlData = urlparse(loginUrl)
self.proxies = proxies
self.loginData = loginData
self.loginUrl = loginUrl
self.loginTestUrl = loginTestUrl
self.maxSessionTime = maxSessionTimeSeconds
self.sessionFile = urlData.netloc + sessionFileAppendix
self.userAgent = userAgent
self.loginTestString = loginTestString
self.login(forceLogin, test_login, **kwargs)
def modification_date(self, filename):
"""
return last file modification date as datetime object
"""
t = os.path.getmtime(filename)
return datetime.datetime.fromtimestamp(t)
def login(self, forceLogin=False, test_login=False, **kwargs):
"""
login to a session. Try to read last saved session from cache file. If this fails
do proper login. If the last cache access was too old, also perform a proper login.
Always updates session cache file.
"""
wasReadFromCache = False
# logger.debug('loading or generating session...')
if os.path.exists(self.sessionFile) and not forceLogin:
time = self.modification_date(self.sessionFile)
# only load if file less than 30 minutes old
lastModification = (datetime.datetime.now() - time).seconds
if lastModification < self.maxSessionTime:
with open(self.sessionFile, "rb") as f:
self.session = pickle.load(f)
wasReadFromCache = True
# logger.debug("loaded session from cache (last access %ds ago) " % lastModification)
if not wasReadFromCache:
self.session = requests.Session()
self.session.headers.update({'user-agent': self.userAgent})
res = self.session.post(self.loginUrl, data=self.loginData,
proxies=self.proxies, **kwargs)
if 'Your username or password was incorrect.' in res.text: # check if login was successful
raise Exception("could not log into provided site '%s'"
" (username or password was incorrect)"
% self.loginUrl)
logger.debug('created new session with login')
self.saveSessionToCache()
if test_login:
# test login
logger.debug('Loaded session from cache and testing login...')
res = self.session.get(self.loginTestUrl)
if res.text.lower().find(self.loginTestString.lower()) < 0:
os.remove(self.sessionFile) # delete the session file if login fails
logger.debug(res.text)
raise Exception("could not log into provided site '%s'"
" (did not find successful login string)"
% self.loginUrl)
def saveSessionToCache(self):
"""
save session to a cache file
"""
# always save (to update timeout)
with open(self.sessionFile, "wb") as f:
pickle.dump(self.session, f)
logger.debug('updated session cache-file %s' % self.sessionFile)
def retrieveContent(self, url, method="get", postData=None, postDataFiles=None, **kwargs):
"""
return the content of the url with respect to the session.
If 'method' is not 'get', the url will be called with 'postData'
as a post request.
"""
if method == 'get':
res = self.session.get(url, proxies=self.proxies, **kwargs)
else:
res = self.session.post(url, data=postData, proxies=self.proxies, files=postDataFiles, **kwargs)
# the session has been updated on the server, so also update in cache
self.saveSessionToCache()
return res
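
A standalone sketch of the session class (URLs and credentials are placeholders):
```
from jps2sm.myloginsession import MyLoginSession

session = MyLoginSession(
    loginUrl='https://example.org/login.php',
    loginData={'username': 'user', 'password': 'pass'},
    loginTestUrl='https://example.org/',
    loginTestString='Logged in as')
res = session.retrieveContent('https://example.org/torrents.php')
print(res.status_code)
```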

jps2sm/utils.py (+124, -0)

@@ -0,0 +1,124 @@
# Standard library packages
import logging
import re
from typing import AnyStr
import sys
import configparser
import argparse
# Third-party packages
from pathlib import Path
logger = logging.getLogger('main.' + __name__)
__version__ = "1.5.1"
def get_valid_filename(s: str) -> AnyStr:
"""
Return the given string converted to a string that can be used for a clean
filename. Remove leading and trailing spaces; convert other spaces to
underscores; and remove anything that is not an alphanumeric, dash,
underscore, or dot.
:param s: str: A string that needs to be converted
:return: str: A string with a clean filename
"""
s = str(s).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
def count_values_dict(dict):
"""
Count the values in a dictionary.
"""
return sum([len(dict[x]) for x in dict])
def fatal_error(msg):
"""
Immediately exit and show an error to stderr and not log it
Usually used for argument, file or other simple errors that should not be logged, as otherwise they create noise
:param msg: str
:return:
"""
print(msg, file=sys.stderr)
sys.exit(1)
class GetArgs:
def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
parser.add_argument('-d', '--debug', help='Enable debug mode', action='store_true')
parser.add_argument("-u", "--urls", help="JPS URL for a group, or multiple individual releases URLs to be added to the same group", type=str)
parser.add_argument("-n", "--dryrun", help="Just parse url and show the output, do not add the torrent to SM", action="store_true")
parser.add_argument("-b", "--batchuser", help="User id for batch user operations, default is user id of SM Username specified in jps2sm.cfg")
parser.add_argument("-U", "--batchuploaded", help="(Batch mode only) Upload all releases uploaded by you or, if provided, user id specified by --batchuser", action="store_true")
parser.add_argument("-S", "--batchseeding", help="(Batch mode only) Upload all releases currently seeding by you or, if provided, user id specified by --batchuser", action="store_true")
parser.add_argument("--batchsnatched", help="(Batch mode only) Upload all releases snatched by you or, if provided, user id specified by --batchuser", action="store_true")
parser.add_argument("-s", "--batchstart", help="(Batch mode only) Start at this page", type=int)
parser.add_argument("-e", "--batchend", help="(Batch mode only) End at this page", type=int)
parser.add_argument("-exc", "--exccategory", help="(Batch mode only) Exclude a JPS category from upload", type=str)
parser.add_argument("-exf", "--excaudioformat", help="(Batch mode only) Exclude an audioformat from upload", type=str)
parser.add_argument("-exm", "--excmedia", help="(Batch mode only) Exclude a media from upload", type=str)
parser.add_argument("-m", "--mediainfo", help="Search and get mediainfo data from the source file(s) in the directories specified by MediaDirectories. Extract data to set codec, resolution, audio format and container fields as well as the mediainfo field itself.", action="store_true")
self.parsed = parser.parse_args()
class GetConfig:
def __init__(self):
script_dir = Path(__file__).parent.parent
# Get configuration
config = configparser.ConfigParser()
configfile = Path(script_dir, 'jps2sm.cfg')
if not configfile.is_file():
fatal_error(
f'Error: config file {configfile} not found - enter your JPS/SM credentials in jps2sm.cfg and check jps2sm.cfg.example to see the syntax.')
config.read(configfile)
self.jps_user = config.get('JPopSuki', 'User')
self.jps_pass = config.get('JPopSuki', 'Password')
self.sm_user = config.get('SugoiMusic', 'User')
self.sm_pass = config.get('SugoiMusic', 'Password')
self.media_roots = [x.strip() for x in config.get('Media', 'MediaDirectories').split(',')] # Remove whitespace after comma if any
self.directories = config.items('Directories')
def __getattr__(self, item):
raise AttributeError(item)  # returning self.item here would recurse forever
class HandleCfgOutputDirs:
"""
Handle all config dir logic
Get data, decide if relative or absolute path and create dir if required
:param config_file_dirs_section: dict: Contents of 'Directories' section in jps2sm.cfg
"""
def __init__(self, config_file_dirs_section):
self.config_file_dirs_section = config_file_dirs_section
self.file_dir = {}
for (cfg_key, cfg_value) in config_file_dirs_section:
if Path(cfg_value).is_absolute():
self.file_dir[cfg_key] = cfg_value
else:
self.file_dir[cfg_key] = Path(Path.home(), cfg_value)
if not Path(self.file_dir[cfg_key]).is_dir():
Path(self.file_dir[cfg_key]).mkdir(parents=True, exist_ok=True)
def remove_html_tags(text):
"""
Strip html tags, used by GetGroupData() on the group description if unable to get bbcode
"""
clean = re.compile('<.*?>')
return re.sub(clean, '', text)
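
A quick demonstration of get_valid_filename():
```
from jps2sm.utils import get_valid_filename

print(get_valid_filename('Some Artist - Title (2020)!.torrent'))
# -> 'Some_Artist_-_Title_2020.torrent'
```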

jps2sm/validation.py (+223, -0)

@@ -0,0 +1,223 @@
# Standard library packages
import logging
import re
from jps2sm.myloginsession import jpopsuki
from jps2sm.constants import Categories, VideoOptions
from jps2sm.utils import GetArgs
# Third-party packages
from bs4 import BeautifulSoup
import torrent_parser as tp
logger = logging.getLogger('main.' + __name__)
def decide_music_performance(artists, multiplefiles, duration):
"""
Return if upload should be a Music Performance or not
A Music Performance is a cut from a music TV show that is 25 mins or less long, by an artist that is not a TV show artist
We assume we are being called only if Cat = TV Music
:return: str: 'Music Performance' or 'TV Music'
"""
if multiplefiles is True or duration > 1500000: # 1 500 000 ms = 25 mins
return 'TV Music'
else: # Single file that is < 25 mins, decide if Music Performance
if len(artists) > 1: # Multiple artists
logger.debug('Upload is a Music Performance as it has derived multiple artists and is 25 mins or less')
return 'Music Performance' # JPS TV Show artists never have multiple artists
JPSartistpage = jpopsuki(f"https://jpopsuki.eu/artist.php?name={artists[0]}")
soup = BeautifulSoup(JPSartistpage.text, 'html5lib')
categoriesbox = str(soup.select('#content .thin .main_column .box.center'))
categories = re.findall(r'\[(.+)\]', categoriesbox)
if any({*Categories.NonTVCategories} & {*categories}): # Exclude any TV Shows for being mislabeled as Music Performance
logger.debug('Upload is a Music Performance as it is 25 mins or less and not a TV Show')
return 'Music Performance'
else:
logger.debug('Upload is not a Music Performance')
return 'TV Music'
def get_alternate_fansub_category_id(artist, group_name):
"""
Attempts to detect the actual category for JPS Fansubs category torrents and if not ask the user to select an alternate category.
If it is a TV show, this TV show category type is detected and returned, else query the user from a list of potential categories.
:param artist: str artist name
:param group_name: str torrent group name, used in the warning shown to the user
:return: int alternative category ID based on Categories.SM()
"""
JPSartistpage = jpopsuki(f"https://jpopsuki.eu/artist.php?name={artist}")
soup = BeautifulSoup(JPSartistpage.text, 'html5lib')
categoriesbox = str(soup.select('#content .thin .main_column .box.center'))
categories = re.findall(r'\[(.+)\]', categoriesbox)
if not any({*Categories.NonTVCategories} & {*categories}) and " ".join(categories).count('TV-') == 1:
# Artist has no music and only 1 TV Category, artist is a TV show and we can auto detect the category for FanSub releases
autodetectcategory = re.findall(r'(TV-(?:[^ ]+))', " ".join(categories))[0]
logger.debug(f'Autodetected SM category {autodetectcategory} for JPS Fansubs torrent')
return autodetectcategory
else: # Cannot autodetect
AlternateFanSubCategoriesIDs = (5, 6, 7, 8, 9, 11) # Matches indices in Categories()
logger.warning(f'Cannot auto-detect correct category for torrent group {group_name}.')
print('Select Category:')
option = 1
optionlookup = {}
for alternativefansubcategoryid in AlternateFanSubCategoriesIDs:
for cat, catid in Categories.SM.items():
if alternativefansubcategoryid == catid:
print(f'({option}) {cat}')
optionlookup[option] = alternativefansubcategoryid
option += 1
alternatecategoryoption = input('Choose alternate category or press ENTER to skip: ')
if alternatecategoryoption == "":
logger.error('No alternate Fansubs category chosen.')
return "Fansubs" # Allow upload to fail
else:
category = optionlookup[int(alternatecategoryoption)]
logger.info(f'Alternate Fansubs category {category} chosen')
return category
def validate_jps_video_data(releasedata, categorystatus):
"""
Validate and process dict supplied by getreleasedata() via collate() to extract all available data
from JPS for video torrents, whilst handling weird cases where VideoTorrent is uploaded as a Music category
:param releasedata:
:param categorystatus: str: good or bad. good for correct category assigned and bad if this is a Music Torrent
mistakenly uploaded as a non-VC category!
:return: releasedataout{} validated container, codec, media, audioformat
"""
releasedataout = {}
# JPS uses the audioformat field (represented as releasedata[0] here) for containers and codecs in video torrents
# If a known container is used as audioformat set it as the container on SM
if releasedata[0] in VideoOptions.badcontainers:
releasedataout['container'] = releasedata[0]
else:
releasedataout['container'] = 'CHANGEME'
# If a known codec is used as audioformat set it as the codec on SM
if releasedata[0] in VideoOptions.badcodecs:
if releasedata[0] == "MPEG2": # JPS uses 'MPEG2' for codec instead of the correct 'MPEG-2'
releasedataout['codec'] = "MPEG-2"
else:
releasedataout['codec'] = releasedata[0]
else:
releasedataout['codec'] = 'CHANGEME' # assume default
if categorystatus == "good":
releasedataout['media'] = releasedata[1]
else:
releasedataout['media'] = releasedata[2]
if releasedata[0] == 'AAC': # For video torrents, the only correct audioformat in JPS is AAC
releasedataout['audioformat'] = "AAC"
else:
releasedataout['audioformat'] = "CHANGEME"
return releasedataout
def validate_jps_bitrate(jps_bitrate):
"""
Validate JPS bad bitrates to sensible bitrates ready for upload to SM
:param jps_bitrate:
:return: sm_bitrate
"""
bitrates = {
"Hi-Res 96/24": "24bit Lossless 96kHz",
"24bit/48kHz": "24bit Lossless 48kHz",
"Hi-Res": "24bit Lossless",
"Hi-Res 48/24": "24bit Lossless 48kHz",
"24bit/96kHz": "24bit Lossless 96kHz",
"24bit/48Khz": "24bit Lossless 48kHz",
"24bit/96Khz": "24bit Lossless 96kHz",
"24bit/48khz": "24bit Lossless 48kHz",
"Hi-Res Lossless": "24bit Lossless",
"160": "Other",
"Variable": "Other",
"320 (VBR)": "Other",
"Scans": "",
"Booklet": "",
"1080p": "",
"720p": "",
"256 (VBR)": "APS (VBR)",
"155": "Other"
}
sm_bitrate = jps_bitrate # Default is to leave bitrate alone if not mentioned here, such as bitrates that are OK on both JPS and SM
for old, new in bitrates.items():
if jps_bitrate == old:
sm_bitrate = new
return sm_bitrate
def decide_exc_filter(audioformat, media, releasedata):
"""
Implement audioformat and media exclusion filters
:return: boolean: True or False
"""
args = GetArgs()
if audioformat == args.parsed.excaudioformat:
logger.info(f'Excluding {releasedata} as exclude audioformat {args.parsed.excaudioformat} is set')
return True
elif media == args.parsed.excmedia:
logger.info(f'Excluding {releasedata} as exclude media {args.parsed.excmedia} is set')
return True
return False
def decide_ep(torrentfilename, uploaddata):
"""
Return if Album upload should be an EP or not.
EPs are considered to have fewer than 7 tracks (excluding off-vocals/instrumentals) and are uploaded to JPS as an Album
We assume we are being called only if Cat = Album
:param torrentfilename:
:param uploaddata:
:return: str: 'EP' or 'Album'
"""
if uploaddata['media'].lower() == 'bluray' or uploaddata['media'].lower() == 'dvd':
return 'Album'
torrent_metadata = tp.parse_torrent_file(torrentfilename)
music_extensions = ['.flac', '.mp3', '.ogg', '.alac', '.m4a', '.wav', '.wma', '.ra']
off_vocal_phrases = ['off-vocal', 'offvocal', 'off vocal', 'inst.', 'instrumental', 'english ver', 'japanese ver', 'korean ver']
track_count = 0
has_cue = False
track_extensions = set()
for file in torrent_metadata['info']['files']:
file_path = file['path'][-1].lower()
if file_path.endswith('.iso'):
return 'Album'
if file_path.endswith('.cue'):
has_cue = True
if list(filter(file_path.endswith, music_extensions)) and \
not any(substring in file_path for substring in off_vocal_phrases):
# Count music files which are not an off-vocal or instrumental
logger.debug(f"Deciding if EP with torrent with these tracks: {file['path'][-1]}")
track_count += 1
track_extensions.add(file_path.split('.')[-1])
if has_cue and track_extensions == {'flac'}:
logger.debug(f'Upload is not an EP as it has a .cue file and only .flac files')
return 'Album'
if track_count < 7:
logger.debug(f'Upload is an EP as it has {track_count} standard tracks')
return 'EP'
else:
logger.debug(f'Upload is not an EP as it has {track_count} tracks')
return 'Album'
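
The bitrate translation in action (values from the mapping above; unknown bitrates pass through unchanged):
```
from jps2sm.validation import validate_jps_bitrate

print(validate_jps_bitrate('Hi-Res 96/24'))  # -> '24bit Lossless 96kHz'
print(validate_jps_bitrate('320'))           # not in the map -> '320'
```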

json_data/config.json.example (+27, -0)

@@ -0,0 +1,27 @@
{
"credentials": {
"username": "username",
"password": "password",
"passkey": "passkey"
},
"local_prefs": {
"log_directory": "D:/Bugs/BugsPy-master/logs",
"generate_tracklist": true,
"save_tracklist": false,
"enable_release_description": false,
"cover_name": "cover.jpg",
"add_to_watch_folder": false,
"add_to_downloads_folder": false,
"local_watch_folder": "C:/watch",
"local_downloads_folder": "C:/downloads"
},
"ftp_prefs": {
"enable_ftp": false,
"add_to_watch_folder": true,
"add_to_downloads_folder": true,
"ftp_server": "server url",
"ftp_username": "username",
"ftp_password": "password",
"ftp_watch_folder": "/downloads/watch/transmission",
"ftp_downloads_folder": "/downloads"
  }
}

json_data/dictionary.json (+244, -0)

@@ -0,0 +1,244 @@
{
"release_types": {
"Album": "Album",
"Single": "Single",
"EP": "Album",
"OST": "Album",
"싱글": "Single",
"EP(미니)": "Album",
"정규": "Album",
"컴필레이션": "Album",
"베스트": "Album",
"미니": "Album"
},
"genres": {
"R&B": "rnb",
"소울": "Soul",
"힙합": "hip.hop",
"랩": "Rap",
"영화": "Movie",
"로맨스": "Romance",
"드라마": "OST",
"TV 드라마": "OST",
"애니메이션": "anime",
"인디": "Indie",
"인디힙합": "Indie Hip-Hop",
"재즈 힙합": "Jazz-Hop",
"댄스 팝": "Dance",
"발라드": "Ballad",
"댄스": "Dance",
"포크 팝": "Folk",
"팝": "Pop",
"팝 락": "Pop.Rock",
"인디 락": "Indie.Rock",
"락": "Rock",
"메탈": "Metal",
"인디 팝": "Indie.Pop",
"일렉트로닉": "Electronic",
"일렉트로닉 팝": "Electro",
"인디일렉트로닉": "Indie.Electronic",
"신스 팝": "Synth-Pop",
"J-POP": "J-Pop",
"재즈": "Jazz",
"성인가요": "Trot",
"월드뮤직": "World Music",
"국악": "Traditional",
"종교": "Religious",
"CCM": "CCM",
"어린이": "Child",
"태교": "Taegyo",
"캐롤": "Christmas",
"트랩": "Trap",
"얼터너티브 팝": "Alternative.Pop",
"얼터너티브": "Alternative",
"뉴에이지": "New Age",
"켈틱": "Celtic",
"켈틱 퓨전": "Celtic.Fusion",
"퓨전": "Fusion",
"에스닉 퓨전": "Ethnic.Fusion",
"레게": "Reggae",
"댄스홀": "Dancehall",
"하우스": "House",
"트로트": "Trot",
"얼터너티브 락": "Alternative.Rock",
"덥": "Dub",
"싸이키델릭": "Psychedelic",
"인스트루멘탈 힙합": "Instrumental.Hip-Hop",
"인스트루멘탈": "Instrumental",
"클래식": "Classic",
"컨트리": "Country",
"종교음악": "Religious",
"전통음악": "Traditional",
"블루스": "Blues",
"라틴": "Latin",
"기타": "Other",
"기능성음악": "Functional",
"인디포크": "indie.folk",
"포크": "Folk",
"어쿠스틱": "Acoustic",
"Hip-Hop": "hip.hop"
},
"artist": {
"오아": "OA",
"이고": "Ego",
"ハルカトミユキ": null,
"琴音": null,
"下村陽子 × suis from ヨルシカ": null,
"川島ケイジ": null,
"裸体": null,
"空音": null,
"さかいゆう": null,
"美波": null,
"アルカラ": null,
"윤상": null,
"ブレッド & バター": null,
"Official髭男dism": null,
"優里": null,
"サニーデイ・サービス": null,
"ずっと真夜中でいいのに。": null,
"やなぎなぎ": null,
"米津玄師": null,
"梶浦由記": null,
"澁谷逆太郎": null,
"ポルカドットスティングレイ": null,
"김트와친구들": null,
"安斉かれん": null,
"坂口有望": null,
"空想委員会": null,
"ヨルシカ": null,
"向井太一": null,
"ペンギンラッシュ": null,
"黒子首": null,
"中島みゆき": null,
"ハリィさんとスイカくらぶ": null,
"堀込高樹": null,
"堀込泰行": null,
"スピラ・スピカ": null,
"17歳とベルリンの壁": null,
"天野月": null,
"ソールドシュガー": null,
"ナンカノユメ": null,
"ルルルルズ": null,
"東京事変": null,
"藍井エイル": null,
"阿部真央": null,
"赤いくらげ": null,
"週末CITY PLAY BOYZ": null,
"林 浩司": null,
"蒼山幸子": null,
"フラスコテーション": null,
"ゑんら": null,
"ハンブレッダーズ": null,
"鈴木このみ": null,
"みゆな": null,
"ビッケブランカ": null,
"めありー": null,
"キタニタツヤ": null,
"イロメガネ": null,
"ヤユヨ": null,
"ピロカルピン": null,
"ツユ": null,
"リリー楽綺団": null,
"山崎ハコ": null,
"いきものがかり": null,
"はるまきごはん": null,
"おくみずき": null,
"渣泥": null,
"竹渕慶": null,
"早見沙織": null,
"倖田來未": null,
"世武裕子": null,
"ラブリーサマーちゃん": null,
"SUPER☆GiRLS": null,
"österreich": null,
"フレデリック": null,
"ズーカラデル": null,
"神山羊": null,
"太田ひな": null,
"ヤバイTシャツ屋さん": null,
"當山みれい": null,
"大森靖子": null,
"大原櫻子": null,
"東京スカパラダイスオーケストラ": null,
"三月のパンタシア": null,
"雨のパレード": null,
"川崎鷹也": null,
"中島 美嘉": null,
"加藤ミリヤ": null,
"りぶ": null,
"雨ニマケテモ": null,
"三浦大知": null,
"コブクロ": null,
"ももいろクローバーZ": null,
"手嶌葵": null,
"Nao☆": null,
"尾崎裕哉": null,
"マーティ・フリードマン": null,
"幾田りら": null,
"山本彩": null,
"ビッケブランカ VS 岡崎体育": null,
"まるりとりゅうが": null,
"藤原さくら": null,
"藤井風": null,
"sicboy": "",
"LUCA & haruka nakamura": "arca",
"伊沢麻未": null,
"マカロニえんぴつ": null,
"チャラン・ポ・ランタン": null,
"鈴木瑛美子": null,
"神はサイコロを振らない": null,
"宇野実彩子": "AAA",
"ウルトラタワー": null,
"空白ごっこ": null,
"Cö shu Nie": null,
"くるり": null,
"流線形 & 一十三十一": null,
"清水翔太": null,
"あれくん": null,
"秋山黄色": null,
"웬디": "WENDY",
"瀧川ありさ": null,
"キリンジ": null,
"ユアネス": null,
"クレナズム": null,
"H△G": null,
"電音部": null,
"武藤彩未": null,
"中島美嘉": null,
"雫": null,
"坂本真綾": null,
"たかやん": null,
"布袋寅泰": null,
"アイラヴミー": null,
"ナナヲアカリ": null,
"福山雅治": null,
"Jacob&よみぃ": null,
"クミコ": null,
"リュックと添い寝ごはん": null,
"眉村ちあき": null,
"ちゃんみな & SKY-HI": null,
"関口シンゴ": null,
"角巻わため": null,
"Snail’s House": null,
"ロザリーナ": null,
"ニノミヤユイ": null,
"シド": null,
"森内寛樹": null,
"TK from 凛として時雨": null,
"スダンナユズユリー": null,
"ヤなことそっとミュート": null,
"宇徳敬子": null,
"Heavenstamp & ハハノシキュウ": null,
"비": null,
"ぼっちぼろまる": null,
"さなり": null,
"家入レオ": null,
"凛として時雨": null,
"緑黄色社会": null,
"あるゆえ": null,
"宮下 遊": null,
"夢乃ゆき": null,
"女王蜂": null,
"夜の本気ダンス": null
}
}
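
How the dictionary might be applied at upload time, sketched (the file path follows this repo's layout; a null artist entry means "keep the original name"):
```
import json

with open('json_data/dictionary.json', encoding='utf-8') as f:
    mapping = json.load(f)

genre = mapping['genres'].get('발라드', 'Other')   # -> 'Ballad'
artist = mapping['artist'].get('오아') or '오아'   # -> 'OA'; null falls back to the original
```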

requirements.txt (+9, -0)

@@ -0,0 +1,9 @@
bs4==0.0.1
langdetect==1.0.7
mutagen==1.42.0
requests==2.22.0
torf==2.1.0
tqdm==4.39.0
html5lib
pymediainfo==4.1
humanfriendly==4.18

smpy.py (+117, -0)

@@ -0,0 +1,117 @@
import os
import pickle
import datetime
from urllib.parse import urlparse
import requests
class MyLoginSession:
def __init__(self,
loginUrl,
loginData,
loginTestUrl,
loginTestString,
sessionFileAppendix='_session.dat',
maxSessionTimeSeconds=30 * 60,
proxies=None,
userAgent='Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1',
debug=False,
forceLogin=False,
**kwargs):
"""
save some information needed to login the session
you'll have to provide 'loginTestString' which will be looked for in the
response's html to make sure you've properly been logged in
'proxies' is of format { 'https' : 'https://user:pass@server:port', 'http' : ...
'loginData' will be sent as post data (dictionary of id : value).
'maxSessionTimeSeconds' will be used to determine when to re-login.
"""
urlData = urlparse(loginUrl)
self.proxies = proxies
self.loginData = loginData
self.loginUrl = loginUrl
self.loginTestUrl = loginTestUrl
self.maxSessionTime = maxSessionTimeSeconds
self.sessionFile = urlData.netloc + sessionFileAppendix
self.userAgent = userAgent
self.loginTestString = loginTestString
self.debug = debug
self.login(forceLogin, **kwargs)
def modification_date(self, filename):
"""
return last file modification date as datetime object
"""
t = os.path.getmtime(filename)
return datetime.datetime.fromtimestamp(t)
def login(self, forceLogin=False, **kwargs):
"""
login to a session. Try to read last saved session from cache file. If this fails
do proper login. If the last cache access was too old, also perform a proper login.
Always updates session cache file.
"""
wasReadFromCache = False
if self.debug:
print('loading or generating session...')
if os.path.exists(self.sessionFile) and not forceLogin:
time = self.modification_date(self.sessionFile)
# only load if file less than 30 minutes old
lastModification = (datetime.datetime.now() - time).seconds
if lastModification < self.maxSessionTime:
with open(self.sessionFile, "rb") as f:
self.session = pickle.load(f)
wasReadFromCache = True
if self.debug:
print("loaded session from cache (last access %ds ago) "
% lastModification)
if not wasReadFromCache:
self.session = requests.Session()
self.session.headers.update({'user-agent': self.userAgent})
res = self.session.post(self.loginUrl, data=self.loginData,
proxies=self.proxies, **kwargs)
if self.debug:
print('created new session with login')
self.saveSessionToCache()
# test login
res = self.session.get(self.loginTestUrl)
if res.text.lower().find(self.loginTestString.lower()) < 0:
if self.debug:
print(res.text)
raise Exception("could not log into provided site '%s'"
" (did not find successful login string)"
% self.loginUrl)
def saveSessionToCache(self):
"""
save session to a cache file
"""
# always save (to update timeout)
with open(self.sessionFile, "wb") as f:
pickle.dump(self.session, f)
if self.debug:
print('updated session cache-file %s' % self.sessionFile)
def retrieveContent(self, url, method="get", postData=None, postDataFiles=None, **kwargs):
"""
return the content of the url with respect to the session.
If 'method' is not 'get', the url will be called with 'postData'
as a post request.
"""
if method == 'get':
res = self.session.get(url, proxies=self.proxies, **kwargs)
else:
res = self.session.post(url, data=postData, proxies=self.proxies, files=postDataFiles, **kwargs)
# the session has been updated on the server, so also update in cache
self.saveSessionToCache()
return res

upload.py (+462, -0)

@@ -0,0 +1,462 @@
# get args
# make torrent
# read mediainfo
# upload torrent
# move torrent to watch dir
# Standard library packages
from subprocess import check_output
import re
import os
import sys
import argparse
from urllib.parse import urlparse
import json
# Third-party packages
from bs4 import BeautifulSoup
from torf import Torrent
from pathlib import Path
# JPS-AU files
import smpy
from pymediainfo import MediaInfo
def asciiart():
print("""
███████╗███╗ ███╗ █████╗ ██╗ ██╗ ████████╗██╗ ██╗
██╔════╝████╗ ████║ ██╔══██╗██║ ██║ ╚══██╔══╝██║ ██║
███████╗██╔████╔██║█████╗███████║██║ ██║█████╗██║ ██║ ██║
╚════██║██║╚██╔╝██║╚════╝██╔══██║██║ ██║╚════╝██║ ╚██╗ ██╔╝
███████║██║ ╚═╝ ██║ ██║ ██║╚██████╔╝ ██║ ╚████╔╝
╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝
""")
def getargs():
"""
Get arguments using argparse
"""
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', help='Initiate upload on input file', nargs='?', required=True)
parser.add_argument('-d', '--debug', help='Enable debug mode.', action='store_true')
parser.add_argument("-dry", "--dryrun", help="Dryrun will carry out all actions other than the actual upload to SM.", action="store_true")
parser.add_argument("-a", "--artists", help='Set the artists. (Romaji\English). Split multiple with ","', nargs='?')
parser.add_argument("-oa", "--originalartist", help='Set the artist. (Original Language)', nargs='?')
parser.add_argument("-ca", "--contributingartists", help='Set the contributing artists. (Romaji\English). Split multiple with ","', nargs='?')
parser.add_argument("-ti", "--title", help='Set the title. (Romaji\English)', nargs='?')
parser.add_argument("-oti", "--originaltitle", help='Set the title. (Original Language)', nargs='?')
parser.add_argument("-des", "--description", help='Add a torrent description.', nargs='?', required=True)
parser.add_argument("-t", "--tags", help="Add additional tags to the upload. At least 2 tags are required", nargs='?')
parser.add_argument("-im", "--imageURL", help='Set the torrent cover URL.', nargs='?')
parser.add_argument("-ms", "--mediasource", help='Set the media source.', nargs='?')
parser.add_argument("-rt", "--releasetype", help='Set the release type.', nargs='?')
parser.add_argument("-s", "--sub", help='Set the subtitle type.', nargs='?')
parser.add_argument("-l", "--language", help='Set the language', nargs='?')
parser.add_argument("-y", "--year", help='Set the torrent year (YYYYMMDD or YYYY).', nargs='?')
parser.add_argument("-f", "--freeleech", help="Enables freeleech.", action="store_true")
return parser.parse_args()
def gatherdata():
"""
Retrieve data about the upload. Ask for user input if necessary.
:return: releasedata: dict
"""
releasedata = {"submit": "true"}
releasedata["album_desc"] = description
if artists:
releasedata['idols[]'] = artists
else:
input_english_artist = input("\n" + "_" * 100 + "\nEnter the romaji/english ARTIST name. Separate multiple with \",\". \n")
input_english_artist = [x.strip() for x in input_english_artist.split(',')]
releasedata['idols[]'] = input_english_artist
if originalartist:
releasedata['artist_jp'] = originalartist
else:
input_artist = input("\n" + "_" * 100 + "\nEnter the original ARTIST name. Press enter to skip if this torrent has multiple artists or artist name is already english. \n")
releasedata['artist_jp'] = input_artist
if contributingartists:
input_english_contributing_artist = contributingartists
else:
input_english_contributing_artist = input("\n" + "_" * 100 + "\nEnter the romaji/english CONTRIBUTING ARTIST name. Separate with \",\". Press enter to skip.\n")
if input_english_contributing_artist != "":
input_english_contributing_artist = [x.strip() for x in input_english_contributing_artist.split(',')]
releasedata['contrib_artists[]'] = input_english_contributing_artist
if title:
releasedata['title'] = title
else:
input_english_title = input("\n" + "_" * 100 + "\nEnter the romaji/english TITLE:\n")
releasedata['title'] = input_english_title
if originaltitle:
releasedata['title_jp'] = originaltitle
else:
input_title = input("\n" + "_" * 100 + "\nEnter the original TITLE. Press enter to skip.\n\n")
releasedata['title_jp'] = input_title
if sub:
releasedata["sub"] = sub
else:
while(True):
input_sub = input("\n" + "_" * 100 + "\nEnter a number to choose subtitle type. \n1=NoSubs\n2=Softsubs\n3=Hardsubs\n")
if input_sub == "1":
releasedata["sub"] = "NoSubs"
break
elif input_sub == "2":
releasedata["sub"] = "Softsubs"
break
elif input_sub == "3":
releasedata["sub"] = "Hardsubs"
break
print("Invalid choice.")
if language:
releasedata["lang"] = language
else:
while(True):
input_lang = input("\n" + "_" * 100 + "\nEnter a number to choose the language. \n1=Japanese\n2=English\n3=Korean\n4=Chinese\n5=Vietnamese\n6=Other\n")
if input_lang == "1":
releasedata["lang"] = "Japanese"
break
elif input_lang == "2":
releasedata["lang"] = "English"
break
elif input_lang == "3":
releasedata["lang"] = "Korean"
break
elif input_lang == "4":
releasedata["lang"] = "Chinese"
break
elif input_lang == "5":
releasedata["lang"] = "Vietnamese"
break
elif input_lang == "6":
releasedata["lang"] = "Other"
break
print("Invalid choice.")
if mediasource:
releasedata['media'] = mediasource
else:
while(True):
input_media = input("\n" + "_" * 100 + "\nEnter a number to choose the media source. \n1=HDTV\n2=Web\n")
if input_media == "1":
releasedata["media"] = "HDTV"
break
elif input_media == "2":
releasedata["media"] = "Web"
break
print("Invalid choice.")
if year:
releasedata["year"] = year
else:
input_year = input("\n" + "_" * 100 + "\nEnter the year as YYYYMMDD or YYYY.\n")
releasedata["year"] = input_year
if tags:
input_tags = tags
else:
input_tags = input("\n" + "_" * 100 + "\nEnter the tags. Separate multiple with \",\". Minimum 2 tags required.\n")
if input_tags != "":
input_tags = [x.strip() for x in input_tags.split(',')]
releasedata["tags"] = input_tags
list_of_types = ["PV", "Music Performance", "TV Music", "TV Variety", "TV-Drama"]
if releasetype in list_of_types:
if releasetype == "PV":
releasedata["type"] = 5
elif releasetype == "Music Performance":
releasedata["type"] = 6
elif releasetype == "TV Music":
releasedata["type"] = 7
elif releasetype == "TV Variety":
releasedata["type"] = 8
elif releasetype == "TV-Drama":
releasedata["type"] = 9
else:
while(True):
input_lang = input("\n" + "_" * 100 + "\nEnter a number to choose the upload type. \n1=PV\n2=Music Performance\n3=TV Music\n4=TV Variety\n5=TV-Drama\n")
if input_lang == "1":
releasedata["type"] = 5
break
elif input_lang == "2":
releasedata["type"] = 6
break
elif input_lang == "3":
releasedata["type"] = 7
break
elif input_lang == "4":
releasedata["type"] = 8
break
elif input_lang == "5":
releasedata["type"] = 9
break
print("Invalid choice.")
return releasedata
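# For reference, a fully populated releasedata dict might look like this
# (hypothetical values):
#     {"submit": "true", "album_desc": "...", "idols[]": ["Artist"], "artist_jp": "",
#      "contrib_artists[]": ["Other Artist"], "title": "Title", "title_jp": "",
#      "sub": "NoSubs", "lang": "Japanese", "media": "HDTV", "year": "2021",
#      "tags": ["tag1", "tag2"], "type": 7}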
def add_mediainfo_to_releasedata(filename, releasedata):
"""
Retrieve mediainfo and append it to the releasedata dictionary.
:return: releasedata: dict
"""
mediainfosall = ""
media_info = MediaInfo.parse(filename)
mediainfosall += str(MediaInfo.parse(filename, text=True))
replacement = str(Path(filename).parent)
mediainfosall = mediainfosall.replace(replacement, '')
for track in media_info.tracks:
if track.track_type == 'General':
# releasedataout['language'] = track.audio_language_list # Will need to check if this is reliable
if 'container' not in releasedata: # Not an ISO; only set container if we do not already know it's an ISO
releasedata['container'] = track.file_extension.upper()
else: # We have an ISO - derive category data from MediaInfo if we have it
if track.file_extension.upper() == 'VOB':
releasedata['category'] = 'DVD'
elif track.file_extension.upper() == 'M2TS': # Not used yet as we cannot handle Bluray / UDF
releasedata['category'] = 'Bluray'
if track.track_type == 'Video':
validatecodec = {
"MPEG Video": "MPEG-2",
"AVC": "h264",
"HEVC": "h265",
"MPEG-4 Visual": "DivX", # MPEG-4 Part 2 / h263 , usually xvid / divx
}
for old, new in validatecodec.items():
if track.format == old:
releasedata['codec'] = new
standardresolutions = {
"3840": "2160",
"1920": "1080",
"1280": "720",
"720": "480",
}
for width, height in standardresolutions.items():
if str(track.width) == width and str(track.height) == height:
releasedata['ressel'] = height
if 'ressel' in releasedata.keys(): # Known resolution type, try to determine if interlaced
if track.scan_type == "Interlaced" or track.scan_type == "MBAFF":
releasedata['ressel'] += "i"
else:
releasedata['ressel'] += "p" # Sometimes a Progressive encode has no field set
else: # Custom resolution
releasedata['ressel'] = 'Other'
releasedata['resolution'] = str(track.width) + "x" + str(track.height)
if track.track_type == 'Audio' or track.track_type == 'Audio #1': # Handle multiple audio streams; we just take data from the first for now
if track.format in ["AAC", "DTS", "PCM", "AC3"]:
releasedata['audioformat'] = track.format
elif track.format == "AC-3":
releasedata['audioformat'] = "AC3"
elif track.format == "MPEG Audio" and track.format_profile == "Layer 3":
releasedata['audioformat'] = "MP3"
elif track.format == "MPEG Audio" and track.format_profile == "Layer 2":
releasedata['audioformat'] = "MP2"
releasedata["mediainfo"] = mediainfosall
return releasedata
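# In summary, this function adds (when detectable): 'container', 'category' for
# DVD/Bluray ISOs, 'codec', 'ressel' (e.g. "1080p"/"1080i", or 'Other' plus
# 'resolution' for non-standard sizes), 'audioformat', and the full 'mediainfo'
# text dump.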
# Creates torrent file using torf module.
def createtorrent(authkey, filepath, releasedata):
"""
Creates a torrent.
:param: authkey: authkey string
:param: filepath: full path of the file for torrent creation
:param: releasedata: dict
:return: filename of created torrent
"""
t = Torrent(path=filepath,
trackers=[authkey]) # Torf requires we store authkeys in a list object. This makes it easier to add multiple announce urls.
# Set torrent to private as standard practice for private trackers
t.private = True
t.source = "SugoiMusic"
t.generate()
## Format releasedata to build a suitable torrent name.
# We do not just use the directory name because of an error when POSTing:
# hangul/Japanese characters do not seem to survive the POST alongside files.
# filename = f"{releasedata['idols[]']} - {releasedata['title']} [{releasedata['media']}-{releasedata['audioformat']}].torrent"
filename = f"{releasedata['title']}.torrent"
filename = filename.replace("/","")
try:
t.write(filename)
print("_" * 100)
print("Torrent creation:\n")
print(f"{filename} has been created.")
except Exception:
# t.write() raises an error rather than overwrite an existing file, so replace it explicitly
print("_" * 100)
print("Torrent creation:\n")
os.remove(filename)
print(f"{filename} already exists, existing torrent will be replaced.")
t.write(filename)
print(f"{filename} has been created.")
return filename
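# A quick sanity check of the result with torf (illustrative):
#     t = Torrent.read(filename)
#     assert t.private and t.source == "SugoiMusic"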
def getauthkey():
"""
Get SM session authkey for use by uploadtorrent() data dict.
Uses SM login data
:return: authkey
"""
smpage = sm.retrieveContent("https://sugoimusic.me/torrents.php?id=118") # Arbitrary page on SugoiMusic that contains an authkey
soup = BeautifulSoup(smpage.text, 'html5lib')
rel2 = str(soup.select('#content .thin .main_column .torrent_table tbody'))
authkey = re.findall('authkey=(.*)&amp;torrent_pass=', rel2)
return authkey
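# Note: re.findall() returns a list; every match on the page should be the same
# authkey, and requests encodes a single-element list as a single form field.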
def uploadtorrent(torrent, imageURL, releasedata):
"""
Uploads a torrent.
:param: torrent: torrent filename.
:param: imageURL: url to a cover image
:param: releasedata: dict
"""
# POST url.
uploadurl = "https://sugoimusic.me/upload.php"
# Dataset containing all of the information gathered for this release.
data = releasedata
data['image'] = imageURL
if not dryrun:
data['auth'] = authkey
if debug:
print('_' * 100)
print('Release Data:\n')
for field in data:
print(f"{field}: {data[field]}")
try:
postDataFiles = {
'file_input': open(torrent, 'rb')
#'userfile': open(cover, 'rb')
}
except FileNotFoundError:
print("_" * 100)
print('File not found!\nPlease confirm file locations and names. The .torrent file could not be opened.')
sys.exit()
# If the dryrun argument has not been passed, POST the results to SugoiMusic.
if not dryrun:
SMres = sm.retrieveContent(uploadurl, "post", data, postDataFiles)
print('\nUpload POSTED. It may take a moment for this upload to appear on SugoiMusic.')
SMerrorTorrent = re.findall('red; text-align: center;">(.*)</p>', SMres.text)
SMerrorLogon = re.findall('<p>Invalid (.*)</p>', SMres.text)
# re.findall() returns a list (never None), so check for a non-empty result
if SMerrorTorrent:
print("Upload failed.")
print(SMerrorTorrent)
if SMerrorLogon:
print(SMerrorLogon)
## TODO Filter through SMres.text and create error handling based on responses
#print(SMres.text)
def localfileorganization(torrent, watch_folder):
if cfg['local_prefs']['add_to_watch_folder']:
os.rename(torrent, f"{watch_folder}/{torrent}")
if __name__ == "__main__":
asciiart()
args = getargs()
# TODO consider reading args directly; these defaults would then be unnecessary
dryrun = debug = freeleech = imageURL = tags = inputfile = artists = contributingartists = title = None
originalartist = originaltitle = description = sub = language = year = mediasource = releasetype = None
inputfile = args.input
description = args.description
if args.dryrun:
dryrun = True
if args.debug:
debug = True
if args.freeleech:
freeleech = True
if args.imageURL:
imageURL = args.imageURL
if args.releasetype:
releasetype = args.releasetype
if args.title:
title = args.title
if args.artists:
artists = args.artists
if args.contributingartists:
contributingartists = args.contributingartists
if args.originalartist:
originalartist = args.originalartist
if args.originaltitle:
originaltitle = args.originaltitle
if args.language:
language = args.language
if args.year:
year = args.year
if args.sub:
sub = args.sub
if args.mediasource:
mediasource = args.mediasource
if args.tags:
tags = args.tags
releasedata = gatherdata()
releasedata_and_mediainfo = add_mediainfo_to_releasedata(inputfile, releasedata)
if debug:
print("Release data and MediaInfo complete. Uploading torrent now.")
with open('json_data/config.json') as f:
cfg = json.load(f)
loginData = {'username': cfg['credentials']['username'], 'password': cfg['credentials']['password']}
loginUrl = "https://sugoimusic.me/login.php"
loginTestUrl = "https://sugoimusic.me"
successStr = "Enabled users"
passkey = cfg['credentials']['passkey']
announceurl = "https://tracker.sugoimusic.me:24601/"+passkey+"/announce"
# sm is a session object which can be used to make requests within the login session
sm = smpy.MyLoginSession(loginUrl, loginData, loginTestUrl, successStr, debug=args.debug)
# Acquire authkey
authkey = getauthkey()
torrentfile = createtorrent(announceurl, inputfile, releasedata_and_mediainfo)
uploadtorrent(torrentfile, imageURL, releasedata_and_mediainfo)
# Setting variable for watch/download folders
ftp_watch_folder = cfg['ftp_prefs']['ftp_watch_folder']
ftp_downloads_folder = cfg['ftp_prefs']['ftp_downloads_folder']
local_watch_folder = cfg['local_prefs']['local_watch_folder']
local_downloads_folder = cfg['local_prefs']['local_downloads_folder']
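# Only the local watch folder is acted on below; the FTP folders and the local
# downloads folder are read here but not currently used by this script.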
if not dryrun:
if cfg['local_prefs']['add_to_watch_folder']:
localfileorganization(torrent=torrentfile, watch_folder=local_watch_folder)
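# json_data/config.json is expected to provide at least the following keys
# (values illustrative; see json_data/config.json.example):
#     {"credentials": {"username": "...", "password": "...", "passkey": "..."},
#      "local_prefs": {"add_to_watch_folder": true, "add_to_downloads_folder": false,
#                      "local_watch_folder": "...", "local_downloads_folder": "..."},
#      "ftp_prefs": {"ftp_watch_folder": "...", "ftp_downloads_folder": "..."}}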
