
initial files

master
sharky555 4 years ago
commit
07691cf018
7 changed files with 1152 additions and 0 deletions
  1. +4 -0 .gitignore
  2. +74 -0 README.md
  3. +693 -0 autoupload.py
  4. +117 -0 jpspy.py
  5. +26 -0 json_data/config.json.example
  6. +231 -0 json_data/dictionary.json
  7. +7 -0 requirements.txt

+ 4
- 0
.gitignore View File

@@ -0,0 +1,4 @@
__pycache__
config.json
*session.dat
*.torrent

+ 74
- 0
README.md View File

@@ -0,0 +1,74 @@
This project is **NOT** being maintained, as I'd rather rewrite it from scratch.

## Overview
**JPS-AU** is a tool for automating the uploading process on jpopsuki.eu.
This is intended to be used alongside BugsPy.

**Features:**
- JPS Client.
- FTP Support.
- FLAC/MP3 Support.
- BugsPy .log Support.

**Installation:**
- Install requirements
```
pip install -r requirements.txt
```

## Command Usage
```
python autoupload.py [options] -dir "{album directory}"
```
Command | Description | Example
------------- | ------------- | -------------
-d, --debug | Provides additional information on upload for debugging purposes | `python autoupload.py -d`
-f, --freeleech | Enables freeleech (VIP+ Userclass Requirement) | `python autoupload.py -f -dir "Z:\Music\Korean\Ego\Ego - E [2020.01.02] [EP] [WEB-MP3]"`
-t, --tags | Add additional tags to upload, separated with comma | `python autoupload.py -t "korean, female.vocalist" -dir "Z:\Music\Korean\Ego\Ego - E [2020.01.02] [EP] [WEB-MP3]"`
-dir, --directory | Specify the directory used for torrent creation | `python autoupload.py -dir "Z:\Music\Korean\Ego\Ego - E [2020.01.02] [EP] [WEB-MP3]"`
-dry, --dryrun | Carries out all actions other than the upload itself. | `python autoupload.py -dir "Z:\Music\Korean\Ego\Ego - E [2020.01.02] [EP] [WEB-MP3]" -dry`

## Config.json

- It's not recommended to use both local watch/download folders and FTP watch/download folders at the same time, as it will result in seeding from two locations.
- If `generate_tracklist` is set to `false`, the script will try to find BugsPy logs within the selected log folder, using the comments tag to locate the log. For example, if 204815 is in your comments tag, it will search your log folder for 204815.log and use the contents of that log as the album description.
- Similarly to `generate_tracklist`, if `enable_release_description` is `true`, the script will create a URL to the Bugs.co.kr album webpage to show the source; this can easily be edited to suit your needs within the `gatherdata()` function. See the sketch below.
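
A rough sketch of what those two options do, simplified from `readlog()` and `gatherdata()`; the id `204815` and the `Z:/Bugs/Logs` path are placeholder values, not defaults:

```python
# Simplified sketch of the comments-tag lookup described above.
log_directory = "Z:/Bugs/Logs"   # cfg['local_prefs']['log_directory']
comment = "204815"               # COMMENT tag read from the audio files

# generate_tracklist = false: the comment tag names the BugsPy log to reuse.
with open(f"{log_directory}/{comment}.log", encoding="utf-8") as f:
    album_description = f.read()

# enable_release_description = true: the same id builds the Bugs.co.kr source link.
release_description = f"Sourced from [url=https://music.bugs.co.kr/album/{comment}]Bugs[/url]"
```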

**credentials:**

Config | Description | Example
------------- | ------------- | -------------
Username | JPopSuki Username | Slyy
Password | JPopSuki Password | Password

**local_prefs**

Config | Description | Example
------------- | ------------- | -------------
log_directory | directory containing BugsPy log files | `Z:/Bugs/Logs`
generate_tracklist | enable tracklist generation | `true/false`
save_tracklist | write tracklist to .log and save in log folder | `true/false`
enable_release_description | post comments tag to release description | `true/false`
cover_name | name of cover with extension | `cover.jpg`
add_to_watch_folder | moves .torrent file to local watch folder | `true/false`
add_to_downloads_folder | moves torrent data to local downloads folder | `true/false`
local_watch_folder | directory of local watch folder | `Z:/watch/Transmission`
local_downloads_folder | directory of local downloads folder | `Z:/downloads`


**ftp_prefs:**

Config | Description | Example
------------- | ------------- | -------------
enable_ftp | enable FTP mode; if enabled, it is suggested to disable the local watch and downloads folders | `true/false`
add_to_watch_folder | transfer .torrent file to watch folder on FTP server | `true/false`
add_to_downloads_folder | transfer torrent data to downloads folder on FTP server | `true/false`
ftp_server | url of ftp server | haze.seedhost.eu
ftp_username | username of ftp account | slyy
ftp_password | password of ftp account | password
ftp_watch_folder | directory of ftp watch folder | `/downloads/watch/transmission`
ftp_downloads_folder | directory of ftp downloads folder | `/downloads`


## Disclaimer
- The usage of this script **may be** illegal in your country. It's your own responsibility to inform yourself of Copyright Law.

+ 693
- 0
autoupload.py View File

@@ -0,0 +1,693 @@
# Standard library packages
import re
import os
import sys
import shutil
import string
import argparse
import html
from urllib.parse import urlparse
import json
import ftplib

# Third-party packages
import requests
from bs4 import BeautifulSoup
from mutagen.flac import FLAC
from mutagen.mp3 import MP3
from torf import Torrent
from tqdm import tqdm
from langdetect import detect

# JPS-AU files
import jpspy

def asciiart():
    print("""
██╗██████╗ ███████╗ █████╗ ██╗ ██╗
██║██╔══██╗██╔════╝ ██╔══██╗██║ ██║
██║██████╔╝███████╗█████╗███████║██║ ██║
██ ██║██╔═══╝ ╚════██║╚════╝██╔══██║██║ ██║
╚█████╔╝██║ ███████║ ██║ ██║╚██████╔╝
╚════╝ ╚═╝ ╚══════╝ ╚═╝ ╚═╝ ╚═════╝

""")

# Get arguments using argparse
def getargs():
    parser = argparse.ArgumentParser()
    parser.add_argument('-dir', '--directory', help='Initiate upload on directory', nargs='?', required=True)
    parser.add_argument("-f", "--freeleech", help="Enables freeleech", action="store_true")
    parser.add_argument("-t", "--tags", help="Add additional tags to the upload", nargs='?')
    parser.add_argument('-d', '--debug', help='Enable debug mode', action='store_true')
    parser.add_argument("-dry", "--dryrun", help="Dryrun will carry out all actions other than the actual upload to JPS.", action="store_true")

    return parser.parse_args()

# Acquire the authkey used for torrent files from upload.php
def getauthkey():
    uploadpage = j.retrieveContent("https://jpopsuki.eu/upload.php")
    soup = BeautifulSoup(uploadpage.text, 'html5lib')
    rel2 = str(soup.select('#wrapper #content .thin'))
    # Regex returns multiple matches, could be optimized.
    authkey = re.findall("(?<=value=\")(.*)(?=\")", rel2)[0]

    return authkey


# Recursively copy the contents of src into dst (used instead of shutil.copytree so dst may already exist).
def copytree(src, dst, symlinks=False, ignore=None):
    for item in os.listdir(src):
        s = os.path.join(src, item)
        d = os.path.join(dst, item)
        if os.path.isdir(s):
            shutil.copytree(s, d, symlinks, ignore)
        else:
            shutil.copy2(s, d)

# Creates torrent file using torf module.
def createtorrent(authkey, directory, filename, releasedata):
    t = Torrent(path=directory,
                trackers=[authkey])  # Torf requires we store authkeys in a list object. This makes it easier to add multiple announce urls.
    # Set torrent to private as standard practice for private trackers
    t.private = True
    t.generate()
    ## Format releasedata to build a suitable torrent name.
    # The reason we don't just use the directory name is because of an error in POSTING.
    # POSTS do not seem to POST hangul/jp characters alongside files.
    filename = f"{releasedata['artist']} - {releasedata['title']} [{releasedata['media']}-{releasedata['format']}].torrent"
    #filename = filename.replace("\\","")
    try:
        t.write(filename)
        print("_" * 100)
        print("Torrent creation:\n")
        print(f"{filename} has been created.")
    except:
        print("_" * 100)
        print("Torrent creation:\n")
        os.remove(filename)
        print(f"{filename} already exists, existing torrent will be replaced.")
        t.write(filename)
        print(f"{filename} has been created.")

    return filename

# Reads FLAC file and returns metadata.
def readflac(filename):
    read = FLAC(filename)

    # Create dict containing all meta fields we'll be using.
    tags = {
        "ALBUM": read.get('album'),
        "ALBUMARTIST": read.get('albumartist'),
        "ARTIST": read.get('artist'),
        "DATE": read.get('date')[0],
        "GENRE": "",  # read.get('genre'),
        "TITLE": read.get('title'),
        "COMMENT": read.get('comment'),
        "TRACKNUMBER": read.get('tracknumber')[0].zfill(2),
        "DISCNUMBER": read.get('discnumber')}

    # Not further looked into this but some FLACs hold a grouping key of contentgroup instead of grouping.
    tags['GROUPING'] = read.get('grouping')
    ## If grouping returns None we check contentgroup.
    # If it still returns none we will ignore it and handle on final checks
    if tags['GROUPING'] == None:
        tags['GROUPING'] = read.get('contentgroup')

    required_tags = ['ALBUM', 'ALBUMARTIST', 'DATE', 'TRACKNUMBER']
    for k, v in tags.items():
        if v == None:
            if k in required_tags:
                print(f"{k} has returned {v}, this is a required tag")
                sys.exit()

    return tags

# Reads MP3 file and returns metadata.
def readmp3(filename):
    read = MP3(filename)

    # Create dict containing all meta fields we'll be using.
    tags = {
        "ALBUM": read.get('TALB'),  # Album Title
        "ALBUMARTIST": read.get('TPE2'),  # Album Artist
        "ARTIST": read.get('TPE1'),  # Track Artist
        "DATE": str(read.get('TDRC')),  # Date YYYYMMDD (Will need to add a try/except for other possible identifiers)
        "GENRE": read.get('TCON').text,  # Genre
        "TITLE": read.get('TIT2'),  # Track Title
        "COMMENT": read.get('COMM::eng'),  # Track Comment
        "GROUPING": read.get('TIT1'),  # Grouping
        "TRACKNUMBER": re.sub(r"\/.*", "", str(read.get('TRCK'))).zfill(2),  # Tracknumber (Format #/Total) Re.sub removes /#
        "DISCNUMBER": re.sub(r"\/.*", "", str(read.get('TPOS')))}  # Discnumber (Format #/Total) Re.sub removes /#

    required_tags = ['ALBUM', 'ALBUMARTIST', 'DATE', 'TRACKNUMBER']
    for k, v in tags.items():
        if v == None:
            if k in required_tags:
                print(f"{k} has returned {v}, this is a required tag")
                sys.exit()

    return tags

# Generates new log file based on directory contents
def generatelog(track_titles, log_filename, log_directory):
    # Separate each tracklist entry in the list with a newline
    track_titles = '\n'.join([str(x) for x in track_titles])

    # Format tracklist layout
    log_contents = f"""[size=5][b]Tracklist[/b][/size]\n{track_titles}
"""

    # If we have chosen to save the tracklist then we write log_contents to a .log file within the log directory specified
    if cfg['local_prefs']['save_tracklist']:
        # Write to {album_name}.log
        with open(f"{log_directory}/{log_filename}.log", "w+") as f:
            f.write(log_contents)
            # Reset position to first line and read
            f.seek(0)
            log_contents = f.read()
            f.close()

    # If debug mode is enabled we will print the log contents.
    if debug:
        print("_" * 100)
        print(f"Log Contents/Tracklisting: {log_contents}")

    return log_contents


def readlog(log_name, log_directory):
    with open(f"{log_directory}/{log_name}.log", "r+") as f:
        log_contents = f.read()
        f.close()

    return log_contents

def add_to_hangul_dict(hangul, english, category):
    hangul = str(hangul)
    english = str(english)

    categories = ['version', 'general', 'artist', 'genres', 'label', 'distr']
    file = f"json_data/dictionary.json"
    json_file = open(file, 'r', encoding='utf-8', errors='ignore')
    dictionary = json.load(json_file)
    json_file.close()

    new = dict()
    for cats in dictionary:
        #== Create the categories in the new temp file
        new[cats] = dict()

        for key, value in dictionary[cats].items():
            #== List all the old items into the new dict
            new[cats][key] = value

    if hangul in new[category].keys():

        if new[category].get(hangul) is None:

            if english != 'None':
                new[category][hangul] = english

        else:
            #== Only update if English word has been supplied ==#
            if english != 'None':
                new[category][hangul] = english
    else:

        if english == 'None':
            new[category][hangul] = None
        else:
            new[category][hangul] = english

    json_write = open(file, 'w+', encoding='utf-8')
    json_write.write(json.dumps(new, indent=4, ensure_ascii=False))
    json_write.close()

def translate(string, category, result=None, output=None):

    file = "json_data/dictionary.json"
    with open(file, encoding='utf-8', errors='ignore') as f:
        dictionary = json.load(f, strict=False)

    category = str(category)
    string = str(string)
    search = dictionary[category]
    string = string.strip()

    if string == 'Various Artists':
        output = ['Various Artists', None]
    else:
        #== NO NEED TO SEARCH - STRING HAS HANGUL+ENGLISH or HANGUL+HANGUL ==#
        if re.search("\((?P<inside>.*)\)", string):
            #== Complete translation, add to dictionary with both values ==#

            #== Contains parentheses, need to split
            parenthesis = string.split("(")
            pre_parenthesis = parenthesis[0].strip()
            in_parenthesis = parenthesis[1].replace(")", "").strip()

            #== Check the order of the parentheses ==#

            if re.search("[^\u0000-\u007F]+", pre_parenthesis) and re.search("[^\u0000-\u007F]+", in_parenthesis):
                #== Both hangul
                first = 'kr'
                second = 'kr'
            else:
                if re.search("[^\u0000-\u007F]+", pre_parenthesis):
                    first = 'kr'
                    second = 'eng'
                else:
                    first = 'eng'
                    second = 'kr'

            if first == 'kr' and second == 'eng':
                #== Hangul first ==#
                hangul = pre_parenthesis
                english = in_parenthesis
                add_to_hangul_dict(hangul, english, category)

            elif first == 'eng' and second == 'kr':
                #== English first ==#
                hangul = in_parenthesis
                english = pre_parenthesis
                add_to_hangul_dict(hangul, english, category)
            elif first == 'kr' and second == 'kr':
                #== Both Hangul ==#
                hangul = pre_parenthesis
                english = None
                add_to_hangul_dict(pre_parenthesis, None, category)
                add_to_hangul_dict(hangul, None, category)
            else:
                #== Both English
                hangul = None
                english = pre_parenthesis

            output = [hangul, english]

        #== No parentheses - HANGUL
        else:

            #== If the input string is a full Hangul word - check dictionary and then add if necessary)
            if re.search("[^\u0000-\u007F]+", string):

                if string in search.keys():
                    #== yes
                    if search.get(string) is None:
                        #== If the keyword does not have a translation, add it to the dictionary ==#
                        output = [string, None]
                    else:
                        #== Translation already exists, output the result in a list ==#
                        output = [string, search.get(string)]
                else:
                    output = [string, None]
                    add_to_hangul_dict(string, None, category)

            #== Full English name -- leave it
            else:
                for key, value in search.items():
                    if key == string:
                        output = [value, string]
                        break
                    else:
                        output = [string, string]

    return output

def gatherdata(directory):
    # Lists for storing the metadata collected from each file
    list_album_artists = []
    list_track_artists = []
    list_album = []
    list_genre = []
    translated_genre = []
    translated_album_artists = []
    tracklist_entries = []
    # Creation of releasedata dict, this will store formatted meta used for the POST.
    releasedata = {}

    ## Set no log as default value.
    # This will be set to True if a .log file is found, in turn this will allow us to determine if WEB or CD.
    log_available = False
    flac_present = False
    mp3_present = False
    # Read directory contents, grab metadata of .FLAC files.
    for file in os.listdir(directory):
        file_location = os.path.join(directory, file)
        if file.endswith(".flac"):
            # Read FLAC file to grab meta
            tags = readflac(file_location)
            flac_present = True
            # If Discnumber isn't present then we omit it from the tracklist entry
            if tags['DISCNUMBER'] == None:
                tracklist_entry = f"[b]{tags['TRACKNUMBER']}[/b]. {tags['TITLE'][0]}"
            else:
                tracklist_entry = f"[b]{tags['DISCNUMBER'][0]}-{tags['TRACKNUMBER']}[/b]. {tags['TITLE'][0]}"

            tracklist_entries.append(tracklist_entry)

            if debug:
                print("_" * 100)
                print(f"Tags for {file}:\n{tags}")

        if file.endswith(".mp3"):
            # Read MP3 file to grab meta
            tags = readmp3(file_location)
            mp3_present = True
            # If Discnumber isn't present then we omit it from the tracklist entry
            if tags['DISCNUMBER'] == "None":
                tracklist_entry = f"[b]{tags['TRACKNUMBER']}[/b]. {tags['TITLE'][0]}"
            else:
                tracklist_entry = f"[b]{tags['DISCNUMBER']}-{tags['TRACKNUMBER']}[/b]. {tags['TITLE'][0]}"

            tracklist_entries.append(tracklist_entry)

            if debug:
                print("_" * 100)
                print(f"Tags for {file}:\n{tags}")

        # If only one genre in list attempt to split as there's likely more.
        if len(tags['GENRE']) == 1:
            tags['GENRE'] = tags['GENRE'][0].split(";")
        for aa in tags['ALBUMARTIST']:
            list_album_artists.append(aa)
        for a in tags['ARTIST']:
            list_track_artists.append(a)
        list_album.append(tags['ALBUM'][0])
        # for g in tags['GENRE']:
        #     list_genre.append(g)

        # Check files to make sure there's no multi-format.
        if flac_present:
            format = 'FLAC'
            bitrate = 'Lossless'
        if mp3_present:
            format = 'MP3'
            bitrate = '320'
        if flac_present and mp3_present:
            print("Mutt detected, exiting.")
            sys.exit()

        if file.endswith(".log"):
            log_available = True

    if log_available == True:
        media = 'CD'
    else:
        media = 'WEB'

    # Load Dict.json for translations
    file = "json_data/dictionary.json"
    with open(file, encoding='utf-8', errors='ignore') as f:
        dictionary = json.load(f, strict=False)

    # Split additional genres at comma and append to existing genre tags
    if additional_tags != None:
        split_tags = additional_tags.split(",")
        for s in split_tags:
            list_genre.append(s)

    # Translate genres using dict and append to translated_genre
    for g in set(list_genre):
        translation = translate(g, "genres")[0]
        translated_genre.append(translation)

    # Translate artists using dict and append to translated_album_artists
    for a in set(list_album_artists):
        if tags['ALBUMARTIST'][0] == 'Various Artists':
            translated_artist_name = 'V.A.'
            translated_album_artists.append("V.A.")
        else:
            translated_artist_name = translate(string=tags['ALBUMARTIST'][0], category="artist")
            translated_album_artists.append(translated_artist_name[0])

    ## Identify unique values using sets.
    unique_album_artists = ','.join(set(translated_album_artists))
    unique_track_artists = ','.join(set(list_track_artists))
    unique_genre = ','.join(set(translated_genre))
    unique_album = set(list_album)

    ## Acquire contents of our log file to be used for album description
    # Comments store the album id which matches our log names, so we can use the comment tag to find our album descriptions.
    log_directory = cfg['local_prefs']['log_directory']
    # Album description taken from log file.
    if cfg['local_prefs']['generate_tracklist']:
        log_filename = f"{unique_album_artists} - {tags['ALBUM'][0]}"
        album_description = generatelog(tracklist_entries, log_filename, log_directory)
    else:
        log_filename = tags['COMMENT'][0]
        album_description = readlog(log_filename, log_directory)

    ## If release description is enabled we apply comments to the bugs album url
    # Note that this is dependent on the album being sourced from bugs so should be changed per user.
    if cfg['local_prefs']['enable_release_description']:
        try:
            release_description = f"Sourced from [url=https://music.bugs.co.kr/album/{tags['COMMENT'][0]}]Bugs[/url]"
        # If any exceptions occur we will return to no release description
        except:
            release_description = ""
    # If release description is not enabled we will use no release description
    else:
        release_description = ""

    ## Assign all our unique values into releasedata{}. We'll use this later down the line for POSTING.
    # POST values can be found by inspecting JPS HTML
    releasedata['submit'] = 'true'

    # List of accepted upload types
    accepted_types = ['Album', 'Single']
    # If type errors then we ask for user input
    try:
        releasedata['type'] = translate(tags['GROUPING'][0], "release_types")[0]
    except TypeError:
        releasedata['type'] = input("\n" + "_" * 100 + "\nGrouping is empty or has received an error, please enter manually (Album/Single)\n")

    # If type is still not in accepted_types we ask for user input again and do not break loop until correct
    if releasedata['type'] not in accepted_types:
        while True:
            releasedata['type'] = input("\n" + "_" * 100 + "\nGrouping tag did not return an album type, please enter manually (Album/Single)\n")

            if releasedata['type'] not in accepted_types:
                continue
            else:
                break

    releasedata['title'] = tags['ALBUM'][0]
    releasedata['artist'] = unique_album_artists
    # If the value of album artist and artist is the same, we don't need to POST original artist.
    if unique_album_artists != unique_track_artists:
        releasedata['artistjp'] = unique_track_artists
    # re.sub removes any date separators, jps doesn't accept them
    releasedata['releasedate'] = re.sub(r"[^0-9]", "", tags['DATE'])
    releasedata['format'] = format
    releasedata['bitrate'] = bitrate
    releasedata['media'] = media
    releasedata['album_desc'] = album_description
    releasedata['release_desc'] = release_description
    releasedata['tags'] = unique_genre

    # Enable freeleech if arg is passed
    if freeleech:
        releasedata['freeleech'] = "true"

    ## Language Checks
    # This is a required check as we don't want to enter non-english/romaji characters into the title/artist field.
    en = detectlanguage(releasedata['title'])
    if debug:
        print("_" * 100)
        print("Title/Artist Language:\n")
        print(f"{releasedata['title']} < English = {en}")
    if en == False:
        input_english_title = input("\n" + "_" * 100 + "\nKorean/Japanese Detected. Please enter the romaji/english title:\n")
        # Create new key called titlejp and assign the old title to it
        releasedata['titlejp'] = releasedata['title']
        # Replace title with the user input.
        releasedata['title'] = input_english_title

    en = detectlanguage(releasedata['artist'])
    if debug:
        print(f"{releasedata['artist']} < English = {en}")
    if en == False:
        input_english_artist = input("\n" + "_" * 100 + "\nKorean/Japanese Detected. Please enter the romaji/english artist name:\n")
        # Replace artist with the user input.
        releasedata['artist'] = input_english_artist

    return releasedata

# Simple function to split a string up into characters
def split(word):
    return [char for char in word]


def detectlanguage(string):
    ## Language Detect
    # This is a required check as we don't want to enter non-english/romaji characters into the title field.
    characters = split(string)
    language_list = []
    for c in characters:
        try:
            language = detect(c)
            language_list.append(language)
        except:
            language = "error"

    if 'ko' in language_list or 'ja' in language_list:
        en = False
    else:
        en = True

    return en

def uploadtorrent(torrent, cover, releasedata):

    # POST url.
    uploadurl = "https://jpopsuki.eu/upload.php"

    # Dataset containing all of the information obtained from our FLAC files.
    data = releasedata

    if debug:
        print('_' * 100)
        print('Release Data:\n')
        print(releasedata)

    try:
        postDataFiles = {
            'file_input': open(torrent, 'rb'),
            'userfile': open(cover, 'rb')
        }
    except FileNotFoundError:
        print("_" * 100)
        print('File not found!\nPlease confirm file locations and names. Cover image or .torrent file could not be found')
        sys.exit()

    # If the dryrun argument has not been passed we will POST the results to JPopSuki.
    if dryrun != True:
        JPSres = j.retrieveContent(uploadurl, "post", data, postDataFiles)
        print('\nUpload POSTED')

    ## TODO Filter through JPSres.text and create error handling based on responses
    # print(JPSres.text)

# Function for transferring the contents of the torrent as well as the torrent.
def ftp_transfer(fileSource, fileDestination, directory, folder_name, watch_folder):

    # Create session
    session = ftplib.FTP(cfg['ftp_prefs']['ftp_server'], cfg['ftp_prefs']['ftp_username'], cfg['ftp_prefs']['ftp_password'])
    # Set session encoding to utf-8 so we can properly handle hangul/other special characters
    session.encoding = 'utf-8'

    # Successful FTP Login Print
    print("_" * 100)
    print("FTP Login Successful")
    print(f"Server Name: {cfg['ftp_prefs']['ftp_server']} : Username: {cfg['ftp_prefs']['ftp_username']}\n")

    if cfg['ftp_prefs']['add_to_downloads_folder']:

        # Create folder based on the directory name of the folder within the torrent.
        try:
            session.mkd(f"{fileDestination}/{folder_name}")
            print(f'Created directory {fileDestination}/{folder_name}')
        except ftplib.error_perm:
            pass

        # Notify user we are beginning the transfer.
        print(f"Beginning transfer...")
        # Set current folder to the users preferred destination
        session.cwd(f"{fileDestination}/{folder_name}")
        # Transfer each file in the chosen directory
        for file in os.listdir(directory):
            with open(f"{directory}/{file}", 'rb') as f:
                filesize = os.path.getsize(f"{directory}/{file}")
                ## Transfer file
                # tqdm used for better user feedback.
                with tqdm(unit='blocks', unit_scale=True, leave=False, miniters=1, desc=f'Uploading [{file}]', total=filesize) as tqdm_instance:
                    session.storbinary('STOR ' + file, f, 2048, callback=lambda sent: tqdm_instance.update(len(sent)))
                print(f"{file} | Complete!")
                f.close()

    if cfg['ftp_prefs']['add_to_watch_folder']:
        with open(fileSource, 'rb') as t:
            # Set current folder to watch directory
            session.cwd(watch_folder)
            ## Transfer file
            # We avoid tqdm here due to the filesize of torrent files.
            # Most connections will upload these within 1-3s, resulting in near useless progress bars.
            session.storbinary(f"STOR {torrentfile}", t)
            print(f"{torrentfile} | Sent to watch folder!")
            t.close()
    # Quit session when complete.
    session.quit()

def localfileorganization(torrent, directory, watch_folder, downloads_folder):
    # Move torrent directory to downloads_folder
    if cfg['local_prefs']['add_to_downloads_folder']:
        try:
            os.mkdir(os.path.join(downloads_folder, os.path.basename(directory)))
        except FileExistsError:
            pass
        copytree(directory, os.path.join(downloads_folder, os.path.basename(directory)))
        shutil.rmtree(directory)
    if cfg['local_prefs']['add_to_watch_folder']:
        os.rename(torrent, f"{watch_folder}/{torrent}")

if __name__ == "__main__":

    asciiart()
    args = getargs()

    # TODO consider calling args[] directly, we will then not need this line
    dryrun = freeleech = tags = directory = debug = None

    directory = args.directory
    additional_tags = args.tags

    if args.dryrun:
        dryrun = True

    if args.debug:
        debug = True

    if args.freeleech:
        freeleech = True

    # Load login credentials from JSON and use them to create a login session.
    with open(f'json_data/config.json') as f:
        cfg = json.load(f)
    loginData = {'username': cfg['credentials']['username'], 'password': cfg['credentials']['password']}
    loginUrl = "https://jpopsuki.eu/login.php"
    loginTestUrl = "https://jpopsuki.eu"
    successStr = "Latest 5 Torrents"

    # j is an object which can be used to make requests with respect to the loginsession
    j = jpspy.MyLoginSession(loginUrl, loginData, loginTestUrl, successStr, debug=args.debug)
    # Acquire authkey
    authkey = getauthkey()
    # Gather data of FLAC file
    releasedata = gatherdata(directory)

    # Folder_name equals the last folder in the path, this is used to rename .torrent files to something relevant.
    folder_name = os.path.basename(os.path.normpath(directory))
    # Identifying cover.jpg path
    cover_path = directory + "/" + cfg['local_prefs']['cover_name']

    # Create torrent file.
    torrentfile = createtorrent(authkey, directory, folder_name, releasedata)

    # Upload torrent to JPopSuki
    uploadtorrent(torrentfile, cover_path, releasedata)

    # Setting variable for watch/download folders
    ftp_watch_folder = cfg['ftp_prefs']['ftp_watch_folder']
    ftp_downloads_folder = cfg['ftp_prefs']['ftp_downloads_folder']
    local_watch_folder = cfg['local_prefs']['local_watch_folder']
    local_downloads_folder = cfg['local_prefs']['local_downloads_folder']

    if cfg['ftp_prefs']['enable_ftp']:
        ftp_transfer(fileSource=torrentfile, fileDestination=ftp_downloads_folder, directory=directory, folder_name=folder_name, watch_folder=ftp_watch_folder)

    if cfg['local_prefs']['add_to_watch_folder'] or cfg['local_prefs']['add_to_downloads_folder']:
        localfileorganization(torrent=torrentfile, directory=directory, watch_folder=local_watch_folder, downloads_folder=local_downloads_folder)

+ 117
- 0
jpspy.py View File

@@ -0,0 +1,117 @@
import os
import pickle
import datetime
from urllib.parse import urlparse
import requests


class MyLoginSession:
    def __init__(self,
                 loginUrl,
                 loginData,
                 loginTestUrl,
                 loginTestString,
                 sessionFileAppendix='_session.dat',
                 maxSessionTimeSeconds=30 * 60,
                 proxies=None,
                 userAgent='Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1',
                 debug=False,
                 forceLogin=False,
                 **kwargs):
        """
        save some information needed to login the session

        you'll have to provide 'loginTestString' which will be looked for in the
        responses html to make sure, you've properly been logged in

        'proxies' is of format { 'https' : 'https://user:pass@server:port', 'http' : ...
        'loginData' will be sent as post data (dictionary of id : value).
        'maxSessionTimeSeconds' will be used to determine when to re-login.
        """
        urlData = urlparse(loginUrl)

        self.proxies = proxies
        self.loginData = loginData
        self.loginUrl = loginUrl
        self.loginTestUrl = loginTestUrl
        self.maxSessionTime = maxSessionTimeSeconds
        self.sessionFile = urlData.netloc + sessionFileAppendix
        self.userAgent = userAgent
        self.loginTestString = loginTestString
        self.debug = debug

        self.login(forceLogin, **kwargs)

    def modification_date(self, filename):
        """
        return last file modification date as datetime object
        """
        t = os.path.getmtime(filename)
        return datetime.datetime.fromtimestamp(t)

    def login(self, forceLogin=False, **kwargs):
        """
        login to a session. Try to read last saved session from cache file. If this fails
        do proper login. If the last cache access was too old, also perform a proper login.
        Always updates session cache file.
        """
        wasReadFromCache = False
        if self.debug:
            print('loading or generating session...')
        if os.path.exists(self.sessionFile) and not forceLogin:
            time = self.modification_date(self.sessionFile)

            # only load if file less than 30 minutes old
            lastModification = (datetime.datetime.now() - time).seconds
            if lastModification < self.maxSessionTime:
                with open(self.sessionFile, "rb") as f:
                    self.session = pickle.load(f)
                    wasReadFromCache = True
                    if self.debug:
                        print("loaded session from cache (last access %ds ago) "
                              % lastModification)
        if not wasReadFromCache:
            self.session = requests.Session()
            self.session.headers.update({'user-agent': self.userAgent})
            res = self.session.post(self.loginUrl, data=self.loginData,
                                    proxies=self.proxies, **kwargs)

            if self.debug:
                print('created new session with login')
            self.saveSessionToCache()

        # test login
        res = self.session.get(self.loginTestUrl)
        if res.text.lower().find(self.loginTestString.lower()) < 0:
            if self.debug:
                print(res.text)
            raise Exception("could not log into provided site '%s'"
                            " (did not find successful login string)"
                            % self.loginUrl)

    def saveSessionToCache(self):
        """
        save session to a cache file
        """
        # always save (to update timeout)
        with open(self.sessionFile, "wb") as f:
            pickle.dump(self.session, f)
            if self.debug:
                print('updated session cache-file %s' % self.sessionFile)

    def retrieveContent(self, url, method="get", postData=None, postDataFiles=None, **kwargs):
        """
        return the content of the url with respect to the session.

        If 'method' is not 'get', the url will be called with 'postData'
        as a post request.
        """
        if method == 'get':
            res = self.session.get(url, proxies=self.proxies, **kwargs)
        else:
            res = self.session.post(url, data=postData, proxies=self.proxies, files=postDataFiles, **kwargs)

        # the session has been updated on the server, so also update in cache
        self.saveSessionToCache()

        return res
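
For reference, a minimal usage sketch of the class above, mirroring how autoupload.py drives it; the credentials are placeholders and the success string is simply text that only appears on a logged-in page:

# Minimal usage sketch (not part of jpspy.py; username/password are placeholders).
import jpspy

loginData = {'username': 'username', 'password': 'password'}
j = jpspy.MyLoginSession("https://jpopsuki.eu/login.php", loginData,
                         "https://jpopsuki.eu", "Latest 5 Torrents", debug=True)

# GET re-uses the cached session from *_session.dat when it is under 30 minutes old.
uploadpage = j.retrieveContent("https://jpopsuki.eu/upload.php")

# POSTs pass form data and optional files the same way uploadtorrent() does:
# res = j.retrieveContent("https://jpopsuki.eu/upload.php", "post", data, postDataFiles)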

+ 26
- 0
json_data/config.json.example View File

@@ -0,0 +1,26 @@
{
    "credentials": {
        "username": "username",
        "password": "password"
    },
    "local_prefs": {
        "log_directory": "D:/Bugs/BugsPy-master/logs",
        "generate_tracklist": true,
        "save_tracklist": false,
        "enable_release_description": false,
        "cover_name": "cover.jpg",
        "add_to_watch_folder": false,
        "add_to_downloads_folder": false,
        "local_watch_folder": "C:/watch",
        "local_downloads_folder": "C:/downloads"
    },
    "ftp_prefs": {
        "enable_ftp": false,
        "add_to_watch_folder": true,
        "add_to_downloads_folder": true,
        "ftp_server": "server url",
        "ftp_username": "username",
        "ftp_password": "password",
        "ftp_watch_folder": "/downloads/watch/transmission",
        "ftp_downloads_folder": "/downloads"
    }
}

+ 231
- 0
json_data/dictionary.json View File

@@ -0,0 +1,231 @@
{
"release_types": {
"Album": "Album",
"Single": "Single",
"EP": "Album",
"OST": "Album",
"싱글": "Single",
"EP(미니)": "Album",
"정규": "Album",
"컴필레이션": "Album",
"베스트": "Album",
"미니": "Album"
},
"genres": {
"R&B": "rnb",
"소울": "Soul",
"힙합": "hip.hop",
"랩": "Rap",
"영화": "Movie",
"로맨스": "Romance",
"드라마": "OST",
"TV 드라마": "OST",
"애니메이션": "anime",
"인디": "Indie",
"인디힙합": "Indie Hip-Hop",
"재즈 힙합": "Jazz-Hop",
"댄스 팝": "Dance",
"발라드": "Ballad",
"댄스": "Dance",
"포크 팝": "Folk",
"팝": "Pop",
"팝 락": "Pop.Rock",
"인디 락": "Indie.Rock",
"락": "Rock",
"메탈": "Metal",
"인디 팝": "Indie.Pop",
"일렉트로닉": "Electronic",
"일렉트로닉 팝": "Electro",
"인디일렉트로닉": "Indie.Electronic",
"신스 팝": "Synth-Pop",
"J-POP": "J-Pop",
"재즈": "Jazz",
"성인가요": "Trot",
"월드뮤직": "World Music",
"국악": "Traditional",
"종교": "Religious",
"CCM": "CCM",
"어린이": "Child",
"태교": "Taegyo",
"캐롤": "Christmas",
"트랩": "Trap",
"얼터너티브 팝": "Alternative.Pop",
"얼터너티브": "Alternative",
"뉴에이지": "New Age",
"켈틱": "Celtic",
"켈틱 퓨전": "Celtic.Fusion",
"퓨전": "Fusion",
"에스닉 퓨전": "Ethnic.Fusion",
"레게": "Reggae",
"댄스홀": "Dancehall",
"하우스": "House",
"트로트": "Trot",
"얼터너티브 락": "Alternative.Rock",
"덥": "Dub",
"싸이키델릭": "Psychedelic",
"인스트루멘탈 힙합": "Instrumental.Hip-Hop",
"인스트루멘탈": "Instrumental",
"클래식": "Classic",
"컨트리": "Country",
"종교음악": "Religious",
"전통음악": "Traditional",
"블루스": "Blues",
"라틴": "Latin",
"기타": "Other",
"기능성음악": "Functional",
"인디포크": "indie.folk",
"포크": "Folk",
"어쿠스틱": "Acoustic",
"Hip-Hop": "hip.hop"
},
"artist": {
"오아": "OA",
"이고": "Ego",
"ハルカトミユキ": null,
"琴音": null,
"下村陽子 × suis from ヨルシカ": null,
"川島ケイジ": null,
"裸体": null,
"空音": null,
"さかいゆう": null,
"美波": null,
"アルカラ": null,
"윤상": null,
"ブレッド & バター": null,
"Official髭男dism": null,
"優里": null,
"サニーデイ・サービス": null,
"ずっと真夜中でいいのに。": null,
"やなぎなぎ": null,
"米津玄師": null,
"梶浦由記": null,
"澁谷逆太郎": null,
"ポルカドットスティングレイ": null,
"김트와친구들": null,
"安斉かれん": null,
"坂口有望": null,
"空想委員会": null,
"ヨルシカ": null,
"向井太一": null,
"ペンギンラッシュ": null,
"黒子首": null,
"中島みゆき": null,
"ハリィさんとスイカくらぶ": null,
"堀込高樹": null,
"堀込泰行": null,
"スピラ・スピカ": null,
"17歳とベルリンの壁": null,
"天野月": null,
"ソールドシュガー": null,
"ナンカノユメ": null,
"ルルルルズ": null,
"東京事変": null,
"藍井エイル": null,
"阿部真央": null,
"赤いくらげ": null,
"週末CITY PLAY BOYZ": null,
"林 浩司": null,
"蒼山幸子": null,
"フラスコテーション": null,
"ゑんら": null,
"ハンブレッダーズ": null,
"鈴木このみ": null,
"みゆな": null,
"ビッケブランカ": null,
"めありー": null,
"キタニタツヤ": null,
"イロメガネ": null,
"ヤユヨ": null,
"ピロカルピン": null,
"ツユ": null,
"リリー楽綺団": null,
"山崎ハコ": null,
"いきものがかり": null,
"はるまきごはん": null,
"おくみずき": null,
"渣泥": null,
"竹渕慶": null,
"早見沙織": null,
"倖田來未": null,
"世武裕子": null,
"ラブリーサマーちゃん": null,
"SUPER☆GiRLS": null,
"österreich": null,
"フレデリック": null,
"ズーカラデル": null,
"神山羊": null,
"太田ひな": null,
"ヤバイTシャツ屋さん": null,
"當山みれい": null,
"大森靖子": null,
"大原櫻子": null,
"東京スカパラダイスオーケストラ": null,
"三月のパンタシア": null,
"雨のパレード": null,
"川崎鷹也": null,
"中島 美嘉": null,
"加藤ミリヤ": null,
"りぶ": null,
"雨ニマケテモ": null,
"三浦大知": null,
"コブクロ": null,
"ももいろクローバーZ": null,
"手嶌葵": null,
"Nao☆": null,
"尾崎裕哉": null,
"マーティ・フリードマン": null,
"幾田りら": null,
"山本彩": null,
"ビッケブランカ VS 岡崎体育": null,
"まるりとりゅうが": null,
"藤原さくら": null,
"藤井風": null,
"sicboy": "",
"LUCA & haruka nakamura": "arca",
"伊沢麻未": null,
"マカロニえんぴつ": null,
"チャラン・ポ・ランタン": null,
"鈴木瑛美子": null,
"神はサイコロを振らない": null,
"宇野実彩子": "AAA",
"ウルトラタワー": null,
"空白ごっこ": null,
"Cö shu Nie": null,
"くるり": null,
"流線形 & 一十三十一": null,
"清水翔太": null,
"あれくん": null,
"秋山黄色": null,
"웬디": "WENDY",
"瀧川ありさ": null,
"キリンジ": null,
"ユアネス": null,
"クレナズム": null,
"H△G": null,
"電音部": null,
"武藤彩未": null,
"中島美嘉": null,
"雫": null,
"坂本真綾": null,
"たかやん": null,
"布袋寅泰": null,
"アイラヴミー": null,
"ナナヲアカリ": null,
"福山雅治": null,
"Jacob&よみぃ": null,
"クミコ": null,
"リュックと添い寝ごはん": null,
"眉村ちあき": null,
"ちゃんみな & SKY-HI": null,
"関口シンゴ": null,
"角巻わため": null,
"Snail’s House": null,
"ロザリーナ": null,
"ニノミヤユイ": null,
"シド": null,
"森内寛樹": null,
"TK from 凛として時雨": null,
"スダンナユズユリー": null,
"ヤなことそっとミュート": null
}
}

+ 7
- 0
requirements.txt View File

@@ -0,0 +1,7 @@
bs4 == 0.0.1
langdetect == 1.0.7
mutagen == 1.42.0
requests == 2.22.0
torf == 2.1.0
tqdm == 4.39.0
html5lib
