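"""Import HentaiNexus JSON metadata into an existing SQLite comic-library database.

For every gallery folder under the metadata directory, the first file found is
parsed as JSON and its tags, artist and conventional title are written into the
matching BOOK / SERIES_METADATA rows of database.sqlite. handlexml() at the
bottom builds a ComicInfo.xml from the same metadata but is not wired into the
import loop.
"""
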
import json
import os
import sqlite3
import sys
import traceback
import xml.etree.ElementTree as ET


def handleFile(filename, con):
    with open(filename, encoding='utf-8') as json_file:
        data = json.load(json_file)

        artist = data['artist']
        book = data['book']  # has a book
        category = data['category']  # website name
        count = data['count']  # Page count
        description = data['description']  # Summary
        event = data['event']  # ?
        extension = data['extension']
        filename = data['filename']
        gallery_id = data['gallery_id']  # hn gallery id
        lang = data['lang']  # LanguageISO
        language = data['language']  # Language word
        magazine = data['magazine']  # Magazine
        num = data['num']  # Page number
        parody = data['parody']  # Original work or parody name
        publisher = data['publisher']  # FA##U usually
        subcategory = data['subcategory']  # gallery
        tags = data['tags']  # Array of tags
        thumbnail = None  # thumb url
        title = data['title']  # title without artist
        title_conventional = data['title_conventional']  # title with artist
        bookType = data['type']  # manga...

        cur = con.cursor()

        # Unused earlier attempts at escaping special characters in the title
        # before matching on it with LIKE:
        # book_name = '%' + title.translate(str.maketrans({"-": r"\-",
        #                                                  "]": r"\]",
        #                                                  "\\": r"\\",
        #                                                  "^": r"\^",
        #                                                  "$": r"\$",
        #                                                  "*": r"\*",
        #                                                  ".": r"\.",
        #                                                  "%": r"\%",
        #                                                  "_": r"\_",
        #                                                  "?": r"\?",}))
        # book_name = '%' + title.translate(str.maketrans({"%": r"\%"}))

        # Books are named "<gallery_id> [...]" (e.g. "1 [Renai Sample]"), so
        # "<gallery_id> %" works as a LIKE prefix for matching the imported rows.
        gallery_db_id = '{} %'.format(gallery_id)
        print("Currently Handling: {} {}".format(gallery_id, title_conventional))

        # Attach the gallery's tags to both the matching series and book rows.
        for tag in tags:
            t = (tag, gallery_db_id)
            cur.execute('INSERT INTO SERIES_METADATA_TAG (TAG, SERIES_ID) SELECT ?, BOOK.SERIES_ID FROM BOOK WHERE BOOK.NAME LIKE ?', t)
            cur.execute('INSERT INTO BOOK_METADATA_TAG (TAG, BOOK_ID) SELECT ?, BOOK.ID FROM BOOK WHERE BOOK.NAME LIKE ?', t)

        # Record the artist as the writer (and as the series publisher), then set
        # the series title to the conventional title that includes the artist.
        t2 = (artist, 'writer', gallery_db_id)
        cur.execute('INSERT INTO BOOK_METADATA_AGGREGATION_AUTHOR(NAME, ROLE, SERIES_ID) SELECT ?, ?, BOOK.SERIES_ID FROM BOOK WHERE BOOK.NAME LIKE ?', t2)
        cur.execute('INSERT INTO BOOK_METADATA_AUTHOR(NAME, ROLE, BOOK_ID) SELECT ?, ?, BOOK.ID FROM BOOK WHERE BOOK.NAME LIKE ?', t2)
        cur.execute('UPDATE SERIES_METADATA SET PUBLISHER=? WHERE TITLE_SORT LIKE ?', (artist, gallery_db_id))

        t3 = (title_conventional, gallery_db_id)
        cur.execute('UPDATE SERIES_METADATA SET TITLE=? WHERE TITLE_SORT LIKE ?', t3)


directory_in_str = r'..\Metadata\HentaiNexus'
directory = os.fsencode(directory_in_str)

con = sqlite3.connect('database.sqlite')
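
# Assumed layout (illustrative): one sub-folder per gallery under the metadata
# directory, each holding that gallery's JSON file, e.g.
#   ..\Metadata\HentaiNexus\1 [Renai Sample]\info.json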

try:
    # Feed the first file in each gallery folder to handleFile, committing
    # after every gallery.
    for subdir in os.listdir(directory):
        path = os.path.join(directory, subdir)
        if not os.path.isdir(path):
            continue
        first_file = next((os.path.join(path, f) for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))), None)
        if first_file is None:
            continue
        filename = os.fsdecode(first_file)
        handleFile(filename, con)
        con.commit()
except sqlite3.Error as er:
    print('SQLite error: %s' % (' '.join(er.args)))
    print("Exception class is: ", er.__class__)
    print('SQLite traceback: ')
    exc_type, exc_value, exc_tb = sys.exc_info()
    print(traceback.format_exception(exc_type, exc_value, exc_tb))

con.close()

# To redo the import, clear the rows it previously inserted:
# DELETE FROM BOOK_METADATA_AGGREGATION_AUTHOR;
# DELETE FROM BOOK_METADATA_TAG;
# DELETE FROM BOOK_METADATA_AUTHOR;
# DELETE FROM SERIES_METADATA_TAG;


def handlexml(data):
    # Build a ComicInfo.xml from the same JSON fields that handleFile reads.
    comicinfo_xml = ET.Element('ComicInfo')

    # Prefer the conventional title (with artist), fall back to the bare title.
    title_xml = ET.SubElement(comicinfo_xml, 'Title')
    if data['title_conventional']:
        title_xml.text = data['title_conventional']
    elif data['title']:
        title_xml.text = data['title']

    writer_xml = ET.SubElement(comicinfo_xml, 'Writer')
    writer_xml.text = data['artist']
    number_xml = ET.SubElement(comicinfo_xml, 'Number')
    number_xml.text = str(data['num'])
    summary_xml = ET.SubElement(comicinfo_xml, 'Summary')
    summary_xml.text = data['description']
    series_xml = ET.SubElement(comicinfo_xml, 'Series')            # left empty for now
    publisher_xml = ET.SubElement(comicinfo_xml, 'Publisher')
    publisher_xml.text = data['publisher']
    manga_xml = ET.SubElement(comicinfo_xml, 'Manga')              # left empty for now
    genre_xml = ET.SubElement(comicinfo_xml, 'Genre')
    genre_xml.text = ', '.join(data['tags'])                       # tags folded into Genre
    languageIso_xml = ET.SubElement(comicinfo_xml, 'LanguageISO')
    languageIso_xml.text = data['lang']
    seriesGroup_xml = ET.SubElement(comicinfo_xml, 'SeriesGroup')  # left empty for now

    # create a new XML file with the results
    mydata = ET.tostring(comicinfo_xml, encoding='unicode', method='xml')
    with open("ComicInfo.xml", "w", encoding='utf-8') as myfile:
        myfile.write(mydata)
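
# Example usage (sketch): handlexml is not called by the import loop above; it
# can be invoked separately with a parsed metadata file. The 'info.json' path
# here is illustrative only.
#
# with open('info.json', encoding='utf-8') as json_file:
#     handlexml(json.load(json_file))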