I scraped a website, collected all the data from it, and now I want to store it in a JSON file so I can use it as an API. The problem is that when the script writes the dicts to the JSON file they come out duplicated, because I call json.dumps inside a for loop.
How can I append to the chapters list while keeping the title the same? (I have more books as well, so eventually the file should be a list of book objects.) This is what I am trying to produce:
{
  "title": "Kingdom",
  "chapters": [
    {
      "chapter-title": "Kingdom - 12",
      "images": [
        "image1.jpg",
        "image2.jpg",
        "image3.jpg"
      ]
    },
    {
      "chapter-title": "Kingdom - 13",
      "images": [
        "image1.jpg",
        "image2.jpg",
        "image3.jpg"
      ]
    },
    {
      "chapter-title": "Kingdom - 14",
      "images": [
        "image1.jpg",
        "image2.jpg",
        "image3.jpg"
      ]
    },
    {
      "chapter-title": "Kingdom - 15",
      "images": [
        "image1.jpg",
        "image2.jpg",
        "image3.jpg"
      ]
    }
  ]
}
and this is what I actually get:
{
  "title": "Kingdom",
  "chapters": [
    {
      "chapter-title": "Kingdom - 12",
      "images": [
        "image1.jpg",
        "image2.jpg",
        "image3.jpg"
      ]
    }
  ]
}{
  "title": "Kingdom",
  "chapters": [
    {
      "chapter-title": "Kingdom - 13",
      "images": [
        "image1.jpg",
        "image2.jpg",
        "image3.jpg"
      ]
    }
  ]
}{
  "title": "Kingdom",
  "chapters": [
    {
      "chapter-title": "Kingdom - 14",
      "images": [
        "image1.jpg",
        "image2.jpg",
        "image3.jpg"
      ]
    }
  ]
}
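So the file ends up holding several JSON documents concatenated together, which is not a single valid JSON document; a quick check (assuming the manga.json produced above) fails immediately:

import json

with open("manga.json", encoding="utf-8") as f:
    json.load(f)  # raises json.JSONDecodeError: Extra data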
And here is the code that produces it (links is a list of chapter URLs built earlier in the script):

import json

from selenium import webdriver
from selenium.webdriver.chrome.options import Options


def getAllImages(url=""):
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    driver = webdriver.Chrome(chrome_options=chrome_options)
    try:
        driver.get(url)
        driver.implicitly_wait(2)
    except Exception as e:
        print("Error Getting Images Page :", e)
    print(driver.title)
    # Collect every image URL on the chapter page
    divs = driver.find_elements_by_class_name("page-break ")
    images = []
    for div in divs:
        for img in div.find_elements_by_tag_name("img"):
            images.append(img.get_attribute("src").strip())
    chapter = {"chapter-title": driver.title, "images": images}
    # A brand-new book object is created on every call, so each
    # chapter gets wrapped in its own {"title": ..., "chapters": [...]}
    book = [{"title": "Kingdom", "chapters": []}]
    book[0]["chapters"].append(chapter)
    toJson = json.dumps(book, ensure_ascii=False, indent=2)
    # ...and a separate JSON document is appended to the file each time
    with open("./" + "manga.json", "r+") as f:
        if len(f.read()) == 0:
            f.write(toJson)
        else:
            f.write(",\n" + toJson)
    print("Successfully created ")


for url in links:
    getAllImages(url)
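For reference, this is a minimal sketch of what I think the fix should look like, assuming links is defined as above: scrape each chapter into a dict, append every dict to one shared book object, and call json.dump exactly once after the loop. get_chapter here is just my getAllImages with the file writing pulled out:

import json

from selenium import webdriver
from selenium.webdriver.chrome.options import Options


def get_chapter(url):
    # Same scraping logic as getAllImages, minus the file writing
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    driver = webdriver.Chrome(chrome_options=chrome_options)
    driver.get(url)
    driver.implicitly_wait(2)
    images = []
    for div in driver.find_elements_by_class_name("page-break "):
        for img in div.find_elements_by_tag_name("img"):
            images.append(img.get_attribute("src").strip())
    chapter = {"chapter-title": driver.title, "images": images}
    driver.quit()
    return chapter


# One shared book object: every chapter is appended to the same
# "chapters" list instead of a fresh one per call. More books can
# later be appended to the outer list.
books = [{"title": "Kingdom", "chapters": []}]
for url in links:
    books[0]["chapters"].append(get_chapter(url))

# Serialize once, after the loop, so manga.json contains a single
# valid JSON document
with open("manga.json", "w", encoding="utf-8") as f:
    json.dump(books, f, ensure_ascii=False, indent=2)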