mirror of
https://github.com/saurabhan/Wallhaven-dl.git
synced 2026-01-18 07:48:02 +00:00
Merge pull request #16 from Tempest042/patch-1
Updates to work with Wallhaven Post Alpha
This commit is contained in:
@@ -4,29 +4,25 @@
|
|||||||
# #
|
# #
|
||||||
# Author - Saurabh Bhan #
|
# Author - Saurabh Bhan #
|
||||||
# #
|
# #
|
||||||
# dated- 26 June 2016 #
|
# Dated- 26 June 2016 #
|
||||||
# Update - 29 June 2016 #
|
# Update - 11 June 2019 #
|
||||||
########################################################
|
########################################################
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import getpass
|
import getpass
|
||||||
import bs4
|
|
||||||
import re
|
import re
|
||||||
import requests
|
import requests
|
||||||
import tqdm
|
import tqdm
|
||||||
import time
|
import time
|
||||||
import urllib
|
import urllib
|
||||||
|
import json
|
||||||
|
|
||||||
# Ensure the download target directory exists (idempotent on reruns).
os.makedirs('Wallhaven', exist_ok=True)

# Module-level state shared by the mode-selection functions below:
# BASEURL holds the paginated query (a page number gets appended later),
# cookies holds any session cookies passed along with HTTP requests.
BASEURL = ""
cookies = {}
||||||
# Wallhaven v1 API key — replaces the old interactive username/password
# login flow that was removed in this update.
# NOTE(review): `global` is a no-op at module scope; kept from upstream.
global APIKEY
APIKEY = "EnterYourAPIKeyHere"
|
||||||
def category():
|
def category():
|
||||||
global BASEURL
|
global BASEURL
|
||||||
@@ -63,51 +59,46 @@ def category():
|
|||||||
ptags = {'sfw':'100', 'sketchy':'010', 'nsfw':'001', 'ws':'110', 'wn':'101', 'sn':'011', 'all':'111'}
|
ptags = {'sfw':'100', 'sketchy':'010', 'nsfw':'001', 'ws':'110', 'wn':'101', 'sn':'011', 'all':'111'}
|
||||||
ptag = ptags[pcode]
|
ptag = ptags[pcode]
|
||||||
|
|
||||||
if pcode in ['nsfw', 'wn', 'sn', 'all']:
|
BASEURL = 'https://wallhaven.cc/api/v1/search?apikey=' + APIKEY + "&categories=" +\
|
||||||
login()
|
|
||||||
|
|
||||||
BASEURL = 'https://alpha.wallhaven.cc/search?categories=' + \
|
|
||||||
ctag + '&purity=' + ptag + '&page='
|
ctag + '&purity=' + ptag + '&page='
|
||||||
|
|
||||||
def latest():
    """Point BASEURL at the Wallhaven toplist feed.

    Builds the paginated v1 search URL sorted by toplist over the last
    month ('1M'); downloadPage() appends the page number afterwards.
    """
    global BASEURL
    print('Downloading latest')
    topListRange = '1M'
    BASEURL = (
        f'https://wallhaven.cc/api/v1/search?apikey={APIKEY}'
        f'&topRange={topListRange}&sorting=toplist&page='
    )
|
||||||
def search():
    """Prompt for a search query and point BASEURL at matching results.

    The query is URL-encoded with quote_plus; downloadPage() appends the
    page number to the resulting v1 search URL.
    """
    global BASEURL
    query = input('Enter search query: ')
    encoded = urllib.parse.quote_plus(query)
    BASEURL = f'https://wallhaven.cc/api/v1/search?apikey={APIKEY}&q={encoded}&page='
||||||
|
|
||||||
def downloadPage(pageId, totalImage):
    """Download every wallpaper listed on one API result page.

    Fetches page *pageId* of the query held in BASEURL, parses the JSON
    response, and saves each wallpaper under the Wallhaven/ directory,
    skipping files that already exist on disk.

    Args:
        pageId: 1-based page number appended to BASEURL.
        totalImage: total image count across all pages (progress display only).
    """
    # Separate name for the listing URL so it is not clobbered by the
    # per-image URL inside the loop (the original reused `url` for both).
    pageUrl = BASEURL + str(pageId)
    urlreq = requests.get(pageUrl, cookies=cookies)
    pageData = json.loads(urlreq.content)["data"]

    for i, image in enumerate(pageData):
        # Wallhaven serves 24 results per page; derive a global 1-based counter.
        currentImage = ((pageId - 1) * 24) + (i + 1)
        imgUrl = image["path"]
        filename = os.path.basename(imgUrl)
        osPath = os.path.join('Wallhaven', filename)
        if not os.path.exists(osPath):
            imgreq = requests.get(imgUrl, cookies=cookies)
            if imgreq.status_code == 200:
                print("Downloading : %s - %s / %s" % (filename, currentImage , totalImage))
                # 'wb' (not 'ab'): start a fresh file rather than appending
                # to a partial download left behind by an interrupted run.
                with open(osPath, 'wb') as imageFile:
                    for chunk in imgreq.iter_content(1024):
                        imageFile.write(chunk)
            # 403/404 are expected for removed/restricted images; only report
            # other failure codes.
            elif imgreq.status_code != 403 and imgreq.status_code != 404:
                print("Unable to download %s - %s / %s" % (filename, currentImage , totalImage))
        else:
            print("%s already exist - %s / %s" % (filename, currentImage , totalImage))
||||||
|
|
||||||
def main():
|
def main():
|
||||||
Choice = input('''Choose how you want to download the image:
|
Choice = input('''Choose how you want to download the image:
|
||||||
|
|||||||
Reference in New Issue
Block a user