Snippets
Created by
Andrew
from bs4 import BeautifulSoup
import warnings
warnings.filterwarnings("ignore", category=UserWarning, module='bs4')
from scrape import UrlCreator, get_list_of_dicts_from_csv_file, CSV_FILE_PATH, CATEGORY_INDEX
import requests
import urllib.parse
import logging as logger
import MySQLdb
import os
import time

timestr = time.strftime("%m-%d-%Y_%H:%M:%S")

# Browser-like User-Agent so the target site serves the normal HTML page.
header = {
    'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"}

logger.basicConfig(filename='/Users/coffeeman/Documents/Projects/scrapev2/logs/list.log',
                   filemode='w', level=logger.INFO,
                   format='%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s',
                   datefmt="%Y-%m-%d %H:%M:%S")
number_of_pages = 1
data_from_csv = get_list_of_dicts_from_csv_file(CSV_FILE_PATH)

# Build the search URLs, keeping the brand and category alongside each URL so
# DataExtractor can be constructed with them later in __main__.
url_list = []
for data in data_from_csv:
    url_creator = UrlCreator(data['brand'], data['code'], number_of_pages)
    url_list.append({'url': url_creator.create_url(),
                     'brand': data['brand'],
                     # Assumption: the CSV 'code' column doubles as the category.
                     'category': data['code']})
class PageExtractor:
    def __init__(self, url):
        self.url = url

    def get_number_of_pages(self):
        # Fetch the page directly; the proxied do_req() call was removed.
        response = requests.get(self.url, headers=header).text
        soup = BeautifulSoup(response, features='lxml')
        page = soup.find('ul', {'class': 'pagination'})
        try:
            number_of_pages = page.find('a', {'id': 'last'})['data-page']
        except (AttributeError, TypeError, KeyError):
            # No pagination widget on the page, so assume a single page.
            number_of_pages = 1
        return number_of_pages
class DataExtractor:
    def __init__(self, url, brand, category):
        self.url = url
        self.brand = urllib.parse.quote(brand)
        self.category = category
    def extract(self):
        db = MySQLdb.connect(host='127.0.0.1', user='thriftuser', passwd='kYJbqzI2Bvp41tp', db='thrift_db')
        cur = db.cursor()
        alljobs = []
        logger.info("Fetching: %s", self.url)
        response = requests.get(self.url, headers=header).text
        soup = BeautifulSoup(response, features='lxml')
        # Each listing on the results page is an <li class="widget"> element.
        jobs = soup.find_all('li', class_='widget')
        logger.info("Found %d listings", len(jobs))
        for job in jobs:
            try:
                designer = self.brand
                searchURL = 'http://nullrefer.com/?' + self.url
                author_id = '1'
                # Keep only the first line of the title, trimmed.
                title = job.find('div', attrs={'class': 'title'}).text.strip().splitlines()[0]
                link = job.find('a', attrs={'class': 'product'})['href']
                productUrl = 'http://blankrefer.com/?https://www.shopgoodwill.com' + link
                imageUrl = job.find('img', attrs={'class': 'lazy-load'})['src']
                # Same slice offsets as the original, dropping the currency prefix.
                price = job.find('div', attrs={'class': 'price'}).text[3:10]
                countdown = job.find(attrs={'class': 'product-countdown'}).get('data-countdown')
                script = self.category
                published_date = time.strftime("%Y-%m-%d %H:%M:%S.000000")
                icon = job.find('img', attrs={'class': 'lazy-load'})['data-src']
                # Images are grouped into one folder per category and hour.
                savedir = time.strftime('%m%d%Y_%H')
                scriptName = self.category
                imagesFolder = '/mnt/images/'
                shortpath = scriptName + '_' + savedir
                savepath = os.path.join(imagesFolder, shortpath)
                os.makedirs(savepath, exist_ok=True)
                imageName = imageUrl.rsplit('/', 1)[-1]
                imagePath = os.path.join(savepath, imageName)
                try:
                    image_response = requests.get(imageUrl, headers=header)
                    with open(imagePath, 'wb') as f:
                        f.write(image_response.content)
                except Exception as e:
                    logger.error("Image download failed: %s", e)
                awsimageUrl = 'https://s3-us-west-2.amazonaws.com/imagekickbucket/' + shortpath + '/' + imageName
            except Exception as e:
                # Skip this listing entirely so the insert below never sees
                # half-populated variables.
                logger.error("Listing parse error: %s", e)
                continue
            currentJob = {
                'designer': designer,
                'searchURL': searchURL,
                'author_id': author_id,
                'title': title,
                'productUrl': productUrl,
                'awsimageUrl': awsimageUrl,
                'imagePath': imagePath,
                'price': price,
                'countdown': countdown,
                'script': script,
                'savepath': savepath,
                'published_date': published_date,
                'icon': icon,
            }
            alljobs.append(currentJob)
            logger.info("currentJob: %s", currentJob)
            try:
                cur.execute(
                    "INSERT INTO `blog_post` (designer, searchURL, author_id, title, productUrl, awsimageUrl, "
                    "imagePath, price, countdown, script, savepath, published_date, icon) "
                    "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
                    (designer, searchURL, author_id, title, productUrl, awsimageUrl, imagePath, price,
                     countdown, script, savepath, published_date, icon))
                db.commit()
            except MySQLdb.Error as e:
                logger.error("MySQLdb error: %s", e)
        logger.info("extract ended: %s", timestr)
        db.close()
        return alljobs
if __name__ == "__main__":
    for entry in url_list:
        page_extractor = PageExtractor(entry['url'])
        # number_of_pages is computed but only the first results page is scraped.
        number_of_pages = page_extractor.get_number_of_pages()
        results = DataExtractor(entry['url'], entry['brand'], entry['category']).extract()
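
# --- Setup sketch (assumption, not part of the original snippet) ---
# The INSERT in DataExtractor.extract() expects a `blog_post` table to exist.
# The column names below come from that query; the types and sizes are guesses
# and may need adjusting to match the real schema.
def create_blog_post_table():
    db = MySQLdb.connect(host='127.0.0.1', user='thriftuser', passwd='kYJbqzI2Bvp41tp', db='thrift_db')
    cur = db.cursor()
    cur.execute(
        "CREATE TABLE IF NOT EXISTS `blog_post` ("
        " id INT AUTO_INCREMENT PRIMARY KEY,"
        " designer VARCHAR(255), searchURL TEXT, author_id INT, title TEXT,"
        " productUrl TEXT, awsimageUrl TEXT, imagePath TEXT, price VARCHAR(32),"
        " countdown VARCHAR(64), script VARCHAR(64), savepath TEXT,"
        " published_date DATETIME(6), icon TEXT)")
    db.commit()
    db.close()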