gitignore amendments, indentation and spacing

commit 28185eb7be (parent 76df7378f3)
Author: Dascienz
Date:   2019-08-18 13:49:08 -04:00
3 changed files with 52 additions and 40 deletions

.gitignore

@@ -1,4 +1,12 @@
+# Exclude Files
 *.DS_Store
 *.csv
 *.json
 *.txt
+*.tsv
+*.xls
+*.xlsx
+
+# Exclude Directories
+__pycache__/
+.ipynb_checkpoints/

@@ -38,3 +38,5 @@ From within `/phpBB_scraper/`:
 
 `scrapy crawl phpBB` to launch the crawler.
 `scrapy crawl phpBB -o posts.csv` to launch the crawler and save results to CSV.
+
+NOTE: Please adjust `settings.py` to throttle your requests.
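
For reference, the kind of throttling the NOTE points at lives in Scrapy's `settings.py`; the sketch below is illustrative only (the specific values are assumptions, not this project's actual settings):

# settings.py -- illustrative throttling options (values are assumptions)
ROBOTSTXT_OBEY = True
DOWNLOAD_DELAY = 2.0                 # seconds between requests to the same domain
CONCURRENT_REQUESTS_PER_DOMAIN = 1

# Scrapy's AutoThrottle extension adapts the delay to server response times.
AUTOTHROTTLE_ENABLED = True
AUTOTHROTTLE_START_DELAY = 2.0
AUTOTHROTTLE_MAX_DELAY = 30.0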


@@ -16,7 +16,8 @@ class PhpbbSpider(scrapy.Spider):
     def parse(self, response):
         # LOGIN TO PHPBB BOARD AND CALL AFTER_LOGIN
         if self.form_login:
-            formdata = {'username':self.username,'password':self.password}
+            formdata = {'username': self.username,
+                        'password': self.password}
             form_request = [scrapy.FormRequest.from_response(response,
                             formdata=formdata,
                             callback=self.after_login,
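
The reformatted `formdata` lines are part of Scrapy's standard form-login pattern; a minimal self-contained sketch of that pattern, with a hypothetical spider name, login URL, credentials, and failure marker (none of these are taken from this project):

import scrapy

class LoginExampleSpider(scrapy.Spider):
    # Hypothetical spider showing the FormRequest.from_response login flow.
    name = 'login_example'
    start_urls = ['https://forum.example.com/ucp.php?mode=login']  # assumed login URL

    def parse(self, response):
        # Fill and submit the login form found in the response.
        formdata = {'username': 'someuser',      # assumed form field names
                    'password': 'somepassword'}
        yield scrapy.FormRequest.from_response(response,
                                               formdata=formdata,
                                               callback=self.after_login)

    def after_login(self, response):
        # Bail out if the board rejected the credentials (marker text is an assumption).
        if b'Invalid username or password' in response.body:
            self.logger.error('Login failed')
            return
        # ...continue crawling from the authenticated session...
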
@@ -46,18 +47,17 @@ class PhpbbSpider(scrapy.Spider):
             yield scrapy.Request(response.urljoin(link), callback=self.parse_posts)
 
         # IF NEXT PAGE EXISTS, FOLLOW
-        Next = response.xpath("//li[@class='next']//a[@rel='next']/@href").extract_first()
-        if Next:
-            yield scrapy.Request(response.urljoin(Next),callback=self.parse_topics)
+        next_link = response.xpath('//li[@class="next"]//a[@rel="next"]/@href').extract_first()
+        if next_link:
+            yield scrapy.Request(response.urljoin(next_link), callback=self.parse_topics)
 
     def clean_quote(self, string):
         # CLEAN HTML TAGS FROM POST TEXT, MARK QUOTES
         soup = BeautifulSoup(string, 'lxml')
-        blockQuotes = soup.find_all('blockquote')
-        for i, quote in enumerate(blockQuotes):
-            blockQuotes[i] = '<quote-%s>=' + str(i) + quote.get_text()
-        text = ''.join(blockQuotes)
-        return text
+        block_quotes = soup.find_all('blockquote')
+        for i, quote in enumerate(block_quotes):
+            block_quotes[i] = '<quote-%s>=' + str(i) + quote.get_text()
+        return ''.join(block_quotes)
 
     def clean_text(self, string):
         # CLEAN HTML TAGS FROM POST TEXT, MARK REPLIES TO QUOTES
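
As a quick sanity check on the renamed helper, the same BeautifulSoup steps can be run standalone; the HTML snippet below is a made-up post used only for illustration:

from bs4 import BeautifulSoup

# Made-up post body with one quoted block (uses the lxml parser, as in the spider).
html = '<div class="content"><blockquote>quoted text</blockquote><p>the reply</p></div>'

soup = BeautifulSoup(html, 'lxml')
block_quotes = soup.find_all('blockquote')
for i, quote in enumerate(block_quotes):
    # Same marker scheme as clean_quote: tag each quote with its index.
    block_quotes[i] = '<quote-%s>=' + str(i) + quote.get_text()

print(''.join(block_quotes))  # prints: <quote-%s>=0quoted text
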
@@ -66,24 +66,26 @@ class PhpbbSpider(scrapy.Spider):
         for tag in tags:
             for i, item in enumerate(soup.find_all(tag)):
                 item.replaceWith('<reply-%s>=' + str(i))
-        text = re.sub(' +',' ',soup.get_text())
-        return text
+        return re.sub(r' +', r' ', soup.get_text())
 
     def parse_posts(self, response):
         # COLLECT FORUM POST DATA
         usernames = response.xpath('//p[@class="author"]//a[@class="username"]//text()').extract()
-        postCounts = response.xpath('//dd[@class="profile-posts"]//a/text()').extract()
-        postTimes = response.xpath('//p[@class="author"]/text()').extract()
-        postTexts = response.xpath('//div[@class="postbody"]//div[@class="content"]').extract()
-        postQuotes = [self.clean_quote(s) for s in postTexts]
-        postTexts = [self.clean_text(s) for s in postTexts]
+        post_counts = response.xpath('//dd[@class="profile-posts"]//a/text()').extract()
+        post_times = response.xpath('//p[@class="author"]/text()').extract()
+        post_texts = response.xpath('//div[@class="postbody"]//div[@class="content"]').extract()
+        post_quotes = [self.clean_quote(s) for s in post_texts]
+        post_texts = [self.clean_text(s) for s in post_texts]
 
         #YIELD POST DATA
         for i in range(len(usernames)):
-            yield {'User':usernames[i],'Count':postCounts[i],
-                   'Time':postTimes[i],'Post Text':postTexts[i],'Quote Text':postQuotes[i]}
+            yield {'Username': usernames[i],
+                   'PostCount': post_counts[i],
+                   'PostTime': post_times[i],
+                   'PostText': post_texts[i],
+                   'QuoteText': post_quotes[i]}
 
         # CLICK THROUGH NEXT PAGE
-        Next = response.xpath("//li[@class='next']//a[@rel='next']/@href").extract_first()
-        if Next:
-            yield scrapy.Request(response.urljoin(Next),callback=self.parse_posts)
+        next_link = response.xpath('//li[@class="next"]//a[@rel="next"]/@href').extract_first()
+        if next_link:
+            yield scrapy.Request(response.urljoin(next_link), callback=self.parse_posts)
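
With the renamed item keys, a run such as `scrapy crawl phpBB -o posts.csv` should now export one row per post with columns named after the new keys, presumably in the yield order above:

Username,PostCount,PostTime,PostText,QuoteText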