diff --git a/README.md b/README.md
index 5d8f07e..837aa47 100644
--- a/README.md
+++ b/README.md
@@ -20,8 +20,7 @@ you can run `pip3 install Mastodon.py praw` to install both of these.
 - [x] Separate methods methods to make code cleaner
 
 **Likely**
-- [ ] Keep track of what has been scraped and tooted to not duplicate posts
-  - This is a pain in the butt for some reason
+- [?] Keep track of what has been scraped and tooted to not duplicate posts
 - [ ] Debugging logging
 - [ ] Move all vars into config
 - [ ] Docker image
diff --git a/bot.py b/bot.py
index 99a5406..2c11cff 100644
--- a/bot.py
+++ b/bot.py
@@ -3,8 +3,8 @@ import logging
 # Mastodon bot to post things
 class bot():
-    def __init__(self, config, debug=False):
-        self.debug = debug
+    def __init__(self, config, neuter=False):
+        self.neuter = neuter
         self.masto = Mastodon(access_token=config["mastodon"]["access_token"], api_base_url=config["mastodon"]["host"])
 
     # uploads media to mastodon, returns the mastodon ID
@@ -17,9 +17,15 @@ class bot():
     def upload_all_media(self, filenames):
         ids = []
         for fn in filenames:
-            ids.append(self.upload_media(fn))
+            if not self.neuter:
+                ids.append(self.upload_media(fn))
+            else:
+                print(f"Would have uploaded {fn}")
         return ids
 
     def toot(self, text, media=None):
         logging.info(f"Posting:\n Text: {text}")
-        self.masto.status_post(text, media_ids=media)
+        if not self.neuter:
+            self.masto.status_post(text, media_ids=media)
+        else:
+            print(f"Would have tooted: {text}")
diff --git a/helper.py b/helper.py
index 81ab600..17c1d34 100644
--- a/helper.py
+++ b/helper.py
@@ -13,7 +13,6 @@ class helper():
     # service to pass itself in every time
     service = service.service
     low_activity_random = service.low_activity_random
-    debug = service.debug
     places = service.places
     seent = service.seent
 
diff --git a/reddit.py b/reddit.py
index d9c80ca..fd0bf3c 100644
--- a/reddit.py
+++ b/reddit.py
@@ -17,19 +17,17 @@ class reddit_scraper:
             savefile = json.load(savefile)
             try: self.seent = savefile["reddit"]
             except: self.seent = {}
-
-
-        ### REDDIT METHODS
+    # gets posts from a given subreddit
     def scrape(self, sub, limit):
         # make sure self.seent has the sub, add if not
         if sub not in self.seent:
             self.seent[sub] = time.time()
-        # get posts that aren't in seent list
+        # get posts that aren't seent
         post_list = []
         posts = self.login.subreddit(sub).new(limit=limit)
         posts = helper.reddit_listify(posts)
         for p in posts[::-1]:
-            if helper.ts_older(p.created, self.seent[sub]):
+            if helper.ts_older(self.seent[sub], p.created):
                 break
             logging.info(f"Scraping post {p.id}")
             post_list.append(p)
diff --git a/scraper.py b/scraper.py
index efe4370..84b142b 100644
--- a/scraper.py
+++ b/scraper.py
@@ -35,7 +35,7 @@ class scraper:
 
     # downloads a given post's media and return the locations
     def download(self, post):
         logging.info(f"Downloading {post.id}... ")
-        result = self.login.download(post)
+        result = [] #self.login.download(post) neuter
         logging.info(f"Done downloading {post.id}.")
         return result