From c36e3d75ff99a1b9801fc16e065415ed895b4752 Mon Sep 17 00:00:00 2001
From: Luke Ogburn <21106956+logburn@users.noreply.github.com>
Date: Wed, 13 Apr 2022 22:56:16 -0400
Subject: [PATCH] hopefully neuter works

---
 README.md   |  1 +
 autotoot.py |  4 ++--
 bot.py      | 15 ++++++++++++---
 scraper.py  |  7 ++++---
 4 files changed, 19 insertions(+), 8 deletions(-)

diff --git a/README.md b/README.md
index a1bd86e..8e18702 100644
--- a/README.md
+++ b/README.md
@@ -21,6 +21,7 @@ you can run `pip3 install Mastodon.py praw` to install both of these.
 
 **Likely**
 - [ ] Keep track of what has been scraped and tooted to not duplicate posts
+- [ ] Actually add neuter settings for development
 - [ ] Debugging logging
 - [ ] Move all vars into config
 - [ ] Docker image
diff --git a/autotoot.py b/autotoot.py
index 6f2f7f2..fe686c8 100644
--- a/autotoot.py
+++ b/autotoot.py
@@ -18,8 +18,8 @@ def main():
     # get config
     config = json.load(open('config.json', 'r'))
     # make bots
-    masto = bot(config)
-    reddit = scraper("reddit", config, low_activity_random=True)
+    masto = bot(config, neuter=True)
+    reddit = scraper("reddit", config, neuter=True)
     # run bots
     run(masto, reddit)
     # buffer time bc posts only happen so often so why check
diff --git a/bot.py b/bot.py
index 7a45bf0..d3c7dd7 100644
--- a/bot.py
+++ b/bot.py
@@ -3,14 +3,20 @@ import logging
 
 # Mastodon bot to post things
 class bot():
-    def __init__(self, config):
+    def __init__(self, config, neuter=False):
+        self.neuter = neuter
         self.masto = Mastodon(access_token=config["mastodon"]["access_token"], api_base_url=config["mastodon"]["host"])
 
     # uploads media to mastodon, returns the mastodon ID
     # specify mimetype of video files as "video/mp4" to avoid error
     def upload_media(self, filename, mimetype=None):
         logging.info(f"Uploading media {filename}")
-        return self.masto.media_post(filename, mime_type=mimetype)
+        if not self.neuter:
+            returnval = self.masto.media_post(filename, mime_type=mimetype)
+        else:
+            print(f"Would have uploaded {filename}")
+            returnval = True
+        return returnval
 
     # uploads all given media
     def upload_all_media(self, filenames):
@@ -21,4 +27,7 @@ class bot():
 
     def toot(self, text, media=None):
         logging.info(f"Posting:\n Text: {text}")
-        self.masto.status_post(text, media_ids=media)
+        if not self.neuter:
+            self.masto.status_post(text, media_ids=media)
+        else:
+            print(f"Would have posted {text}")
diff --git a/scraper.py b/scraper.py
index 38e104e..a4e1f13 100644
--- a/scraper.py
+++ b/scraper.py
@@ -4,7 +4,7 @@ import json
 from reddit import reddit_scraper as reddit
 
 class scraper:
-    def __init__(self, service, config, low_activity_random=False):
+    def __init__(self, service, config, neuter=False):
         # error checking
         scrapers = ["reddit"]
         if service.lower() not in scrapers:
@@ -16,8 +16,8 @@ class scraper:
             f = open("savefile.json", "w+")
             f.write("{}")
         # set object variables
-        self.low_activity_random = low_activity_random
         self.service = service
+        self.neuter = neuter
         # login to service
         if service == "reddit":
             self.login = reddit(config)
@@ -35,7 +35,8 @@ class scraper:
 
     # downloads a given post's media and return the locations
     def download(self, post):
         logging.info(f"Downloading {post.id}... ")
-        self.login.download(post)
+        if not self.neuter: self.login.download(post)
+        else: print(f"Neuter: would have downloaded {post} content")
         logging.info(f"Done downloading {post.id}.")
         return result