From 1c09b57c70214583a1b52c0aee9ccdb9f375a802 Mon Sep 17 00:00:00 2001
From: Falko Zurell
Date: Tue, 18 Feb 2025 12:26:32 +0100
Subject: [PATCH] Fixed to keep state in a file

---
 .gitignore |   1 +
 mbot.py    | 101 +++++++++++++++++++++++++++++++----------------------
 2 files changed, 61 insertions(+), 41 deletions(-)

diff --git a/.gitignore b/.gitignore
index 5b6c096..36a82ca 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
 config.toml
+replied_mentions.txt
diff --git a/mbot.py b/mbot.py
index 49ebf69..c93179d 100644
--- a/mbot.py
+++ b/mbot.py
@@ -1,59 +1,78 @@
 import toml
-from mastodon import Mastodon
-import time
+import requests
+import mastodon
+import os
 import html2text
-from ollama import Client
+import ollama
+import time
 
-# Load configuration from config.toml
-with open('config.toml', 'r') as f:
-    config = toml.load(f)
+CONFIG_PATH = 'config.toml'
+STATE_FILE = 'replied_mentions.txt'
 
-mastodon_config = config['mastodon']
-ollama_config = config['ollama']
-preamble = config.get('preamble', '')
+def load_config(path):
+    with open(path, 'r') as file:
+        return toml.load(file)
 
-# Initialize Mastodon client
-mastodon = Mastodon(
-    access_token=mastodon_config['access_token'],
-    api_base_url=mastodon_config['host']
-)
+def load_state():
+    if not os.path.exists(STATE_FILE):
+        return set()
+    with open(STATE_FILE, 'r') as file:
+        return set(line.strip() for line in file.readlines())
 
-# Function to send text to Ollama and get a response
-def ask_ollama(prompt):
-    client = Client(
-        host=f"http://{ollama_config['host']}:{ollama_config['port']}"
+def save_state(state):
+    with open(STATE_FILE, 'w') as file:
+        for item in state:
+            file.write(f"{item}\n")
+
+def send_to_ollama(prompt, config):
+    client = ollama.Client(
+        host=f"http://{config['ollama']['host']}:{config['ollama']['port']}"
     )
     try:
-        response = client.generate(f"{ollama_config['model']}", f"{preamble}\n{prompt}")
-        print(f"{ollama_config['model']}", f"{preamble}\n{prompt}")
+        response = client.generate(f"{config['ollama']['model']}", f"{config['ollama']['preamble']}\n{prompt}")
+        print(f"{config['ollama']['model']}", f"{config['ollama']['preamble']}\n{prompt}")
         return response['response']
     except Exception as e:
         print(f"HTTP error occurred: {e}")
         return "Error contacting Ollama server."
 
-# Function to process mentions
-def process_mentions():
-    since_id = None
-    while True:
-        mentions = mastodon.notifications(since_id=since_id) if since_id else mastodon.notifications()
-        for mention in mentions:
-            if mention['type'] == 'mention':
-                content = html2text.html2text(mention['status']['content'])
-                user = mention['status']['account']['acct']
-                status_id = mention['status']['id']
-                print(f"Mention from {user}: {content}")
+def main():
+    config = load_config(CONFIG_PATH)
+    state = load_state()
 
-                response_text = ask_ollama(content)
-                print(f"Ollama response: {response_text}")
+    masto = mastodon.Mastodon(
+        api_base_url=config['mastodon']['host'],
+        access_token=config['mastodon']['access_token']
+    )
 
-                mastodon.status_post(
-                    status=f"@{user} {response_text}",
-                    in_reply_to_id=status_id,
-                    visibility=mastodon_config.get('default_visibility', 'public')
-                )
-                since_id = mention['id']
+    mentions = masto.notifications(mentions_only=True)
 
-        time.sleep(30)
+    for mention in mentions:
+        if mention['id'] in state:
+            continue
+
+        content = mention['status']['content']
+        username = mention['status']['account']['username']
+        prompt = html2text.html2text(content)
+
+        if username != "maxheadroom":
+            continue
+
+        response = send_to_ollama(prompt, config)
+        if not response or config['mastodon']['bot_username'] in response:
+            continue
+
+        masto.status_post(
+            status=f"@{username} {response}",
+            in_reply_to_id=mention['status']['id'],
+            visibility=config['mastodon']['default_visibility']
+        )
+
+        state.add(mention['id'])
+
+    save_state(state)
 
 if __name__ == "__main__":
-    process_mentions()
+    while True:
+        main()
+        time.sleep(30)