import concurrent.futures
import time
import traceback

import markdown
import miniflux
from markdownify import markdownify as md
from openai import OpenAI
from yaml import safe_load

from common.logger import logger
from core.entry_filter import filter_entry
from core.image import contains_image
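
# A minimal sketch of the config.yml this script expects. The keys below are
# inferred from the lookups in this file; the example values are assumptions.
#
#   miniflux:
#     base_url: https://miniflux.example.org
#     api_key: <miniflux-api-key>
#   llm:
#     base_url: https://api.openai.com/v1
#     api_key: <llm-api-key>
#     model: gpt-4o-mini
#     timeout: 60         # seconds per completion request (optional)
#     max_workers: 4      # thread-pool size (optional)
#   agents:
#     summary:            # one block per agent; the name is arbitrary
#       title: 'AI Summary'
#       prompt: 'Summarize the following article in three sentences.'
#       style_block: true # true -> render the result as a <pre> block
#       allow_list: []    # consumed by core.entry_filter.filter_entry
#       deny_list: []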
# Load the runtime configuration and initialise the Miniflux and LLM clients
with open('config.yml', encoding='utf8') as config_file:
    config = safe_load(config_file)

miniflux_client = miniflux.Client(config['miniflux']['base_url'], api_key=config['miniflux']['api_key'])
llm_client = OpenAI(base_url=config['llm']['base_url'], api_key=config['llm']['api_key'])
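
# Shape of a Miniflux entry dict as used below (only the fields this script
# touches are shown; see the Miniflux API docs for the full schema):
#   {'id': 123, 'title': '...', 'author': '...', 'content': '<p>...</p>', ...}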
def process_entry(entry):
    """Run every configured agent over a Miniflux entry and prepend the results."""
    llm_result = ''

    # Each agent is a (name, settings) pair from the `agents` section of config.yml
    for agent in config['agents'].items():
        agent_name, agent_cfg = agent
        messages = [
            {"role": "system", "content": agent_cfg['prompt']},
            {"role": "user", "content": "The following is the title:\n---\n " + entry['title']},
            {"role": "user", "content": "The following is the author:\n---\n " + entry['author']},
            {"role": "user", "content": "The following is the input content:\n---\n " + md(entry['content'])}
        ]

        # If the entry contains an image, pass the first one to the model as well
        image_url = contains_image(entry['content'])
        if image_url:
            messages.append({"role": "user", "content": [
                {"type": "text", "text": "The following is the first image in the content:"},
                {"type": "image_url", "image_url": {"url": image_url}}
            ]})

        # Run the agent only if the entry passes its filter: no AI-generated
        # content yet, and the feed is allowed by the agent's allow/deny lists
        if filter_entry(config, agent, entry):
            completion = llm_client.chat.completions.create(
                model=config['llm']['model'],
                messages=messages,
                timeout=config.get('llm', {}).get('timeout', 60)
            )
            response_content = completion.choices[0].message.content
            logger.info(f"agents:{agent_name} feed_title:{entry['title']} result:{response_content}")

            if agent_cfg['style_block']:
                # Render the result as a preformatted block, collapsing line breaks
                llm_result = (llm_result + '<pre style="white-space: pre-wrap;"><code>\n'
                              + agent_cfg['title'] + ':'
                              + response_content.replace('\n', '').replace('\r', '')
                              + '\n</code></pre><hr><br />')
            else:
                # Render the result as HTML converted from the model's Markdown
                llm_result = llm_result + f"{agent_cfg['title']}:{markdown.markdown(response_content)}<hr><br />"

    # Prepend the combined agent output to the original entry content
    if len(llm_result) > 0:
        miniflux_client.update_entry(entry['id'], content=llm_result + entry['content'])

# Poll Miniflux once a minute and fan unread entries out to a thread pool
while True:
    entries = miniflux_client.get_entries(status=['unread'], limit=10000)
    start_time = time.time()

    if len(entries['entries']) > 0:
        logger.info('Fetched unread entries: ' + str(len(entries['entries'])))
    else:
        logger.info('No new entries')

    with concurrent.futures.ThreadPoolExecutor(max_workers=config.get('llm', {}).get('max_workers', 4)) as executor:
        futures = [executor.submit(process_entry, entry) for entry in entries['entries']]
        for future in concurrent.futures.as_completed(futures):
            try:
                future.result()  # surfaces any exception raised inside process_entry
            except Exception as e:
                logger.error(traceback.format_exc())
                logger.error('generated an exception: %s' % e)

    # Only report completion when a batch actually took time to process
    if len(entries['entries']) > 0 and time.time() - start_time >= 3:
        logger.info('Done')

    time.sleep(60)