1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
|
import concurrent.futures
import time
import logging
import traceback
import miniflux
from markdownify import markdownify as md
import markdown
from openai import OpenAI
from yaml import safe_load
# Load configuration once at import time; the file handle is closed promptly
# via the context manager (the original leaked an open file object).
with open('config.yml', encoding='utf8') as _config_file:
    config = safe_load(_config_file)

# Miniflux API client: fetches unread entries and writes back processed content.
miniflux_client = miniflux.Client(config['miniflux']['base_url'], api_key=config['miniflux']['api_key'])
# OpenAI-compatible chat-completions client pointed at the configured endpoint.
llm_client = OpenAI(base_url=config['llm']['base_url'], api_key=config['llm']['api_key'])

# Module logger: level from config (defaults to INFO), emitting to stderr.
logger = logging.getLogger(__name__)
logger.setLevel(config.get('log_level', 'INFO'))
formatter = logging.Formatter('%(asctime)s - %(filename)s - %(lineno)d - %(levelname)s - %(message)s')
console = logging.StreamHandler()
console.setFormatter(formatter)
logger.addHandler(console)
def process_entry(entry):
    """Run every configured agent over one Miniflux entry and prepend the output.

    An agent is skipped when the entry content already starts with any agent's
    title (or with '<pre' when any agent emits a style block) — i.e. it looks
    already processed — or when the feed's site_url fails the agent's
    allow/deny filtering.

    Side effects: calls the LLM chat API and updates the entry in Miniflux
    when at least one agent produced output. Returns None.
    """
    llm_result = ''
    # Prefixes that mark already-processed content: every agent title, plus
    # '<pre' when at least one agent wraps its output in a style block.
    # (The original appended '<pre' via a side-effect comprehension, once per
    # style-block agent; a single append is equivalent for startswith().)
    agents = config['agents']
    start_with_list = [cfg['title'] for cfg in agents.values()]
    if any(cfg['style_block'] for cfg in agents.values()):
        start_with_list.append('<pre')
    marker_prefixes = tuple(start_with_list)

    for agent_name, agent_cfg in agents.items():
        # TODO: legacy whitelist/blacklist keys kept for compatibility — remove
        # once configs are migrated to allow_list/deny_list.
        allow_list = agent_cfg.get('allow_list')
        if allow_list is None:
            allow_list = agent_cfg.get('whitelist')
        deny_list = agent_cfg.get('deny_list')
        if deny_list is None:
            deny_list = agent_cfg.get('blacklist')

        messages = [
            {"role": "system", "content": agent_cfg['prompt']},
            {"role": "user", "content": "The following is the input content:\n---\n " + md(entry['content'])}
        ]

        # Guard clause: skip content that already looks AI-generated, or feeds
        # filtered out by the allow/deny lists (both None means "no filtering").
        site_url = entry['feed']['site_url']
        passes_filter = (
            (allow_list is not None and site_url in allow_list)
            or (deny_list is not None and site_url not in deny_list)
            or (allow_list is None and deny_list is None)
        )
        if entry['content'].startswith(marker_prefixes) or not passes_filter:
            continue

        completion = llm_client.chat.completions.create(
            model=config['llm']['model'],
            messages=messages,
            timeout=config.get('llm', {}).get('timeout', 60)
        )
        response_content = completion.choices[0].message.content
        logger.info(f"\nagents:{agent_name} \nfeed_title:{entry['title']} \nresult:{response_content}")

        if agent_cfg['style_block']:
            # Wrap in a <pre> block; newlines are stripped so the whole answer
            # renders as a single wrapped chunk.
            llm_result = (llm_result + '<pre style="white-space: pre-wrap;"><code>\n'
                          + agent_cfg['title'] + ':'
                          + response_content.replace('\n', '').replace('\r', '')
                          + '\n</code></pre><hr><br />')
        else:
            llm_result = llm_result + f"{agent_cfg['title']}:{markdown.markdown(response_content)}<hr><br />"

    if len(llm_result) > 0:
        # Prepend the generated sections so the original content stays below.
        miniflux_client.update_entry(entry['id'], content=llm_result + entry['content'])
# Main polling loop: every 60 seconds, fetch all unread entries and process
# them concurrently with a bounded thread pool (I/O-bound work: LLM + HTTP).
while True:
    entries = miniflux_client.get_entries(status=['unread'], limit=10000)
    start_time = time.time()
    # Plain if/else instead of a conditional expression used for side effects.
    if len(entries['entries']) > 0:
        logger.info('Fetched unread entries: ' + str(len(entries['entries'])))
    else:
        logger.info('No new entries')
    with concurrent.futures.ThreadPoolExecutor(max_workers=config.get('llm', {}).get('max_workers', 4)) as executor:
        futures = [executor.submit(process_entry, item) for item in entries['entries']]
        for future in concurrent.futures.as_completed(futures):
            try:
                # result() re-raises any exception from process_entry; the
                # return value itself is unused.
                future.result()
            except Exception as e:
                logger.error(traceback.format_exc())
                logger.error('generated an exception: %s' % e)
    # Only announce completion when a non-empty batch took noticeable time
    # (>= 3s) — avoids log noise on trivial polls.
    if len(entries['entries']) > 0 and time.time() - start_time >= 3:
        logger.info('Done')
    time.sleep(60)
|