autoGPT with Harbour support

autoGPT with Harbour support

Postby Antonio Linares » Tue Apr 18, 2023 10:37 am

microGPT is based on autoGPT concepts, and with a few small modifications we can make it generate Harbour code automatically:

https://github.com/muellerberndt/micro-gpt

Harbour code execution:
Code: Select all  Expand view
           # Fragment of the agent's command dispatcher: write model-generated
           # Harbour code to disk, compile it, and feed the compiler output
           # back into the agent's memory for the next iteration.
           elif command == "execute_harbour":
                # Persist the generated code so the Harbour compiler can read it.
                with open( "test.prg", 'w') as file:
                   file.write( arg )
                   file.close()
                print( colored( "code saved to test.prg", "red" ) )
                print( colored( arg, "white" ) )
                print( colored( "compiling it...", "red" ) )
                # Invoke the Harbour compiler (hard-coded Windows/BCC path),
                # capturing stdout and stderr as text.
                result = subprocess.run( "c:\\harbour\\bin\\win\\bcc\\harbour.exe test.prg", stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, shell=True)
                print( result.stderr )
                # Pause so the user can inspect the compiler output before continuing.
                input()
                # _stdout = StringIO()
                # with redirect_stdout(_stdout):
                #    exec(arg)
                # Store compiler errors when present, otherwise the normal output,
                # so the model can fix or confirm the code on the next turn.
                if len( result.stderr ) != 0:
                   memory.add(f"{mem}{result.stderr}")
                else :
                   memory.add(f"{mem}{result.stdout}")  


If you are interested in cooperating, please let me know.

example of use:
python microgpt.py "write function fibonacci using the Harbour language"

microGPT should write the code, test it and automatically modify it if needed
regards, saludos

Antonio Linares
www.fivetechsoft.com
User avatar
Antonio Linares
Site Admin
 
Posts: 41314
Joined: Thu Oct 06, 2005 5:47 pm
Location: Spain

Re: autoGPT with Harbour support

Postby Antonio Linares » Tue Apr 18, 2023 11:32 am

This version already remains in a loop until it writes and fixes the Harbour code. No need to touch the keyboard! :-)

Please check your OpenAI billing. These auto implementations may consume more than usual.

microgpt.py
Code: Select all  Expand view
import os
import sys
import json
import openai
from termcolor import colored
from bs4 import BeautifulSoup
from urllib.request import urlopen
from duckduckgo_search import ddg
from io import StringIO
from contextlib import redirect_stdout
import subprocess
from dotenv import load_dotenv
from spinner import Spinner

# Load environment variables (OPENAI_API_KEY, MODEL, MAX_MEMORY_ITEM_SIZE) from .env.
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")

# Debug tracing is hard-disabled; restore the env-var check to re-enable it.
debug = False # if os.getenv("DEBUG") in ['true', '1', 't', 'y', 'yes'] else False

from memory import get_memory_instance

# System prompt sent on every request. INSTRUCTIONS constrains the model to
# answer with a single JSON command and lists Harbour-specific coding rules.
SYSTEM_PROMPT = "You are an autonomous agent who fulfills the user's objective."
INSTRUCTIONS = '''
Carefully consider your next command.
Respond with a JSON-encoded dict containing one of the commands: execute_harbour, execute_shell, read_file, web_search, web_scrape, talk_to_user, or done
{"thought": "[REASONING]", "cmd": "[COMMAND]", "arg": "[ARGUMENT]"}
Use only non-interactive shell commands.
Harbour code run with execute_harbour must include a function Main() at the top of the code.
Harbour code must print using "?" the result of the proposed code as a function.
Harbour code does not require any escape character.
Harbour code functions DON'T end with "end", "endfunc", "endfunction", "end of function".
Harbour code does not use "endwhile".
Harbour code must start with "function Main()" and end with "return nil".
Harbour code does not use for ... next ... do. Remove the do.
Use the "done" command after the objective was achieved

Examples:
{"thought": "Search for websites relevant to salami pizza.", "cmd": "web_search", "arg": "salami pizza"}
{"thought": "Scrape information about Apples.", "cmd": "web_scrape", "arg": "https://en.wikipedia.org/wiki/Apple"}
{"thought": "Showing results to the user", "cmd": "talk_to_user", "arg": "[My results]. Did I achieve my objective?"}
{"thought": "I need to ask the user for guidance", "cmd": "talk_to_user", "arg": "What is URL of Domino's Pizza API?"}

IMPORTANT: ALWAYS RESPOND ONLY WITH THIS EXACT JSON FORMAT. DOUBLE-CHECK YOUR RESPONSE TO MAKE SURE IT CONTAINS VALID JSON. DO NOT INCLUDE ANY EXTRA TEXT WITH THE RESPONSE.
'''

if __name__ == "__main__":

    model = os.getenv("MODEL")

    # Exactly one command-line argument: the natural-language objective.
    if len(sys.argv) != 2:
        print("Usage: microgpt.py <objective>")
        sys.exit(1)

    print("Harbour autocoder starting...")

    objective = sys.argv[1]
    max_memory_item_size = int(os.getenv("MAX_MEMORY_ITEM_SIZE"))
    memory = get_memory_instance()
    context = objective
    thought = "I awakened moments ago."

    # Agent loop: ask the model for one JSON command per iteration, execute it,
    # and record the result in memory so the next iteration can build on it.
    while True:
        context = memory.get_context(f"{objective}, {thought}")

        if debug:
            print(f"SYSTEMPROMPT: {SYSTEM_PROMPT}")
            print(f"CONTEXT: {context}")
            print(f"OBJECTIVE: {objective}")
            print(f"INSTRUCTIONS: {INSTRUCTIONS}")

        with Spinner():
            rs = openai.ChatCompletion.create(
                model=model,
                messages=[
                    {"role": "system", "content": SYSTEM_PROMPT},
                    {"role": "user", "content": f"OBJECTIVE:{objective}"},
                    {"role": "user", "content": f"CONTEXT:\n{context}"},
                    {"role": "user", "content": f"INSTRUCTIONS:\n{INSTRUCTIONS}"},
                ])

        response_text = rs['choices'][0]['message']['content']

        # if debug:
        #    print(f"RAW RESPONSE:\n{response_text}")

        # The model must answer {"thought": ..., "cmd": ..., "arg": ...};
        # anything unparsable is discarded and the loop retries.
        try:
            response = json.loads(response_text)
            thought = response["thought"]
            command = response["cmd"]
            arg = response["arg"]

            mem = f"Your thought: {thought}\nYour command: {command}\nCmd argument:\n{arg}\nResult:\n"
        except Exception:
            print(colored("Unable to parse response. Retrying...\n", "red"))
            continue

        if command == "talk_to_user":
            print(colored(f"MicroGPT: {arg}", 'cyan'))
            user_input = input('Your response: ')
            memory.add(f"{mem}The user responded with: {user_input}.")
            continue

        # One-line, newline-escaped preview of the argument (truncated to 64 chars).
        _arg = arg.replace("\n", "\\n") if len(arg) < 64 else f"{arg[:64]}...".replace("\n", "\\n")
        print(colored(f"MicroGPT: {thought}\nCmd: {command}, Arg: \"{_arg}\"", "cyan"))
        # Confirmation prompt disabled so the loop runs unattended.
        user_input = ""  # input('Press enter to perform this action or abort by typing feedback: ')

        if len(user_input) > 0:
            memory.add(f"{mem}The user responded: {user_input}. Take this comment into consideration.")
            continue
        try:
            if command == "execute_harbour":
                # Strip constructs Harbour does not accept before compiling.
                arg = arg.replace("endfunction", "")
                arg = arg.replace("endwhile", "")
                # Context manager closes the file; no explicit close() needed.
                with open("test.prg", 'w') as file:
                    file.write(arg)
                print(colored("code saved to test.prg", "red"))
                print(colored(arg, "white"))
                print(colored("compiling it...", "red"))
                # Harbour compiler (hard-coded Windows/BCC path); capture output as text.
                result = subprocess.run("c:\\harbour\\bin\\win\\bcc\\harbour.exe test.prg", stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, shell=True)
                print(result.stderr)
                if len(result.stderr) != 0:
                    # Compilation failed: retarget the agent at fixing its own code.
                    objective = "fix the code"
                    memory.add(f"{mem}{result.stderr}")
                else:
                    memory.add(f"{mem}{result.stdout}")
                    print("done!")
                    sys.exit()

            elif command == "execute_shell":
                result = subprocess.run(arg, capture_output=True, shell=True)
                memory.add(f"{mem}STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}")
            elif command == "web_search":
                memory.add(f"{mem}{ddg(arg, max_results=5)}")
            elif command == "web_scrape":
                html = urlopen(arg).read()
                response_text = memory.summarize_memory_if_large(BeautifulSoup(html, features="lxml").get_text(), max_memory_item_size)
                memory.add(f"{mem}{response_text}")
            elif command == "read_file":
                # Context manager guarantees the file handle is released.
                with open(arg, "r") as f:
                    file_content = memory.summarize_memory_if_large(f.read(), max_memory_item_size)
                memory.add(f"{mem}{file_content}")
            elif command == "done":
                print("Objective achieved.")
                sys.exit()
        except Exception as e:
            # Feed the failure back to the model so it can correct the command.
            memory.add(f"{mem}The command returned an error:\n{str(e)}\nYou should fix the command.")
regards, saludos

Antonio Linares
www.fivetechsoft.com
User avatar
Antonio Linares
Site Admin
 
Posts: 41314
Joined: Thu Oct 06, 2005 5:47 pm
Location: Spain

Re: autoGPT with Harbour support

Postby Antonio Linares » Tue Apr 18, 2023 11:49 am

Enhanced version:

microgpt.py
Code: Select all  Expand view
import os
import sys
import json
import openai
from termcolor import colored
from bs4 import BeautifulSoup
from urllib.request import urlopen
from duckduckgo_search import ddg
from io import StringIO
from contextlib import redirect_stdout
import subprocess
from dotenv import load_dotenv
from spinner import Spinner

# Load environment variables (OPENAI_API_KEY, MODEL, MAX_MEMORY_ITEM_SIZE) from .env.
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")

# Debug tracing is hard-disabled; restore the env-var check to re-enable it.
debug = False # if os.getenv("DEBUG") in ['true', '1', 't', 'y', 'yes'] else False

from memory import get_memory_instance

# System prompt sent on every request. INSTRUCTIONS constrains the model to
# answer with a single JSON command and lists Harbour-specific coding rules.
SYSTEM_PROMPT = "You are an autonomous agent who fulfills the user's objective."
INSTRUCTIONS = '''
Carefully consider your next command.
Respond with a JSON-encoded dict containing one of the commands: execute_harbour, execute_shell, read_file, web_search, web_scrape, talk_to_user, or done
{"thought": "[REASONING]", "cmd": "[COMMAND]", "arg": "[ARGUMENT]"}
Use only non-interactive shell commands.
Harbour code run with execute_harbour must include a function Main() at the top of the code.
Harbour code must print using "?" the result of the proposed code as a function.
Harbour code does not require any escape character.
Harbour code functions DON'T end with "end", "endfunc", "endfunction", "end of function".
Harbour code does not use "endwhile".
Harbour code must start with "function Main()" and end with "return nil".
Harbour code does not use for ... next ... do. Remove the do.
Use the "done" command after the objective was achieved

Examples:
{"thought": "Search for websites relevant to salami pizza.", "cmd": "web_search", "arg": "salami pizza"}
{"thought": "Scrape information about Apples.", "cmd": "web_scrape", "arg": "https://en.wikipedia.org/wiki/Apple"}
{"thought": "Showing results to the user", "cmd": "talk_to_user", "arg": "[My results]. Did I achieve my objective?"}
{"thought": "I need to ask the user for guidance", "cmd": "talk_to_user", "arg": "What is URL of Domino's Pizza API?"}

IMPORTANT: ALWAYS RESPOND ONLY WITH THIS EXACT JSON FORMAT. DOUBLE-CHECK YOUR RESPONSE TO MAKE SURE IT CONTAINS VALID JSON. DO NOT INCLUDE ANY EXTRA TEXT WITH THE RESPONSE.
'''

if __name__ == "__main__":

    model = os.getenv("MODEL")

    # Exactly one command-line argument: the natural-language objective.
    if len(sys.argv) != 2:
        print("Usage: microgpt.py <objective>")
        sys.exit(1)

    print("Harbour autocoder starting...")

    objective = sys.argv[1]
    max_memory_item_size = int(os.getenv("MAX_MEMORY_ITEM_SIZE"))
    memory = get_memory_instance()
    context = objective
    thought = "I awakened moments ago."

    # Agent loop: ask the model for one JSON command per iteration, execute it,
    # and record the result in memory so the next iteration can build on it.
    while True:
        context = memory.get_context(f"{objective}, {thought}")

        if debug:
            print(f"SYSTEMPROMPT: {SYSTEM_PROMPT}")
            print(f"CONTEXT: {context}")
            print(f"OBJECTIVE: {objective}")
            print(f"INSTRUCTIONS: {INSTRUCTIONS}")

        with Spinner():
            rs = openai.ChatCompletion.create(
                model=model,
                messages=[
                    {"role": "system", "content": SYSTEM_PROMPT},
                    {"role": "user", "content": f"OBJECTIVE:{objective}"},
                    {"role": "user", "content": f"CONTEXT:\n{context}"},
                    {"role": "user", "content": f"INSTRUCTIONS:\n{INSTRUCTIONS}"},
                ])

        response_text = rs['choices'][0]['message']['content']

        # if debug:
        #    print(f"RAW RESPONSE:\n{response_text}")

        # The model must answer {"thought": ..., "cmd": ..., "arg": ...};
        # anything unparsable is discarded and the loop retries.
        try:
            response = json.loads(response_text)
            thought = response["thought"]
            command = response["cmd"]
            arg = response["arg"]

            mem = f"Your thought: {thought}\nYour command: {command}\nCmd argument:\n{arg}\nResult:\n"
        except Exception:
            print(colored("Unable to parse response. Retrying...\n", "red"))
            continue

        if command == "talk_to_user":
            print(colored(f"MicroGPT: {arg}", 'cyan'))
            user_input = input('Your response: ')
            memory.add(f"{mem}The user responded with: {user_input}.")
            continue

        # One-line, newline-escaped preview of the argument (truncated to 64 chars).
        _arg = arg.replace("\n", "\\n") if len(arg) < 64 else f"{arg[:64]}...".replace("\n", "\\n")
        print(colored(f"MicroGPT: {thought}\nCmd: {command}, Arg: \"{_arg}\"", "cyan"))
        # Confirmation prompt disabled so the loop runs unattended.
        user_input = ""  # input('Press enter to perform this action or abort by typing feedback: ')

        if len(user_input) > 0:
            memory.add(f"{mem}The user responded: {user_input}. Take this comment into consideration.")
            continue
        try:
            if command == "execute_harbour":
                # Strip constructs Harbour does not accept before compiling.
                # "endfunction" first, then "endfunc", so both spellings are removed.
                arg = arg.replace("endfunction", "")
                arg = arg.replace("endfunc", "")
                arg = arg.replace("endwhile", "")
                # Context manager closes the file; no explicit close() needed.
                with open("test.prg", 'w') as file:
                    file.write(arg)
                print(colored("code saved to test.prg", "red"))
                print(colored(arg, "white"))
                print(colored("compiling it...", "red"))
                # Harbour compiler (hard-coded Windows/BCC path); capture output as text.
                result = subprocess.run("c:\\harbour\\bin\\win\\bcc\\harbour.exe test.prg", stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, shell=True)
                print(result.stderr)
                if len(result.stderr) != 0:
                    # Compilation failed: retarget the agent at fixing its own code.
                    objective = "fix the code"
                    memory.add(f"{mem}{result.stderr}")
                else:
                    memory.add(f"{mem}{result.stdout}")
                    print("done!")
                    sys.exit()

            elif command == "execute_shell":
                result = subprocess.run(arg, capture_output=True, shell=True)
                memory.add(f"{mem}STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}")
            elif command == "web_search":
                memory.add(f"{mem}{ddg(arg, max_results=5)}")
            elif command == "web_scrape":
                html = urlopen(arg).read()
                response_text = memory.summarize_memory_if_large(BeautifulSoup(html, features="lxml").get_text(), max_memory_item_size)
                memory.add(f"{mem}{response_text}")
            elif command == "read_file":
                # Context manager guarantees the file handle is released.
                with open(arg, "r") as f:
                    file_content = memory.summarize_memory_if_large(f.read(), max_memory_item_size)
                memory.add(f"{mem}{file_content}")
            elif command == "done":
                print("Objective achieved.")
                sys.exit()
        except Exception as e:
            # Feed the failure back to the model so it can correct the command.
            memory.add(f"{mem}The command returned an error:\n{str(e)}\nYou should fix the command.")
regards, saludos

Antonio Linares
www.fivetechsoft.com
User avatar
Antonio Linares
Site Admin
 
Posts: 41314
Joined: Thu Oct 06, 2005 5:47 pm
Location: Spain


Return to latest AI news

Who is online

Users browsing this forum: No registered users and 8 guests