Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .env.template
Original file line number Diff line number Diff line change
@@ -1 +1,2 @@
OPENAI_API_KEY=place-your-key-here
AWS_PROFILE=place-your-profile-here
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -158,3 +158,6 @@ cython_debug/
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/


*.ipynb
48 changes: 48 additions & 0 deletions plotai/llm/bedrock.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
import os
import boto3
from botocore.exceptions import ClientError

from dotenv import load_dotenv

load_dotenv()



class Bedrock:
    """Minimal wrapper around Amazon Bedrock's Converse API.

    Authenticates through a named AWS profile (read from the AWS_PROFILE
    environment variable) using boto3's standard session resolution.
    """

    def __init__(self, model: str, region_name: str = "us-east-1"):
        """Create a Bedrock runtime client.

        Args:
            model: Bedrock model identifier, e.g.
                "anthropic.claude-3-5-sonnet-20240620-v1:0".
            region_name: AWS region hosting the bedrock-runtime endpoint.

        Raises:
            Exception: if the AWS_PROFILE environment variable is not set.
        """
        profile_name = os.environ.get("AWS_PROFILE")
        if profile_name is None:
            raise Exception(
                "Please set AWS_PROFILE environment variable. "
                "You can obtain API key from https://console.aws.amazon.com/iam/home#/security_credentials"
            )
        self.region_name = region_name
        self.session = boto3.Session(profile_name=profile_name)
        self.model = model
        self.client = self.session.client("bedrock-runtime", region_name=self.region_name)

    def chat(self, prompt: str) -> str:
        """Send a single-turn user prompt and return the model's text reply.

        On any failure (including botocore ClientError) the error is
        reported as a string beginning with "ERROR:" rather than raised,
        preserving the best-effort contract callers rely on.
        """
        conversation = [
            {
                "role": "user",
                "content": [{"text": prompt}],
            }
        ]
        try:
            response = self.client.converse(
                modelId=self.model,
                messages=conversation,
                inferenceConfig={"maxTokens": 512, "temperature": 0.5, "topP": 0.9},
            )
            return response["output"]["message"]["content"][0]["text"]
        # Exception already subsumes ClientError; the original
        # `except (ClientError, Exception)` tuple was redundant.
        except Exception as e:
            return f"ERROR: Can't invoke '{self.model}'. Reason: {e}"


# br = Bedrock("anthropic.claude-3-5-sonnet-20240620-v1:0")

# print(br.chat("Tell me a joke"))
8 changes: 4 additions & 4 deletions plotai/llm/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,13 +16,13 @@ class ChatGPT:
model = "gpt-3.5-turbo"

def __init__(self, model: str):
api_key = os.environ.get("OPENAI_API_KEY")
if api_key is None:
self.api_key = os.environ.get("OPENAI_API_KEY")
if self.api_key is None:
raise Exception(
"Please set OPENAI_API_KEY environment variable."
"You can obtain API key from https://platform.openai.com/account/api-keys"
)
openai.api_key = api_key
openai.api_key = self.api_key
self.model = model

@property
Expand All @@ -37,7 +37,7 @@ def _default_params(self):
}

def chat(self, prompt):
client = openai.OpenAI()
client = openai.OpenAI(api_key=self.api_key)

params = {
**self._default_params,
Expand Down
19 changes: 3 additions & 16 deletions plotai/plotai.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,8 @@

class PlotAI:

def __init__(self, *args, **kwargs):
self.model_version = "gpt-3.5-turbo"
def __init__(self, llm, *args, **kwargs):
self.llm = llm
# DataFrame to plot
self.df, self.x, self.y, self.z = None, None, None, None

Expand All @@ -28,24 +28,11 @@ def make(self, prompt):

Logger().log({"title": "Prompt", "details": p.value})

response = ChatGPT(model=self.model_version).chat(p.value)
response = self.llm.chat(p.value)

Logger().log({"title": "Response", "details": response})

executor = Executor()
error = executor.run(response, globals(), {"df":self.df, "x": self.x, "y": self.y, "z": self.z})
if error is not None:
Logger().log({"title": "Error in code execution", "details": error})

# p_again = Prompt(prompt, self.df, self.x, self.y, self.z, previous_code=response, previous_error=error)

# Logger().log({"title": "Prompt with fix", "details": p_again.value})

# response = ChatGPT().chat(p.value)

# Logger().log({"title": "Response", "details": response})

# executor = Executor()
# error = executor.run(response, globals(), locals())
# if error is not None:
# Logger().log({"title": "Error in code execution", "details": error})
9 changes: 6 additions & 3 deletions plotai/prompt/prompt.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,8 @@ def __init__(self, prompt="", df=None, x=None, y=None, z=None, previous_code="",
def input_data_str(self):
if self.df is not None:
return f"""
```python
```
python
# pandas DataFrame
'''
{self.df.head(5)}
Expand Down Expand Up @@ -43,12 +44,14 @@ def value(self):

Initial python code to be updated

```python
```
python
# TODO import required dependencies
# TODO Provide the plot
```

Output only Python code.
Output only Python code. Don't even think about providing the plot. Just the code.
There is also no need to tell me you are sending Python code, I know what I'm expecting
"""

if self.previous_code != "":
Expand Down
1 change: 1 addition & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -3,3 +3,4 @@ pandas
numpy
openai
python-dotenv
boto3