WIP - oh its too slow? need gpu?
s2t2 committed Jan 14, 2024
commit c151a10fd1f8f50d5a19624baa79d1f1a828aff7
34 changes: 21 additions & 13 deletions README.md
@@ -79,12 +79,31 @@ FILE_ID_SPLIT_INDEX="0" # 0 for files from Canvas, 1 for files from Blackboard

## Usage

### Submission Files Manager

Demonstrate the ability to access submission files:

```sh
python -m app.submissions_manager
```
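
The `FILE_ID_SPLIT_INDEX` setting referenced above controls which underscore-separated part of a submission filename is treated as the file identifier. A minimal sketch of that idea (the filename format and parsing below are assumptions, not necessarily what `app.submissions_manager` does):

```py
import os

FILE_ID_SPLIT_INDEX = int(os.getenv("FILE_ID_SPLIT_INDEX", default="0"))

filename = "12345_homework_4.ipynb"  # hypothetical Canvas-style export name
file_id = filename.split("_")[FILE_ID_SPLIT_INDEX]
print(file_id)  #> 12345
```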

### LLM

Demonstrate the ability to query the LLM of your choice (OpenAI or Meta Llama).

Query the OpenAI LLM:

```sh
TEMP=0.6 python -m app.openai.llm
```

Query the Meta Llama LLM:

```sh
TEMP=0.6 python -m app.meta.llm
```
> NOTE: the first time the Llama model is run, it will take a while to download the model weights.
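
If you want to fetch the weights ahead of time, the following sketch should warm the local Hugging Face cache (this helper is not part of the app; it assumes the `huggingface_hub` package installed alongside `transformers`):

```py
from huggingface_hub import snapshot_download

# download (or re-use) the cached Llama 2 chat weights; requires an approved Hugging Face token
snapshot_download(repo_id="meta-llama/Llama-2-7b-chat-hf", token="your-hf-token")
```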

### Cell-based Document Splitting

Process the starter file:
Expand Down Expand Up @@ -122,25 +141,14 @@ DOCS_LIMIT=5 python -m app.submissions_retriever

### Retrieval Augmented Generation (RAG)

#### OpenAI LLM

Chat with the LLM:
Use an LLM for grading:

```sh
TEMP=0.6 python -m app.openai_llm
```

```sh
DOCS_LIMIT=5 python -m app.submissions_grader
DOCS_LIMIT=5 python -m app.openai.submissions_grader

# DOCS_LIMIT=5 SIMILARITY_THRESHOLD=0.75 CHUNK_SIZE=1000 CHUNK_OVERLAP=0 python -m app.submissions_grader
```
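
The `SIMILARITY_THRESHOLD`, `CHUNK_SIZE`, and `CHUNK_OVERLAP` options map onto standard LangChain retrieval parameters. A rough sketch of how they could be wired together (the actual `app.openai.submissions_grader` pipeline may differ):

```py
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

submission_text = "..."  # hypothetical submission contents (embedding requires OPENAI_API_KEY)

splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)  # CHUNK_SIZE / CHUNK_OVERLAP
chunks = splitter.split_text(submission_text)

db = FAISS.from_texts(chunks, OpenAIEmbeddings())
docs_and_scores = db.similarity_search_with_relevance_scores("grading criteria", k=5)
relevant = [doc for doc, score in docs_and_scores if score >= 0.75]  # SIMILARITY_THRESHOLD
```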

#### Llama 2 LLM

```sh
TEMP=0.6 python -m app.meta_llm
```

## Testing

33 changes: 14 additions & 19 deletions app/meta_llama/chain.py → app/meta/chain.py
@@ -11,31 +11,26 @@
from app.llama_llm import LlamaService


load_dotenv()

TEMP = float(os.getenv("TEMP", default="0.0")) # @param {type:"slider", min:0, max:1, step:0.1}


if __name__ == "__main__":

    service = LlamaService()
    pipeline = service.pipeline
    llm = HuggingFacePipeline(pipeline=pipeline, model_kwargs={"temperature":TEMP})
    print(llm)
    #pipeline = service.pipeline
    #llm = HuggingFacePipeline(pipeline=pipeline, model_kwargs={"temperature":TEMP})
    #print(llm)

    # SIMPLE LLM CHAIN

    system_prompt = "You are an advanced assistant that excels at translation. "
    instruction = "Convert the following text from English to French:\n\n {text}"
    template = get_prompt(instruction, system_prompt)
    print(template)
    prompt = PromptTemplate(template=template, input_variables=["text"])

    llm_chain = LLMChain(prompt=prompt, llm=llm)

    query = "how are you today?"
    response = llm_chain.run(query)
    parse_text(response)
    #system_prompt = "You are an advanced assistant that excels at translation. "
    #instruction = "Convert the following text from English to French:\n\n {text}"
    #template = get_prompt(instruction, system_prompt)
    #print(template)
    #prompt = PromptTemplate(template=template, input_variables=["text"])
    #
    #llm_chain = LLMChain(prompt=prompt, llm=llm)
    #
    #query = "how are you today?"
    #response = llm_chain.run(query)
    #parse_text(response)


    # CHAT CHAIN
163 changes: 163 additions & 0 deletions app/meta/llm.py
@@ -0,0 +1,163 @@

# adapted from a YouTube video about Llama 2 and LangChain: ________________

import os
from dotenv import load_dotenv

import torch
#import transformers
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.llms.huggingface_pipeline import HuggingFacePipeline

from app.meta.prompts import get_prompt, parse_text, cut_off_text, remove_substring

load_dotenv()

HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
MODEL_NAME = "meta-llama/Llama-2-7b-chat-hf" # os.getenv("MODEL_NAME", default="meta-llama/Llama-2-7b-chat-hf")

#MAX_NEW_TOKENS = 512
TEMP = float(os.getenv("TEMP", default="0.0")) # @param {type:"slider", min:0, max:1, step:0.1}

# Llama 2 chat prompt markers, plus the default system prompt published in Meta's Llama 2 examples:
INST, INST_END = "[INST]", "[/INST]"
SYS, SYS_END = "<<SYS>>\n", "\n<</SYS>>\n\n"
DEFAULT_SYSTEM_PROMPT = """\
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.

If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
"""

def compile_prompt(prompt, system_prompt=DEFAULT_SYSTEM_PROMPT, input_variables=[]) -> PromptTemplate:
    """Wraps your query in the syntax the model understands. Uses the default system instructions, or ones you provide.

    Params:
        prompt (str) : your prompt string, optionally with {} placeholders for input vars

        input_variables (list[str]) : names of the input variables used in your prompt (default is an empty list)

    Returns: langchain.PromptTemplate
    """
    formatted_prompt = f"{INST} {SYS} {system_prompt} {SYS_END} {prompt} {INST_END}"
    return PromptTemplate(template=formatted_prompt, input_variables=input_variables)
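
# For example (hypothetical usage), compile_prompt("Summarize this: {text}", input_variables=["text"])
# returns a PromptTemplate whose template string looks roughly like:
#   [INST] <<SYS>>
#   ...the system prompt...
#   <</SYS>>
#
#   Summarize this: {text} [/INST]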


class HuggingFaceService:
    def __init__(self, model_name=MODEL_NAME, temp=TEMP, token=HUGGINGFACE_TOKEN): # device_type="cpu",
        self.model_name = model_name
        self.token = token # hugging face api token
        self.temp = temp

        #self.device_type = device_type # "cpu" for local dev, or "cuda" for colab gpu

    @property
    def tokenizer(self):
        # https://huggingface.co/transformers/v2.11.0/model_doc/auto.html?highlight=autotokenizer#autotokenizer
        return AutoTokenizer.from_pretrained(self.model_name, token=self.token) # cache_dir=CACHE_DIRPATH

    @property
    def model(self):
        # https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoModelForCausalLM
        return AutoModelForCausalLM.from_pretrained(self.model_name, token=self.token,
            device_map="auto",
            #torch_dtype=torch.float16, # GPU ONLY? https://stackoverflow.com/a/73530618/670433
            torch_dtype=torch.float32 # CPU
        )

    @property
    def pipeline(self):
        """wrapper for tokenizer and model, for performing the 'text-generation' task"""
        # https://huggingface.co/docs/transformers/main_classes/pipelines
        return pipeline(task="text-generation", model=self.model, tokenizer=self.tokenizer,
            device_map="auto",
            max_new_tokens=512, do_sample=True, top_k=30, num_return_sequences=1,
            eos_token_id=self.tokenizer.eos_token_id,
            #torch_dtype=torch.bfloat16, # GPU ONLY? https://stackoverflow.com/a/73530618/670433
            torch_dtype=torch.float32, # CPU
        )

    @property
    def llm(self):
        return HuggingFacePipeline(pipeline=self.pipeline, model_kwargs={"temperature":self.temp})
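
    # NOTE: each property above re-builds its object on every access, so the tokenizer
    # and the multi-GB model weights are re-loaded each time they are used; caching them
    # (for example with functools.cached_property) is one way to avoid repeated loading.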


#def predict(self, query):


#def formatted_response(self, prompt, system_prompt=DEFAULT_SYSTEM_PROMPT, input_variables=None):
# prompt = self.compile_prompt(prompt)
#
# llm_chain = LLMChain(prompt=prompt, llm=llm)
# response = llm_chain.run(query)
# parse_text(response)

#def generate(self, text):
# prompt = get_prompt(text)
#
# with torch.autocast(self.device_type, dtype=torch.bfloat16):
# #inputs = self.tokenizer(prompt, return_tensors="pt").to('cuda') # on CPU as well?
# inputs = self.tokenizer(prompt, return_tensors="pt") #
# breakpoint()
# #if self.device_type == "cuda":
# # inputs = inputs.to("cuda")
#
# outputs = self.model.generate(**inputs,
# max_new_tokens=512,
# eos_token_id=self.tokenizer.eos_token_id,
# pad_token_id=self.tokenizer.eos_token_id,
# )
# final_outputs = self.tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
# final_outputs = cut_off_text(final_outputs, '</s>')
# final_outputs = remove_substring(final_outputs, prompt)
#
# return final_outputs#, outputs





if __name__ == "__main__":

    hf = HuggingFaceService()

    llm = hf.llm
    print(llm)

    general_knowledge_queries = [
        "What year was America founded?",
        "Tell us about the first humans who landed on the moon."
    ]

    for query in general_knowledge_queries:
        # response = llm.predict(query).strip()
        prompt = compile_prompt(prompt=query)
        llm_chain = LLMChain(prompt=prompt, llm=llm)
        #response = llm_chain.run(query) # chain({'foo': 1, 'bar': 2})
        #> ValueError: A single string input was passed in, but this chain expects multiple inputs (set()). When a chain expects multiple inputs, please call it by passing in a dictionary, eg `chain({'foo': 1, 'bar': 2})`
        response = llm_chain({"query": query}) # ooh it's slow?
        parse_text(response)


    breakpoint()
    exit()

    # PROMPT

    system_prompt = "You are an advanced assistant that excels at translation. "
    instruction = "Convert the following text from English to French:\n\n {text}"
    prompt = compile_prompt(prompt=instruction, system_prompt=system_prompt, input_variables=["text"])
    print(prompt)

    # CHAIN

    llm_chain = LLMChain(prompt=prompt, llm=llm)

    query = "how are you today?"
    while query != "":
        print(query)
        response = llm_chain.run(query)
        parse_text(response)
        print("------")
        query = input("Query (or press enter to stop): ")
File renamed without changes.
60 changes: 0 additions & 60 deletions app/meta_llama/llm.py

This file was deleted.

2 changes: 1 addition & 1 deletion requirements.txt
@@ -17,7 +17,7 @@ faiss-cpu
# llama:
torch # 2.1.0+cu121 (for colab)
transformers # 4.35.2
#accelerate # 0.25.0
accelerate # 0.25.0
# torchtext # 0.16.0

