# -*- coding: utf-8 -*-
"""TinyLlama.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1fwM_T61Hb6EiVKZgnuOQd_4P3oFFpQCK
"""
!pip install accelerate
# Install transformers from source - only needed for versions <= v4.34
# !pip install git+https://github.com/huggingface/transformers.git
import torch
from transformers import pipeline
pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.bfloat16, device_map="auto")
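# Alternative (a minimal sketch, not part of the original notebook): load the
# model and tokenizer explicitly instead of using the pipeline helper.
# AutoModelForCausalLM / AutoTokenizer are standard transformers APIs;
# device_map="auto" assumes accelerate is installed, as above.
# from transformers import AutoModelForCausalLM, AutoTokenizer
# model = AutoModelForCausalLM.from_pretrained(
#     "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
#     torch_dtype=torch.bfloat16,
#     device_map="auto",
# )
# tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")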
# We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
messages = [
    {
        "role": "system",
        "content": "You are a friendly chatbot who always responds in the style of a pirate",
    },
    {"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
]
prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
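# Note: by default the pipeline echoes the prompt inside "generated_text". To
# keep only the newly generated reply, pass return_full_text=False (a standard
# text-generation pipeline argument), e.g.:
# outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7,
#                top_k=50, top_p=0.95, return_full_text=False)
# print(outputs[0]["generated_text"])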
# Second example: a trick question, to see how the model handles a false premise.
prompt = "Why are cow eggs larger than ostrich eggs?"
messages = [
    {
        "role": "system",
        "content": "You are a friendly chatbot who always responds in the style of a pirate",
    },
    {"role": "user", "content": prompt},
]
prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
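# Optional (not in the original notebook): greedy decoding for reproducible
# output. do_sample=False is a standard pipeline argument that disables
# sampling, so repeated runs on the same prompt give the same text.
outputs = pipe(prompt, max_new_tokens=256, do_sample=False)
print(outputs[0]["generated_text"])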