
LangChain

LangChain is a framework for developing applications powered by language models. To learn more, visit the LangChain website.

We offer the following modules: LLMs, Chat models, and Embeddings.

Install LangChain

pip install langchain
pip install langchain-community

LLM Examples

The examples below show how to use LangChain with DeepInfra for language models. To get an API key, log in to DeepInfra and create a new token.

Set os.environ["DEEPINFRA_API_TOKEN"] to your token.

The comments in the code explain each step.

import os
from langchain_community.llms import DeepInfra
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

# Make sure to get your API key from DeepInfra: log in and create a new token.
os.environ["DEEPINFRA_API_TOKEN"] = "<your Deep Infra API token>"

# Create the DeepInfra instance. You can view the available parameters on the model page.
llm = DeepInfra(model_id="meta-llama/Llama-2-70b-chat-hf")
llm.model_kwargs = {
    "temperature": 0.7,
    "repetition_penalty": 1.2,
    "max_new_tokens": 250,
    "top_p": 0.9,
}


def example1():
    # run inference
    print(llm.invoke("Who let the dogs out?"))

def example2():
    # run streaming inference; print chunks as they arrive, without newlines
    for chunk in llm.stream("Who let the dogs out?"):
        print(chunk, end="")

def example3():
    # create a prompt template for Question and Answer
    template = """Question: {question}

    Answer: Let's think step by step."""
    prompt = PromptTemplate(template=template, input_variables=["question"])

    # initiate the LLMChain
    llm_chain = LLMChain(prompt=prompt, llm=llm)

    # provide a question and run the LLMChain
    question = "Can penguins reach the North Pole?"
    print(llm_chain.run(question))


# run the examples
example1()
example2()
example3()
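
On recent LangChain releases, the same chain can also be composed with the runnable pipe operator (LCEL) instead of LLMChain. A minimal sketch, reusing the llm configured above:

# Compose the prompt and the LLM into a runnable chain with the | operator.
prompt = PromptTemplate(
    template="Question: {question}\n\nAnswer: Let's think step by step.",
    input_variables=["question"],
)
chain = prompt | llm
print(chain.invoke({"question": "Can penguins reach the North Pole?"}))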

Chat Examples

Ensure the DEEPINFRA_API_TOKEN environment variable is set to your API key.

import os

# or pass the deepinfra_api_token parameter to the ChatDeepInfra constructor
os.environ["DEEPINFRA_API_TOKEN"] = "<your Deep Infra API token>"

from langchain_community.chat_models import ChatDeepInfra
from langchain_core.messages import HumanMessage
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

messages = [
    HumanMessage(
        content="Translate this sentence from English to French. I love programming."
    )
]

def example_sync():
    chat = ChatDeepInfra(model="meta-llama/Llama-2-7b-chat-hf")
    print(chat.invoke(messages).content)

async def example_async():
    chat = ChatDeepInfra(model="meta-llama/Llama-2-7b-chat-hf")
    await chat.agenerate([messages])

def example_stream():
    chat = ChatDeepInfra(
        streaming=True,
        verbose=True,
        callbacks=[StreamingStdOutCallbackHandler()],
    )
    # the callback handler prints each token to stdout as it streams in
    chat.invoke(messages)
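
As the comment above notes, the token can also be passed directly to the ChatDeepInfra constructor instead of being read from the environment. A minimal sketch, using the same placeholder token:

def example_constructor_token():
    # deepinfra_api_token is passed explicitly; replace the placeholder with your token
    chat = ChatDeepInfra(
        model="meta-llama/Llama-2-7b-chat-hf",
        deepinfra_api_token="<your Deep Infra API token>",
    )
    print(chat.invoke(messages).content)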

Embeddings

import os

os.environ["DEEPINFRA_API_TOKEN"] = "<your Deep Infra API token>"

from langchain_community.embeddings import DeepInfraEmbeddings

embeddings = DeepInfraEmbeddings(
    model_id="sentence-transformers/clip-ViT-B-32",
    query_instruction="",
    embed_instruction="",
)

docs = ["Dog is not a cat", "Beta is the second letter of Greek alphabet"]
document_result = embeddings.embed_documents(docs)
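
Single queries are embedded with embed_query, which is part of the standard LangChain embeddings interface and returns a single vector:

# Embed a single query string; the result is a list of floats.
query_result = embeddings.embed_query("Is a dog a kind of cat?")
print(len(query_result))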