Source code for langchain_core.prompt_values
"""**Prompt values** for language model prompts.
Prompt values are used to represent different pieces of prompts.
They can be used to represent text, images, or chat message pieces.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import List, Literal, Sequence, cast
from typing_extensions import TypedDict
from langchain_core.load.serializable import Serializable
from langchain_core.messages import (
AnyMessage,
BaseMessage,
HumanMessage,
get_buffer_string,
)


class PromptValue(Serializable, ABC):
    """Base abstract class for inputs to any language model.

    PromptValues can be converted to both LLM (pure text-generation) inputs and
    ChatModel inputs.
    """

    @classmethod
    def is_lc_serializable(cls) -> bool:
        """Return whether this class is serializable."""
        return True

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "schema", "prompt"]

    @abstractmethod
    def to_string(self) -> str:
        """Return prompt value as string."""

    @abstractmethod
    def to_messages(self) -> List[BaseMessage]:
        """Return prompt as a list of Messages."""


class StringPromptValue(PromptValue):
    """String prompt value."""

    text: str
    """Prompt text."""

    type: Literal["StringPromptValue"] = "StringPromptValue"

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "base"]

    def to_string(self) -> str:
        """Return prompt as string."""
        return self.text

    def to_messages(self) -> List[BaseMessage]:
        """Return prompt as messages."""
        return [HumanMessage(content=self.text)]
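

# Illustrative usage sketch; the function below is hypothetical and not part
# of the original module. A StringPromptValue wraps plain text and renders to
# either the raw string or a single HumanMessage.
def _example_string_prompt_value() -> None:
    value = StringPromptValue(text="Tell me a joke")
    assert value.to_string() == "Tell me a joke"
    assert value.to_messages() == [HumanMessage(content="Tell me a joke")]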


class ChatPromptValue(PromptValue):
    """Chat prompt value.

    A prompt value that is built from messages.
    """

    messages: Sequence[BaseMessage]
    """List of messages."""

    def to_string(self) -> str:
        """Return prompt as string."""
        return get_buffer_string(self.messages)

    def to_messages(self) -> List[BaseMessage]:
        """Return prompt as a list of messages."""
        return list(self.messages)

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "chat"]


class ImageURL(TypedDict, total=False):
    """Image URL."""

    detail: Literal["auto", "low", "high"]
    """Specifies the detail level of the image."""

    url: str
    """Either a URL of the image or the base64 encoded image data."""


class ImagePromptValue(PromptValue):
    """Image prompt value."""

    image_url: ImageURL
    """Prompt image."""

    type: Literal["ImagePromptValue"] = "ImagePromptValue"

    def to_string(self) -> str:
        """Return prompt as string."""
        return self.image_url["url"]

    def to_messages(self) -> List[BaseMessage]:
        """Return prompt as messages."""
        return [HumanMessage(content=[cast(dict, self.image_url)])]
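

# Illustrative usage sketch; the function below is hypothetical and not part
# of the original module, and the URL is a placeholder. An ImagePromptValue
# renders to the bare URL as a string, or to a HumanMessage whose content is
# a list containing the image dict.
def _example_image_prompt_value() -> None:
    value = ImagePromptValue(
        image_url={"url": "https://example.com/cat.png", "detail": "auto"}
    )
    assert value.to_string() == "https://example.com/cat.png"
    value.to_messages()  # [HumanMessage(content=[{"url": "...", "detail": "auto"}])]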


class ChatPromptValueConcrete(ChatPromptValue):
    """Chat prompt value that explicitly lists the message types it accepts.

    For use in external schemas.
    """

    messages: Sequence[AnyMessage]

    type: Literal["ChatPromptValueConcrete"] = "ChatPromptValueConcrete"

    @classmethod
    def get_lc_namespace(cls) -> List[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "chat"]