LLMCostCard#

class council.llm.LLMCostCard(input: float, output: float)[source]#

Bases: object

LLM cost per million tokens.

get_costs(prompt_tokens: int, completion_tokens: int) → Tuple[float, float][source]#

Return a tuple of (prompt_tokens_cost, completion_tokens_cost).

property input: float#

Cost per million input (prompt) tokens.

input_cost(tokens: int) → float[source]#

Get prompt_tokens_cost for a given number of input tokens.

property output: float#

Cost per million output (completion) tokens.

output_cost(tokens: int) → float[source]#

Get completion_tokens_cost for a given number of completion tokens.
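
A minimal usage sketch (the per-million prices below are illustrative, not actual model prices):

```python
from council.llm import LLMCostCard

# Hypothetical pricing: $3.00 per million input tokens, $15.00 per million output tokens
card = LLMCostCard(input=3.0, output=15.0)

# Per-direction costs; since prices are per million tokens,
# 10_000 input tokens are expected to cost 10_000 / 1_000_000 * 3.0 = $0.03
prompt_cost = card.input_cost(10_000)
completion_cost = card.output_cost(2_000)

# Both costs at once, as a (prompt_tokens_cost, completion_tokens_cost) tuple
costs = card.get_costs(prompt_tokens=10_000, completion_tokens=2_000)
```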

LLMConsumptionCalculatorBase#

class council.llm.LLMConsumptionCalculatorBase(model: str)[source]#

Bases: ABC

Helper class to manage LLM consumptions.

abstract find_model_costs() → LLMCostCard | None[source]#

Get the LLMCostCard for this calculator's model, used to calculate cost consumptions.

format_kind(token_kind: TokenKind, cost: bool = False) → str[source]#

Format Consumption.kind, e.g. from 'prompt' to '{self.model}:prompt_tokens'.

abstract get_consumptions(*args, **kwargs) → List[Consumption][source]#

Each calculator implements this method with its own parameters.

get_default_consumptions(duration: float) → List[Consumption][source]#

Return consumptions for 1 call and the specified duration. Use when token information is not available.
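
A minimal subclass sketch; the model name and cost table are hypothetical, and the Consumption import path is an assumption:

```python
from typing import List, Optional

from council.contexts import Consumption  # import path assumed for illustration
from council.llm import LLMConsumptionCalculatorBase, LLMCostCard

# Hypothetical per-million cost table for an imaginary model
_COST_CARDS = {"my-model": LLMCostCard(input=1.0, output=2.0)}


class MyConsumptionCalculator(LLMConsumptionCalculatorBase):
    def find_model_costs(self) -> Optional[LLMCostCard]:
        # Return None when the model's costs are unknown
        return _COST_CARDS.get(self.model)

    def get_consumptions(self, duration: float) -> List[Consumption]:
        # Simplest possible implementation: no token counts available,
        # so report only the single call and its duration
        return self.get_default_consumptions(duration)


calculator = MyConsumptionCalculator("my-model")
consumptions = calculator.get_consumptions(duration=1.25)
```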

LLMCostManagerObject#

class council.llm.LLMCostManagerObject(kind: str, version: str, metadata: DataObjectMetadata, spec: T)[source]#

Bases: DataObject[LLMCostManagerSpec]

Helper class to instantiate an LLMCostManagerObject from a YAML file.

get_cost_map(category: str) → Dict[str, LLMCostCard][source]#

Get the cost mapping {model: LLMCostCard} for a given category.
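
A usage sketch; the file path, category name, and model key are hypothetical, and the from_yaml constructor name is assumed from the DataObject base class:

```python
from council.llm import LLMCostManagerObject

# Load cost definitions from a YAML file (path is hypothetical;
# from_yaml is assumed to be provided by the DataObject base class)
cost_manager = LLMCostManagerObject.from_yaml("llm-costs.yaml")

# {model: LLMCostCard} mapping for one category defined in the file
cost_map = cost_manager.get_cost_map("default")  # "default" is a hypothetical category
card = cost_map.get("my-model")  # None if the model is not listed in this category
```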

TokenKind#

class council.llm.TokenKind(value)[source]#

Bases: str, Enum

Enumeration of LLM token kinds.

prompt = 'prompt'#

Prompt tokens

completion = 'completion'#

Completion tokens

total = 'total'#

Total tokens

reasoning = 'reasoning'#

Reasoning tokens, specific to OpenAI o1 models.

cache_creation_prompt = 'cache_creation_prompt'#

Cache creation prompt tokens, specific to Anthropic prompt caching.

cache_read_prompt = 'cache_read_prompt'#

Cache read prompt tokens, specific to Anthropic and OpenAI prompt caching.
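
Because TokenKind derives from both str and Enum, its members compare equal to their string values, as in this small check:

```python
from council.llm import TokenKind

# str-based enum: members behave like plain strings
assert TokenKind.prompt == "prompt"
assert TokenKind.cache_read_prompt.value == "cache_read_prompt"

# All token kinds defined by the enumeration
print([kind.value for kind in TokenKind])
```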