{"data":[{"id":"duguet-ai/deepseek-v3-2","name":"DeepSeek V3.2","canonical_slug":"duguet-ai/deepseek-v3-2","description":"DeepSeek V3.2 — frontier open-source general-purpose model.","created":1775000000,"context_length":163840,"architecture":{"input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":"deepseek"},"pricing":{"prompt":"0.0000004000","completion":"0.0000006000","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":163840,"completion_tokens":16384},"top_provider":{"context_length":163840,"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed","frequency_penalty","presence_penalty"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/deepseek-v3-2"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":"deepseek-ai/DeepSeek-V3.2","powered_by":null,"hosting":"Azure AI Foundry"},{"id":"duguet-ai/deepseek-r1","name":"DeepSeek R1","canonical_slug":"duguet-ai/deepseek-r1","description":"DeepSeek R1 — o1-class reasoning model with transparent chain-of-thought.","created":1775000000,"context_length":163840,"architecture":{"input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":"deepseek"},"pricing":{"prompt":"0.0000005000","completion":"0.0000020000","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":163840,"completion_tokens":32768},"top_provider":{"context_length":163840,"max_completion_tokens":32768,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/deepseek-r1"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":"deepseek-ai/DeepSeek-R1","powered_by":null,"hosting":"Azure AI Foundry"},{"id":"duguet-ai/llama-3.3-70b","name":"Llama 3.3 70B Instruct","canonical_slug":"duguet-ai/llama-3.3-70b","description":"Meta Llama 3.3 70B Instruct — strong general-purpose with tool use.","created":1775000000,"context_length":131072,"architecture":{"input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"llama3"},"pricing":{"prompt":"0.0000003000","completion":"0.0000004000","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":131072,"completion_tokens":8192},"top_provider":{"context_length":131072,"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed","frequency_penalty","presence_penalty"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/llama-3.3-70b"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":"meta-llama/Llama-3.3-70B-Instruct","powered_by":null,"hosting":"Azure AI Foundry"},{"id":"duguet-ai/llama-3.1-8b","name":"Llama 3.1 8B Instruct","canonical_slug":"duguet-ai/llama-3.1-8b","description":"Meta Llama 3.1 8B Instruct — fast, capable, cheap.","created":1775000000,"context_length":131072,"architecture":{"input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Llama3","instruct_type":"llama3"},"pricing":{"prompt":"0.0000000500","completion":"0.0000000800","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":131072,"completion_tokens":4096},"top_provider":{"context_length":131072,"max_completion_tokens":4096,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed","frequency_penalty","presence_penalty"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/llama-3.1-8b"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":"meta-llama/Llama-3.1-8B-Instruct","powered_by":null,"hosting":"Azure AI Foundry"},{"id":"duguet-ai/mistral-large-3","name":"Mistral Large 3","canonical_slug":"duguet-ai/mistral-large-3","description":"Mistral Large 3 — flagship European model, instruction following.","created":1775000000,"context_length":131072,"architecture":{"input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":"mistral"},"pricing":{"prompt":"0.0000004000","completion":"0.0000012000","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":131072,"completion_tokens":8192},"top_provider":{"context_length":131072,"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed","frequency_penalty","presence_penalty"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/mistral-large-3"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":"mistralai/Mistral-Large-3","powered_by":null,"hosting":"Azure AI Foundry"},{"id":"duguet-ai/gpt-oss-120b","name":"GPT-OSS 120B","canonical_slug":"duguet-ai/gpt-oss-120b","description":"Microsoft GPT-OSS 120B — open-weight reasoning model.","created":1775000000,"context_length":128000,"architecture":{"input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":"none"},"pricing":{"prompt":"0.0000001500","completion":"0.0000006000","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":128000,"completion_tokens":16384},"top_provider":{"context_length":128000,"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/gpt-oss-120b"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":null,"powered_by":null,"hosting":"Azure AI Foundry"},{"id":"duguet-ai/deepseek-v3-1","name":"DeepSeek V3.1","canonical_slug":"duguet-ai/deepseek-v3-1","description":"DeepSeek V3.1 — adds tool calling on top of the V3 family.","created":1775000000,"context_length":131072,"architecture":{"input_modalities":["text"],"output_modalities":["text"],"tokenizer":"DeepSeek","instruct_type":"deepseek"},"pricing":{"prompt":"0.0000002700","completion":"0.0000011000","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":131072,"completion_tokens":16384},"top_provider":{"context_length":131072,"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed","frequency_penalty","presence_penalty"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/deepseek-v3-1"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":"deepseek-ai/DeepSeek-V3.1","powered_by":null,"hosting":"Azure AI Foundry"},{"id":"duguet-ai/llama-4-maverick","name":"Llama 4 Maverick 17B","canonical_slug":"duguet-ai/llama-4-maverick","description":"Meta Llama 4 Maverick — first Llama 4 generation, 1M context, 12 languages, image+text.","created":1775000000,"context_length":1000000,"architecture":{"input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Llama4","instruct_type":"llama4"},"pricing":{"prompt":"0.0000005000","completion":"0.0000015000","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":1000000,"completion_tokens":8192},"top_provider":{"context_length":1000000,"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed","frequency_penalty","presence_penalty"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/llama-4-maverick"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":"meta-llama/Llama-4-Maverick-17B-128E-Instruct","powered_by":null,"hosting":"Azure AI Foundry"},{"id":"duguet-ai/cohere-command-a","name":"Cohere Command-A","canonical_slug":"duguet-ai/cohere-command-a","description":"Cohere Command-A — multilingual (10 languages), strong RAG and tool-calling.","created":1775000000,"context_length":131072,"architecture":{"input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Cohere","instruct_type":"command"},"pricing":{"prompt":"0.0000025000","completion":"0.0000100000","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":131072,"completion_tokens":8192},"top_provider":{"context_length":131072,"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/cohere-command-a"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":null,"powered_by":null,"hosting":"Azure AI Foundry"},{"id":"duguet-ai/mistral-medium","name":"Mistral Medium 2505","canonical_slug":"duguet-ai/mistral-medium","description":"Mistral Medium 2505 — mid-tier between Nemo and Large 3, image+text.","created":1775000000,"context_length":131072,"architecture":{"input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":"mistral"},"pricing":{"prompt":"0.0000004000","completion":"0.0000020000","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":131072,"completion_tokens":8192},"top_provider":{"context_length":131072,"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed","frequency_penalty","presence_penalty"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/mistral-medium"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":null,"powered_by":null,"hosting":"Azure AI Foundry"},{"id":"duguet-ai/grok-4-1-fast","name":"xAI Grok 4.1 Fast","canonical_slug":"duguet-ai/grok-4-1-fast","description":"xAI Grok 4.1 Fast (reasoning) — fast Grok variant with reasoning + tool use.","created":1775000000,"context_length":131072,"architecture":{"input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":"none"},"pricing":{"prompt":"0.0000002000","completion":"0.0000005000","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":131072,"completion_tokens":16384},"top_provider":{"context_length":131072,"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/grok-4-1-fast"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":null,"powered_by":null,"hosting":"Azure AI Foundry"},{"id":"duguet-ai/phi-4-mini-reasoning","name":"Phi-4 Mini Reasoning","canonical_slug":"duguet-ai/phi-4-mini-reasoning","description":"Microsoft Phi-4 Mini — small, cheap reasoning model with thinking traces.","created":1775000000,"context_length":128000,"architecture":{"input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":"none"},"pricing":{"prompt":"0.0000001000","completion":"0.0000003000","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":128000,"completion_tokens":8192},"top_provider":{"context_length":128000,"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/phi-4-mini-reasoning"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":"microsoft/Phi-4-mini-reasoning","powered_by":null,"hosting":"Azure AI Foundry"},{"id":"duguet-ai/gpt-5","name":"GPT-5","canonical_slug":"duguet-ai/gpt-5","description":"OpenAI GPT-5 — flagship proprietary model.","created":1775000000,"context_length":400000,"architecture":{"input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"GPT-5","instruct_type":"none"},"pricing":{"prompt":"0.0000025000","completion":"0.0000100000","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":400000,"completion_tokens":16384},"top_provider":{"context_length":400000,"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed","frequency_penalty","presence_penalty"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/gpt-5"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":null,"powered_by":null,"hosting":"Azure OpenAI"},{"id":"duguet-ai/gpt-5.2","name":"GPT-5.2","canonical_slug":"duguet-ai/gpt-5.2","description":"OpenAI GPT-5.2 — improved reasoning and speed.","created":1775000000,"context_length":400000,"architecture":{"input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"GPT-5","instruct_type":"none"},"pricing":{"prompt":"0.0000025000","completion":"0.0000100000","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":400000,"completion_tokens":16384},"top_provider":{"context_length":400000,"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed","frequency_penalty","presence_penalty"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/gpt-5.2"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":null,"powered_by":null,"hosting":"Azure OpenAI"},{"id":"duguet-ai/gpt-5.5","name":"GPT-5.5","canonical_slug":"duguet-ai/gpt-5.5","description":"OpenAI GPT-5.5 — flagship, 1.05M context, image+text reasoning.","created":1775000000,"context_length":1050000,"architecture":{"input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"GPT-5","instruct_type":"none"},"pricing":{"prompt":"0.0000025000","completion":"0.0000150000","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":1050000,"completion_tokens":131072},"top_provider":{"context_length":1050000,"max_completion_tokens":131072,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed","frequency_penalty","presence_penalty"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/gpt-5.5"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":null,"powered_by":null,"hosting":"Azure OpenAI"},{"id":"duguet-ai/gpt-5.4","name":"GPT-5.4","canonical_slug":"duguet-ai/gpt-5.4","description":"OpenAI GPT-5.4 — production workhorse, 1.05M context, strong agentic + long-doc.","created":1775000000,"context_length":1050000,"architecture":{"input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"GPT-5","instruct_type":"none"},"pricing":{"prompt":"0.0000025000","completion":"0.0000150000","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":1050000,"completion_tokens":131072},"top_provider":{"context_length":1050000,"max_completion_tokens":131072,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed","frequency_penalty","presence_penalty"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/gpt-5.4"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":null,"powered_by":null,"hosting":"Azure OpenAI"},{"id":"duguet-ai/gpt-5.4-mini","name":"GPT-5.4 Mini","canonical_slug":"duguet-ai/gpt-5.4-mini","description":"OpenAI GPT-5.4 Mini — drop-in mid-tier, 400K context, reasoning.","created":1775000000,"context_length":400000,"architecture":{"input_modalities":["text","image"],"output_modalities":["text"],"tokenizer":"GPT-5","instruct_type":"none"},"pricing":{"prompt":"0.0000002500","completion":"0.0000020000","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":400000,"completion_tokens":131072},"top_provider":{"context_length":400000,"max_completion_tokens":131072,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed","frequency_penalty","presence_penalty"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/gpt-5.4-mini"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":null,"powered_by":null,"hosting":"Azure OpenAI"},{"id":"duguet-ai/kimi-k2.5","name":"Kimi K2.5","canonical_slug":"duguet-ai/kimi-k2.5","description":"Moonshot AI Kimi K2.5 — strong reasoning model with thinking traces.","created":1775000000,"context_length":131072,"architecture":{"input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":"none"},"pricing":{"prompt":"0.0000006000","completion":"0.0000024000","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":131072,"completion_tokens":16384},"top_provider":{"context_length":131072,"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/kimi-k2.5"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":null,"powered_by":null,"hosting":"Azure AI Foundry"},{"id":"duguet-ai/kimi-k2.6","name":"Kimi K2.6","canonical_slug":"duguet-ai/kimi-k2.6","description":"Moonshot AI Kimi K2.6 — latest frontier reasoning model with thinking traces.","created":1775000000,"context_length":131072,"architecture":{"input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Other","instruct_type":"none"},"pricing":{"prompt":"0.0000006000","completion":"0.0000024000","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":131072,"completion_tokens":16384},"top_provider":{"context_length":131072,"max_completion_tokens":16384,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/kimi-k2.6"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":null,"powered_by":null,"hosting":"Azure AI Foundry"},{"id":"duguet-ai/qwen3-8b","name":"Qwen 3 8B","canonical_slug":"duguet-ai/qwen3-8b","description":"Qwen 3 8B — fast multilingual model, self-hosted on dedicated A100.","created":1775000000,"context_length":32768,"architecture":{"input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":"chatml"},"pricing":{"prompt":"0.0000000500","completion":"0.0000000800","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":32768,"completion_tokens":8192},"top_provider":{"context_length":32768,"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/qwen3-8b"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":"Qwen/Qwen3-8B","powered_by":"xinity-ai · github.com/xinity-ai","hosting":"self-hosted (sovereign A100, orchestrated by xinity-ai)"},{"id":"duguet-ai/qwen2.5-coder-7b","name":"Qwen 2.5 Coder 7B","canonical_slug":"duguet-ai/qwen2.5-coder-7b","description":"Qwen 2.5 Coder 7B — code-specialised self-hosted model.","created":1775000000,"context_length":32768,"architecture":{"input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Qwen","instruct_type":"chatml"},"pricing":{"prompt":"0.0000000500","completion":"0.0000000800","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":32768,"completion_tokens":8192},"top_provider":{"context_length":32768,"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/qwen2.5-coder-7b"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":"Qwen/Qwen2.5-Coder-7B-Instruct","powered_by":"xinity-ai · github.com/xinity-ai","hosting":"self-hosted (sovereign A100, orchestrated by xinity-ai)"},{"id":"duguet-ai/mistral-nemo-12b","name":"Mistral Nemo 12B","canonical_slug":"duguet-ai/mistral-nemo-12b","description":"Mistral Nemo 12B — strong reasoning, self-hosted on dedicated A100.","created":1775000000,"context_length":128000,"architecture":{"input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Mistral","instruct_type":"mistral"},"pricing":{"prompt":"0.0000001000","completion":"0.0000001500","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":128000,"completion_tokens":8192},"top_provider":{"context_length":128000,"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/mistral-nemo-12b"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":"mistralai/Mistral-Nemo-Instruct-2407","powered_by":"xinity-ai · github.com/xinity-ai","hosting":"self-hosted (sovereign A100, orchestrated by xinity-ai)"},{"id":"duguet-ai/gemma3-27b","name":"Gemma 3 27B","canonical_slug":"duguet-ai/gemma3-27b","description":"Google Gemma 3 27B — strong vision + text model, self-hosted on dedicated A100.","created":1775000000,"context_length":128000,"architecture":{"input_modalities":["text"],"output_modalities":["text"],"tokenizer":"Gemma","instruct_type":"gemma"},"pricing":{"prompt":"0.0000001500","completion":"0.0000002000","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":128000,"completion_tokens":8192},"top_provider":{"context_length":128000,"max_completion_tokens":8192,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/gemma3-27b"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":"google/gemma-3-27b-it","powered_by":"xinity-ai · github.com/xinity-ai","hosting":"self-hosted (sovereign A100, orchestrated by xinity-ai)"},{"id":"duguet-ai/glm4-9b","name":"GLM-4 9B","canonical_slug":"duguet-ai/glm4-9b","description":"Zhipu AI GLM-4 9B — bilingual (EN/ZH) chat model, self-hosted.","created":1775000000,"context_length":128000,"architecture":{"input_modalities":["text"],"output_modalities":["text"],"tokenizer":"GLM","instruct_type":"chatglm"},"pricing":{"prompt":"0.0000000500","completion":"0.0000000800","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":128000,"completion_tokens":4096},"top_provider":{"context_length":128000,"max_completion_tokens":4096,"is_moderated":false},"supported_parameters":["temperature","top_p","stop","max_tokens","seed"],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/glm4-9b"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":"THUDM/glm-4-9b-chat","powered_by":"xinity-ai · github.com/xinity-ai","hosting":"self-hosted (sovereign A100, orchestrated by xinity-ai)"},{"id":"duguet-ai/nomic-embed","name":"Nomic Embed Text","canonical_slug":"duguet-ai/nomic-embed","description":"High-quality text embeddings, self-hosted.","created":1775000000,"context_length":8192,"architecture":{"input_modalities":["text"],"output_modalities":["embeddings"],"tokenizer":"Other","instruct_type":null},"pricing":{"prompt":"0.0000000200","completion":"0.0000000000","image":"0","request":"0"},"per_request_limits":{"prompt_tokens":8192,"completion_tokens":0},"top_provider":{"context_length":8192,"max_completion_tokens":0,"is_moderated":false},"supported_parameters":[],"default_parameters":{},"links":{"details":"https://duguetlabs.com/models/nomic-embed"},"knowledge_cutoff":null,"expiration_date":null,"hugging_face_id":"nomic-ai/nomic-embed-text-v1.5","powered_by":"xinity-ai · github.com/xinity-ai","hosting":"self-hosted (sovereign A100, orchestrated by xinity-ai)"}]}