| _id: gpt4free |
| author: Anton Breslavskii | https://github.com/breslavsky |
| description: Free generative AI service providers |
| readme: Small internal refactor |
| title: GPT for free |
| url: https://huggingface.co/PiperMy/Node-Packages/resolve/main/gpt4free.yaml |
| version: 12 |
| nodes: |
| ask_llm_agent_gpt4free: |
| _id: ask_llm_agent_gpt4free |
| arrange: |
| x: 480 |
| y: 130 |
| category: |
| _id: llm_agents |
| title: en=Language Agents;ru=Языковые агенты |
| execution: deferred |
| inputs: |
| instructions: |
| order: 1 |
| title: en=Instructions;ru=Инструкции |
| type: string |
| multiline: true |
| placeholder: en=Leave empty if you don't need it;ru=Оставьте пустым если не требуется |
| question: |
| order: 2 |
| title: en=Question;ru=Вопрос |
| type: string |
| required: true |
| multiline: true |
| default: What is your name? |
| model: |
| order: 3 |
| title: en=Model;ru=Модель |
| type: string |
| default: gpt-4o-mini |
| enum: |
| - gpt-3.5-turbo |
| - gpt-4 |
| - gpt-4o |
| - gpt-4o-mini |
| - o1 |
| - o1-mini |
| - o3-mini |
| - gigachat |
| - meta-ai |
| - llama-2-7b |
| - llama-3-8b |
| - llama-3-70b |
| - llama-3.1-8b |
| - llama-3.1-70b |
| - llama-3.1-405b |
| - llama-3.2-1b |
| - llama-3.2-3b |
| - llama-3.2-11b |
| - llama-3.2-90b |
| - llama-3.3-70b |
| - mixtral-8x7b |
| - mixtral-8x22b |
| - mistral-nemo |
| - mixtral-small-24b |
| - hermes-3 |
| - phi-3.5-mini |
| - phi-4 |
| - wizardlm-2-7b |
| - wizardlm-2-8x22b |
| - gemini-exp |
| - gemini-1.5-flash |
| - gemini-1.5-pro |
| - gemini-2.0 |
| - gemini-2.0-flash |
| - gemini-2.0-flash-thinking |
| - gemini-2.0-pro |
| - claude-3-haiku |
| - claude-3-sonnet |
| - claude-3-opus |
| - claude-3.5-sonnet |
| - claude-3.7-sonnet |
| - claude-3.7-sonnet-thinking |
| - reka-core |
| - blackboxai |
| - blackboxai-pro |
| - command-r |
| - command-r-plus |
| - command-r7b |
| - command-a |
| - qwen-1.5-7b |
| - qwen-2-72b |
| - qwen-2-vl-7b |
| - qwen-2.5-72b |
| - qwen-2.5-coder-32b |
| - qwen-2.5-1m |
| - qwen-2-5-max |
| - qwq-32b |
| - qvq-72b |
| - pi |
| - deepseek-chat |
| - deepseek-v3 |
| - deepseek-r1 |
| - janus-pro-7b |
| - grok-3 |
| - grok-3-r1 |
| - sonar |
| - sonar-pro |
| - sonar-reasoning |
| - sonar-reasoning-pro |
| - r1-1776 |
| - nemotron-70b |
| - dbrx-instruct |
| - glm-4 |
| - mini_max |
| - yi-34b |
| - dolphin-2.6 |
| - dolphin-2.9 |
| - airoboros-70b |
| - lzlv-70b |
| - minicpm-2.5 |
| - tulu-3-1-8b |
| - tulu-3-70b |
| - tulu-3-405b |
| - olmo-1-7b |
| - olmo-2-13b |
| - olmo-2-32b |
| - olmo-4-synthetic |
| - lfm-40b |
| - evil |
| outputs: |
| answer: |
| title: en=Answer;ru=Ответ |
| type: string |
| model: |
| title: en=Model;ru=Модель |
| type: string |
| provider: |
| title: en=Provider;ru=Провайдер |
| type: string |
| package: gpt4free |
| script: | |
export async function run({ inputs }) {
  // Ask an LLM through the local gpt4free gateway and return its answer.
  //
  // inputs:
  //   instructions — optional system prompt; omitted from the request when empty
  //   question     — user message; fallback matches the node's declared input
  //                  default ('What is your name?') — previously the script
  //                  fell back to a different string ('Are you AI?')
  //   model        — model id understood by gpt4free (default: gpt-4o-mini)
  // outputs: answer text plus the model/provider the gateway actually used.

  const { NextNode } = DEFINITIONS;
  const { model = 'gpt-4o-mini', instructions, question = 'What is your name?' } = inputs;

  // Tests hit the locally published port; otherwise the docker service name
  // is resolvable on the internal network.
  const { data } = await httpClient({
    method: 'post',
    url: `http://${NODE_ENV === 'test' ? '0.0.0.0:8080' : 'gpt4free:1337'}/v1/chat/completions`,
    timeout: 60000,
    data: {
      model,
      stream: false,
      messages: [
        // Include a system message only when instructions were provided.
        ...(instructions ? [{ role: 'system', content: instructions }] : []),
        { role: 'user', content: question }
      ]
    },
    headers: {
      'Content-Type': 'application/json',
    }
  });

  const { model: used, provider, choices: [{ message: { content: answer } }] } = data;
  return NextNode.from({ outputs: { model: used, provider, answer } });
}
| source: catalog |
| title: en=Ask LLM agent for free;ru=Спросить LLM агента бесп. |
| version: 2 |
| generate_image_gpt4free: |
| _id: generate_image_gpt4free |
| arrange: |
| x: 130 |
| y: 80 |
| category: |
| _id: generate_images |
| title: en=Generate images;ru=Генерация изображений |
| environment: {} |
| execution: deferred |
| inputs: |
| prompt: |
| order: 1 |
| title: en=Prompt;ru=Подсказка |
| type: string |
| required: true |
| multiline: true |
| default: superhero game card |
| model: |
| order: 2 |
| title: en=Model;ru=Модель |
| type: string |
| default: flux-dev |
| enum: |
| - sdxl-turbo |
| - sd-3.5 |
| - flux |
| - flux-pro |
| - flux-dev |
| - flux-schnell |
| - dall-e-3 |
| - midjourney |
| outputs: |
| image: |
| title: en=Image;ru=Изображение |
| type: image |
| provider: |
| title: en=Provider;ru=Провайдер |
| type: string |
| model: |
| title: en=Model;ru=Модель |
| type: string |
| package: gpt4free |
| script: | |
// Generate an image via the local gpt4free gateway and return it as a Buffer.
//
// inputs:
//   prompt — text description of the desired image
//   model  — image model id understood by gpt4free (default: flux-dev)
// outputs: binary image plus the model/provider the gateway actually used.

export async function run({ inputs }) {

  const { NextNode } = DEFINITIONS;
  const { model = 'flux-dev', prompt = 'superhero game card' } = inputs;

  // Tests hit the locally published port; otherwise the docker service name
  // is resolvable on the internal network.
  const host = NODE_ENV === 'test' ? '0.0.0.0:8080' : 'gpt4free:1337';

  const response = await httpClient({
    method: 'post',
    url: `http://${host}/v1/images/generate`,
    timeout: 60000,
    data: { model, prompt, response_format: 'b64_json' },
    headers: { 'Content-Type': 'application/json' }
  });

  // The gateway returns the image base64-encoded; decode it for the output.
  const { model: used, provider, data: items } = response.data;
  const [{ b64_json }] = items;
  const image = Buffer.from(b64_json, 'base64');

  return NextNode.from({ outputs: { model: used, provider, image } });
}
| source: catalog |
| title: en=Generate image for free;ru=Генерация изобр. бесп. |
| version: 6 |
|
|