Free Claude with limited context:

  1. Copy and paste the code below into a file called "vercel.js"
  2. Open a command prompt, navigate to the folder containing the file, and run it with "node vercel.js" (leave this window open and running)
  3. It will print a proxy URL: http://127.0.0.1:5004/anthropic/claude-v1/v1
  4. Plug it in the same way you would any other reverse proxy.
  5. Make sure to set the max context size to 1024 or lower, otherwise it could get a little wonky.
const https = require('https');
const http = require('http');

// Base HTTPS request options shared by every upstream call to the Vercel AI
// playground. `request()` below mutates `headers` (it injects a randomized
// User-Agent per call) and supplies the `path` at request time.
const options = {
    hostname: 'play.vercel.ai',
    port: 443,
    method: 'POST',
    headers: {
        'Content-Type': 'application/json',
    },
};

// Collects an incoming stream (http request or response) into a string.
// When `json` is truthy the accumulated text is parsed as JSON before the
// promise resolves; a parse failure logs the raw payload and rejects.
const readBody = (res, json) =>
    new Promise((resolve, reject) => {
        const chunks = [];

        res.on('data', (chunk) => chunks.push(chunk));

        res.on('end', () => {
            const raw = chunks.join('');
            if (!json) {
                resolve(raw);
                return;
            }
            try {
                resolve(JSON.parse(raw));
            } catch (err) {
                console.error(raw);
                reject(err);
            }
        });
    });

// Sends one JSON POST to play.vercel.ai at `path` and resolves with the raw
// response body. A randomized Firefox User-Agent is generated on every call
// to make repeated requests harder for the upstream to fingerprint.
const request = (path, data) =>
    new Promise((resolve, reject) => {
        const geckoBuild = Math.floor(Math.random() * 10000000);
        const firefoxVersion = (Math.random() * 200).toFixed(2);
        options.headers['User-Agent'] = `Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/${geckoBuild} Firefox/${firefoxVersion}`;

        const req = https.request({ ...options, path }, (res) => {
            readBody(res).then(resolve).catch(reject);
        });

        req.write(JSON.stringify(data));
        req.end();
    });

/**
 * Sends a completion request to the Vercel AI playground and reassembles the
 * streamed response into a single string.
 *
 * @param {string} text - The fully formatted prompt.
 * @param {object} opts
 * @param {string} opts.model - Model id in "owner:name" form (e.g. "anthropic:claude-v1").
 * @param {number} opts.temperature
 * @param {number} [opts.maxTokens] - Capped at 500; defaults to 500 when omitted.
 * @param {number} opts.frequencyPenalty
 * @param {number} opts.presencePenalty
 * @returns {Promise<string>} The assembled completion text.
 */
async function generate(text, { model, temperature, maxTokens, frequencyPenalty, presencePenalty }) {
    console.log(model);

    const response = await request('/api/generate', {
        prompt: text,
        model,
        temperature,
        // Cap at 500 (upstream limit); also guard against a missing
        // max_tokens, which previously produced Math.min(undefined, 500) = NaN.
        maxTokens: Math.min(maxTokens ?? 500, 500),
        topP: 1,
        frequencyPenalty,
        presencePenalty,
        // BUG FIX: this previously checked 'ahtropic:claude' (typo), so the
        // stop sequence was never applied and Claude could run past the next
        // "Human:" turn.
        stopSequences: model.startsWith('anthropic:claude') ? ['\n\nHuman:'] : [],
    });

    // The playground streams cumulative snapshots, one JSON string per line.
    // A later line normally extends the previous one; keep the longest
    // extension, and append any line that does not extend the current text.
    const lines = response.split('\n').map((l) => {
        try {
            return JSON.parse(l);
        } catch (e) {
            return l; // not valid JSON — keep the raw line
        }
    });

    let currentLine = '';
    for (const line of lines) {
        if (currentLine && line.startsWith(currentLine)) {
            currentLine = line;
        } else {
            currentLine += line;
        }
    }

    return currentLine;
}

// Flattens an OpenAI-style message array into a plain-text transcript
// ("System Note: ...\nHuman: ...\nAssistant: ..."), dropping messages whose
// content is empty or whitespace-only, and ends with an open "Assistant: "
// turn for the model to complete. Unknown roles keep their raw role name.
function preparePrompt(messages) {
    const roleNames = new Map([
        ['user', 'Human'],
        ['assistant', 'Assistant'],
        ['system', 'System Note'],
    ]);

    const transcript = messages
        .filter((m) => m.content?.trim())
        .map((m) => `${roleNames.get(m.role) ?? m.role}: ${m.content.trim()}`)
        .join('\n');

    return `${transcript}\nAssistant: `;
}

/**
 * Starts the local OpenAI-compatible proxy server on port 5004.
 *
 * POST /<owner>/<modelName>/... — body is an OpenAI chat request; it is
 * translated into a play.vercel.ai generation call and answered with an
 * OpenAI `chat.completion` payload.
 * Any other method — answered with an OpenAI-style model list so clients
 * can discover the two supported models.
 */
async function main() {
    const server = http.createServer(async (req, res) => {
        res.setHeader('Content-Type', 'application/json');
        try {
            if (req.method.toUpperCase() === 'POST') {
                const body = await readBody(req, true);
                // URL shape: /<owner>/<modelName>/..., e.g. /anthropic/claude-v1/v1
                const [, owner, modelName] = req.url.split('/');
                const model = `${owner}:${modelName}`;

                const {
                    messages,
                    temperature,
                    max_tokens,
                    presence_penalty,
                    frequency_penalty,
                } = body;

                const prompt = preparePrompt(messages);
                const result = await generate(prompt, {
                    model,
                    temperature,
                    maxTokens: max_tokens,
                    frequencyPenalty: frequency_penalty,
                    presencePenalty: presence_penalty,
                });

                res.write(JSON.stringify({
                    id: `chatcmpl-${(Math.random().toString(36).slice(2))}`,
                    object: 'chat.completion',
                    model: modelName,
                    choices: [{
                        message: {
                            role: 'assistant',
                            content: result,
                        },
                        finish_reason: 'stop',
                        index: 0,
                    }]
                }));
            } else {
                res.write(JSON.stringify({
                    object: 'list',
                    data: [
                        { id: 'gpt-4', object: 'model', created: Date.now(), owned_by: 'openai', permission: [], root: 'gpt-4', parent: null },
                        { id: 'claude-v1', object: 'model', created: Date.now(), owned_by: 'anthropic', permission: [], root: 'claude-v1', parent: null },
                    ]
                }));
            }
        } catch (err) {
            // BUG FIX: errors in the async handler previously left the
            // response open (hanging the client) and surfaced as an
            // unhandled promise rejection, which is fatal on modern Node.
            console.error(err);
            if (!res.headersSent) res.statusCode = 500;
            res.write(JSON.stringify({ error: { message: String(err?.message ?? err) } }));
        }
        res.end();
    });

    server.listen(5004, '0.0.0.0', () => {
        console.log(`proxy for claude-v1: 'http://127.0.0.1:5004/anthropic/claude-v1/v1'`);
        console.log(`proxy for gpt-4: 'http://127.0.0.1:5004/openai/gpt-4/v1'`);
    });
}

// Entry point: start the proxy; log any startup failure instead of letting
// the rejection go unhandled.
main().catch(console.error);
Edit Report
Pub: 19 Apr 2023 22:09 UTC
Edit: 19 Apr 2023 23:11 UTC
Views: 8603