From e487f6d7331a0ba1c03e5f878ff20c02b0560c0d Mon Sep 17 00:00:00 2001
From: RecentRichRail <80782821+RecentRichRail@users.noreply.github.com>
Date: Tue, 28 Mar 2023 12:37:56 -0400
Subject: [PATCH] Create gpt.js

---
 searx/static/themes/simple/js/gpt.js | 44 ++++++++++++++++++++++++++++
 1 file changed, 44 insertions(+)
 create mode 100644 searx/static/themes/simple/js/gpt.js

diff --git a/searx/static/themes/simple/js/gpt.js b/searx/static/themes/simple/js/gpt.js
new file mode 100644
index 000000000..ae6cd57b2
--- /dev/null
+++ b/searx/static/themes/simple/js/gpt.js
@@ -0,0 +1,44 @@
+document.addEventListener('DOMContentLoaded', () => {
+  const chatgptApiKey = '{{ chatgpt_api_key_var }}';
+  const query = '{{ q }}';
+  const gptResultsContent = document.getElementById('gptResultsContent');
+
+  if (chatgptApiKey && query && gptResultsContent) {
+    fetchGPTResults(chatgptApiKey, query).then((results) => {
+      if (results) {
+        gptResultsContent.innerHTML = results;
+      }
+    });
+  }
+});
+
+async function fetchGPTResults(apiKey, query) {
+  const apiEndpoint = 'https://api.openai.com/v1/engines/davinci-codex/completions';
+  const prompt = `Generate a brief summary for the following search query: ${query}`;
+
+  try {
+    const response = await fetch(apiEndpoint, {
+      method: 'POST',
+      headers: {
+        'Content-Type': 'application/json',
+        'Authorization': `Bearer ${apiKey}`,
+      },
+      body: JSON.stringify({
+        prompt: prompt,
+        max_tokens: 50,
+        n: 1,
+        stop: null,
+        temperature: 0.5,
+      }),
+    });
+
+    const data = await response.json();
+    if (data.choices && data.choices.length > 0) {
+      return data.choices[0].text.trim();
+    }
+  } catch (error) {
+    console.error('Error fetching GPT results:', error);
+  }
+
+  return null;
+}
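
Note on the API call: the patch posts to https://api.openai.com/v1/engines/davinci-codex/completions, the legacy engine-scoped Completions route, and the davinci-codex engine has since been retired by OpenAI. The sketch below is not part of the patch; it shows what the same summary request could look like against the Chat Completions endpoint, assuming the gpt-3.5-turbo model and the documented choices[0].message.content response shape. The function name fetchChatGPTSummary is illustrative only.

// Minimal sketch, assuming the gpt-3.5-turbo model on the Chat Completions API.
// Not part of the patch above; endpoint, payload fields and response shape follow
// OpenAI's documented HTTP API.
async function fetchChatGPTSummary(apiKey, query) {
  const apiEndpoint = 'https://api.openai.com/v1/chat/completions';
  const prompt = `Generate a brief summary for the following search query: ${query}`;

  try {
    const response = await fetch(apiEndpoint, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${apiKey}`,
      },
      body: JSON.stringify({
        // Chat Completions takes a message list instead of a bare prompt string.
        model: 'gpt-3.5-turbo',
        messages: [{ role: 'user', content: prompt }],
        max_tokens: 50,
        temperature: 0.5,
      }),
    });

    const data = await response.json();
    if (data.choices && data.choices.length > 0) {
      // Chat Completions returns the text under message.content, not choices[0].text.
      return data.choices[0].message.content.trim();
    }
  } catch (error) {
    console.error('Error fetching chat completion:', error);
  }

  return null;
}

If adopted, the DOMContentLoaded handler in the patch would call fetchChatGPTSummary(chatgptApiKey, query) in place of fetchGPTResults.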