Skip to content

Commit ff96e89

Browse files
author
Eric
committed
Version Bump 0.2.2
1 parent 209fd44 commit ff96e89

File tree

6 files changed

+97
-34
lines changed

6 files changed

+97
-34
lines changed

README.md

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,3 @@
1-
[![arduino-library-badge](https://www.ardu-badge.com/badge/ChatGPT_Client.svg)](https://www.ardu-badge.com/ChatGPT_Client)
2-
31
## Overview
42
The ChatGPT Arduino Library provides a convenient way to interact with the OpenAI GPT models from Arduino environments, such as ESP32 devices. With this library, you can easily send text and vision queries to the ChatGPT API and receive responses directly in your Arduino projects.
53

examples/Arduino_BearSSLExample/Arduino_BearSSLExample.ino

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
* Project ChatGPT Client For Arduino
33
* Description: For HTTPS connection using ArduinoBearSSL
44
* Author: Eric Nam
5-
* Date: 05-19-2024
5+
* Date: 07-17-2024
66
*/
77

88
//#define ARDUINO_DISABLE_ECCX08
@@ -35,11 +35,12 @@ void exampleTextQuestion() {
3535
max_tokens: Maximum number of tokens to generate in the response.
3636
content_only: Flag indicating whether to extract only the content of the response. (e.g., true - answer only, false - full response)
3737
result: Reference to a String variable to store the result of the API call.
38+
mem_dynamic: Select whether to use dynamic memory or static memory when configuring the post body (default: dynamic memory)
3839
*/
3940

4041
String result;
4142
Serial.println("\n\n[ChatGPT] - Asking a Text Question");
42-
if (chatGPT_Client.chat_message("gpt-3.5-turbo", "user", "What is the best feature of GPT-4o?", 100, false, result)) {
43+
if (chatGPT_Client.chat_message("gpt-3.5-turbo", "user", "What is the best feature of GPT-4o?", 100, false, result, false)) {
4344
Serial.print("[ChatGPT] Response: ");
4445
Serial.println(result);
4546
} else {
@@ -48,7 +49,7 @@ void exampleTextQuestion() {
4849
}
4950
}
5051

51-
void exampleVisionQuestionBaee64() {
52+
void exampleVisionQuestionBase64() {
5253
/*
5354
model: Model to use for generating the response (e.g., "gpt-4o").
5455
role: Role of the message sender (e.g., "user" or "assistant").
@@ -60,6 +61,7 @@ void exampleVisionQuestionBaee64() {
6061
max_tokens: Maximum number of tokens to generate in the response.
6162
content_only: Flag indicating whether to extract only the content of the response. (e.g., true - answer only, false - full response)
6263
result: Reference to a String variable to store the result of the API call.
64+
mem_dynamic: Select whether to use dynamic memory or static memory when configuring the post body (default: dynamic memory)
6365
*/
6466

6567
String result;
@@ -110,9 +112,10 @@ void setup() {
110112

111113
Serial.println("[ChatGPT] - Examples");
112114
delay(1000);
113-
exampleTextQuestion();
115+
exampleTextQuestion(); // Post Body in Static Memory
116+
114117
delay(1000);
115-
exampleVisionQuestionWithURL();
118+
exampleVisionQuestionWithURL(); // Post Body in Dynamic Memory
116119
}
117120

118121
void loop() {}

examples/Arduino_SSLClient/Arduino_SSLClient.ino

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
* Project ChatGPT Client For Arduino (Arduino Giga R1, Arduino Portenta H7)
33
* Description: For HTTPS connection using Arduino WiFiSSLClient
44
* Author: Eric Nam
5-
* Date: 05-19-2024
5+
* Date: 07-17-2024
66
*/
77

88
#include <WiFi.h>
@@ -33,11 +33,12 @@ void exampleTextQuestion() {
3333
max_tokens: Maximum number of tokens to generate in the response.
3434
content_only: Flag indicating whether to extract only the content of the response. (e.g., true - answer only, false - full response)
3535
result: Reference to a String variable to store the result of the API call.
36+
mem_dynamic: Select whether to use dynamic memory or static memory when configuring the post body (default: dynamic memory)
3637
*/
3738

3839
String result;
3940
Serial.println("\n\n[ChatGPT] - Asking a Text Question");
40-
if (chatGPT_Client.chat_message("gpt-3.5-turbo", "user", "What is the best feature of GPT-4o?", 100, false, result)) {
41+
if (chatGPT_Client.chat_message("gpt-3.5-turbo", "user", "What is the best feature of GPT-4o?", 100, false, result, false)) {
4142
Serial.print("[ChatGPT] Response: ");
4243
Serial.println(result);
4344
} else {
@@ -46,7 +47,7 @@ void exampleTextQuestion() {
4647
}
4748
}
4849

49-
void exampleVisionQuestionBaee64() {
50+
void exampleVisionQuestionBase64() {
5051
/*
5152
model: Model to use for generating the response (e.g., "gpt-4o").
5253
role: Role of the message sender (e.g., "user" or "assistant").
@@ -58,6 +59,7 @@ void exampleVisionQuestionBaee64() {
5859
max_tokens: Maximum number of tokens to generate in the response.
5960
content_only: Flag indicating whether to extract only the content of the response. (e.g., true - answer only, false - full response)
6061
result: Reference to a String variable to store the result of the API call.
62+
mem_dynamic: Select whether to use dynamic memory or static memory when configuring the post body (default: dynamic memory)
6163
*/
6264

6365
String result;
@@ -112,9 +114,10 @@ void setup() {
112114

113115
Serial.println("[ChatGPT] - Examples");
114116
delay(1000);
115-
exampleTextQuestion();
117+
exampleTextQuestion(); // Post Body in Static Memory
118+
116119
delay(1000);
117-
exampleVisionQuestionWithURL();
120+
exampleVisionQuestionWithURL(); // Post Body in Dynamic Memory
118121
}
119122

120123
void loop() {}

examples/ESP32_WiFiClientSecure/ESP32_WiFiClientSecure.ino

Lines changed: 12 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
* Project ChatGPT Client For ESP32
33
* Description: For HTTPS connection using WiFiClientSecure
44
* Author: Eric Nam
5-
* Date: 05-28-2024
5+
* Date: 07-17-2024
66
*/
77

88
#include <WiFi.h>
@@ -32,20 +32,24 @@ void exampleTextQuestion() {
3232
max_tokens: Maximum number of tokens to generate in the response.
3333
content_only: Flag indicating whether to extract only the content of the response. (e.g., true - answer only, false - full response)
3434
result: Reference to a String variable to store the result of the API call.
35+
mem_dynamic: Select whether to use dynamic memory or static memory when configuring the post body (default: dynamic memory)
3536
*/
3637

3738
String result;
3839
Serial.println("\n\n[ChatGPT] - Asking a Text Question");
39-
if (chatGPT_Client.chat_message("gpt-3.5-turbo", "user", "What is the best feature of GPT-4o?", 100, false, result)) {
40+
if (chatGPT_Client.chat_message("gpt-3.5-turbo", "user", "What is the best feature of GPT-4o?", 100, false, result, false))
41+
{
4042
Serial.print("[ChatGPT] Response: ");
4143
Serial.println(result);
42-
} else {
44+
}
45+
else
46+
{
4347
Serial.print("[ChatGPT] Error: ");
4448
Serial.println(result);
4549
}
4650
}
4751

48-
void exampleVisionQuestionBaee64() {
52+
void exampleVisionQuestionBase64() {
4953
/*
5054
model: Model to use for generating the response (e.g., "gpt-4o").
5155
role: Role of the message sender (e.g., "user" or "assistant").
@@ -57,6 +61,7 @@ void exampleVisionQuestionBaee64() {
5761
max_tokens: Maximum number of tokens to generate in the response.
5862
content_only: Flag indicating whether to extract only the content of the response. (e.g., true - answer only, false - full response)
5963
result: Reference to a String variable to store the result of the API call.
64+
mem_dynamic: Select whether to use dynamic memory or static memory when configuring the post body (default: dynamic memory)
6065
*/
6166

6267
String result;
@@ -100,9 +105,10 @@ void setup() {
100105

101106
Serial.println("[ChatGPT] - Examples");
102107
delay(1000);
103-
exampleTextQuestion();
108+
exampleTextQuestion(); // Post Body in Static Memory
109+
104110
delay(1000);
105-
exampleVisionQuestionWithURL();
111+
exampleVisionQuestionWithURL(); // Post Body in Dynamic Memory
106112
}
107113

108114
void loop() {}

library.properties

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
name=ChatGPT_Client
2-
version=0.2.1
2+
version=0.2.2
33
author=Eric Nam <[email protected]>
44
maintainer=Eric Nam <[email protected]>
55
sentence=Library for communication with ChatGPT

src/ChatGPT.hpp

Lines changed: 68 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
* Project ChatGPT Client
33
* Description: The official method using API Key for communication with ChatGPT
44
* Author: Eric Nam
5-
* Date: 05-19-2024
5+
* Date: 07-17-2024
66
*/
77

88
#ifndef __CHATGPT__
@@ -22,7 +22,6 @@ class ChatGPT
2222
* - api_key_in: The API key for authentication.
2323
* - timeout_in: The timeout duration for requests.
2424
*/
25-
ChatGPT() {}
2625

2726
ChatGPT(T *_client,
2827
const char *api_version_in,
@@ -50,6 +49,7 @@ class ChatGPT
5049
* - max_tokens: The maximum number of tokens to generate.
5150
* - content_only: If true, extracts and returns only the content from the response.
5251
* - result: Stores the response from the API.
52+
* - mem_dynamic: Select Dynamic/Static Memory (Default: Dynamic Memory)
5353
*
5454
* Returns:
5555
* - True if the request is successful, false otherwise.
@@ -64,20 +64,46 @@ class ChatGPT
6464
const char *detail,
6565
int max_tokens,
6666
bool content_only,
67-
String &result)
67+
String &result,
68+
bool mem_dynamic = true)
6869
{
69-
int post_body_size = snprintf(nullptr, 0,
70-
"{\"model\": \"%s\", \"messages\": [{\"role\": \"%s\", \"content\": [{\"type\": \"%s\", \"text\": \"%s\"}, {\"type\": \"%s\", \"image_url\": {\"url\": \"%s\", \"detail\": \"%s\"}}]}], \"max_tokens\": %d}",
71-
model, role, type, text, image_type, image_url, detail, max_tokens) +
72-
1;
70+
char *post_body = nullptr; // Initialize post_body pointer
71+
int post_body_size = 0;
72+
if (mem_dynamic)
73+
{
74+
// Calculate the required size for dynamic allocation
75+
post_body_size = snprintf(nullptr, 0,
76+
"{\"model\": \"%s\", \"messages\": [{\"role\": \"%s\", \"content\": [{\"type\": \"%s\", \"text\": \"%s\"}, {\"type\": \"%s\", \"image_url\": {\"url\": \"%s\", \"detail\": \"%s\"}}]}], \"max_tokens\": %d}",
77+
model, role, type, text, image_type, image_url, detail, max_tokens) + 1;
78+
post_body = new char[post_body_size];
79+
if (post_body == nullptr)
80+
{
81+
result = "[ERR] Memory allocation failed!";
82+
return false;
83+
}
84+
}
85+
else
86+
{
87+
// Use a static buffer with a fixed size
88+
static const int static_buffer_size = 512;
89+
char static_post_body[static_buffer_size];
90+
post_body_size = static_buffer_size;
91+
post_body = static_post_body;
92+
}
7393

74-
char *post_body = new char[post_body_size];
94+
// Format the post_body string
7595
snprintf(post_body, post_body_size,
7696
"{\"model\": \"%s\", \"messages\": [{\"role\": \"%s\", \"content\": [{\"type\": \"%s\", \"text\": \"%s\"}, {\"type\": \"%s\", \"image_url\": {\"url\": \"%s\", \"detail\": \"%s\"}}]}], \"max_tokens\": %d}",
7797
model, role, type, text, image_type, image_url, detail, max_tokens);
7898

99+
// Call the _post function
79100
bool success = _postStream(post_body, content_only, result);
80-
delete[] post_body;
101+
102+
// Free dynamic memory if allocated
103+
if (mem_dynamic)
104+
{
105+
delete[] post_body;
106+
}
81107
return success;
82108
}
83109

@@ -92,6 +118,7 @@ class ChatGPT
92118
* - max_tokens: The maximum number of tokens to generate.
93119
* - content_only: If true, extracts and returns only the content from the response.
94120
* - result: Stores the response from the API.
121+
* - mem_dynamic: Select Dynamic/Static Memory (Default: Dynamic Memory)
95122
*
96123
* Returns:
97124
* - True if the request is successful, false otherwise.
@@ -102,17 +129,43 @@ class ChatGPT
102129
const char *content,
103130
int max_tokens,
104131
bool content_only,
105-
String &result)
132+
String &result,
133+
bool mem_dynamic = true)
106134
{
107-
int post_body_size = snprintf(nullptr, 0, "{\"model\": \"%s\", \"max_tokens\": %d, \"messages\": [{\"role\": \"%s\", \"content\": \"%s\"}]}", model, max_tokens, role, content) + 1;
108-
char *post_body = new char[post_body_size];
135+
char *post_body = nullptr; // Initialize post_body pointer
136+
int post_body_size = 0;
137+
138+
if (mem_dynamic)
139+
{
140+
// Calculate the required size for dynamic allocation
141+
post_body_size = snprintf(nullptr, 0, "{\"model\": \"%s\", \"max_tokens\": %d, \"messages\": [{\"role\": \"%s\", \"content\": \"%s\"}]}", model, max_tokens, role, content) + 1;
142+
post_body = new char[post_body_size];
143+
if (post_body == nullptr)
144+
{
145+
result = "[ERR] Memory allocation failed!";
146+
return false;
147+
}
148+
}
149+
else
150+
{
151+
// Use a static buffer with a fixed size
152+
static const int static_buffer_size = 256;
153+
char static_post_body[static_buffer_size];
154+
post_body_size = static_buffer_size;
155+
post_body = static_post_body;
156+
}
109157

110158
// Format the post_body string
111159
snprintf(post_body, post_body_size, "{\"model\": \"%s\", \"max_tokens\": %d, \"messages\": [{\"role\": \"%s\", \"content\": \"%s\"}]}", model, max_tokens, role, content);
112160

113161
// Call the _post function
114162
bool success = _postStream(post_body, content_only, result);
115-
delete[] post_body;
163+
164+
// Free dynamic memory if allocated
165+
if (mem_dynamic)
166+
{
167+
delete[] post_body;
168+
}
116169
return success;
117170
}
118171

@@ -148,15 +201,15 @@ class ChatGPT
148201
return false;
149202
}
150203

151-
int payload_length = strlen(post_body);
204+
size_t payload_length = strlen(post_body);
152205
String auth_header = _get_auth_header(api_key);
153206
String http_request = "POST /" + api_version + "/chat/completions HTTP/1.1\r\n" + auth_header + "\r\n" + "Host: " + host + "\r\n" + "Cache-control: no-cache\r\n" + "User-Agent: ESP32 ChatGPT\r\n" + "Content-Type: application/json\r\n" + "Content-Length: " + String(payload_length) + "\r\n" + "Connection: close\r\n" + "\r\n";
154207

155208
// Send the HTTP request headers
156209
client->print(http_request);
157210

158211
// Send the HTTP request body in chunks
159-
int bytes_sent = 0;
212+
size_t bytes_sent = 0;
160213
while (bytes_sent < payload_length)
161214
{
162215
size_t chunk_size = minimum(payload_length - bytes_sent, static_cast<size_t>(1024)); // Adjust chunk size as needed

0 commit comments

Comments
 (0)