Upgrade openai to 4.24.1 #173934

Merged (14 commits) on Jan 2, 2024

package.json (2 changes: 1 addition & 1 deletion)

@@ -1010,7 +1010,7 @@
"normalize-path": "^3.0.0",
"object-hash": "^1.3.1",
"object-path-immutable": "^3.1.1",
"openai": "^3.3.0",
"openai": "^4.24.1",
"openpgp": "5.10.1",
"opn": "^5.5.0",
"ora": "^4.0.4",
Expand Down
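
Context for reviewers: the bump from ^3.3.0 to ^4.24.1 crosses a major version, and v4 replaces the axios-based Configuration/OpenAIApi surface with a single fetch-based client. A minimal sketch of the upstream migration, with placeholder model and prompt values (not taken from this PR):

// openai v3 (removed): axios-based client
// import { Configuration, OpenAIApi } from 'openai';
// const v3 = new OpenAIApi(new Configuration({ apiKey: process.env.OPENAI_API_KEY }));
// const res = await v3.createChatCompletion({ model: 'gpt-4', messages });
// const completion = res.data; // axios wraps the payload in `data`

// openai v4 (new): fetch-based client; the call resolves to the payload directly
import OpenAI from 'openai';

async function example() {
  const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
  const completion = await client.chat.completions.create({
    model: 'gpt-4',
    messages: [{ role: 'user', content: 'Hello' }],
  });
  return completion.choices[0].message.content;
}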

@@ -6,7 +6,7 @@
  */

 import Boom from '@hapi/boom';
-import { CreateChatCompletionResponse } from 'openai';
+import type OpenAI from 'openai';
 import { Readable } from 'stream';
 import { Plugin, CoreSetup } from '@kbn/core/server';
 import { schema } from '@kbn/config-schema';
@@ -112,7 +112,7 @@ export class GenAiStreamingResponseExamplePlugin implements Plugin<void, void> {
       }

       return response.ok({
-        body: executeResult.data as CreateChatCompletionResponse | Readable,
+        body: executeResult.data as OpenAI.ChatCompletion | Readable,
       });
     }
   );
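
The pattern repeated throughout this PR: v4 no longer exports standalone request/response interfaces, so `import type OpenAI from 'openai'` pulls in the namespace (type-only, no runtime import) and the old names map onto it roughly as sketched below, along with the instanceof narrowing the handlers rely on:

import type OpenAI from 'openai';
import { Readable } from 'stream';

// Approximate v3 -> v4 type mapping used in this PR:
//   CreateChatCompletionResponse -> OpenAI.ChatCompletion
//   CreateChatCompletionRequest  -> OpenAI.ChatCompletionCreateParams
//   ChatCompletionRequestMessage -> OpenAI.ChatCompletionMessageParam

// Narrow the streaming/non-streaming union the same way the handlers do.
function isStream(body: OpenAI.ChatCompletion | Readable): body is Readable {
  return body instanceof Readable;
}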

@@ -9,7 +9,7 @@ import { encode } from 'gpt-tokenizer';
 import { isEmpty, omitBy } from 'lodash';
 import { Readable } from 'stream';
 import { finished } from 'stream/promises';
-import { CreateChatCompletionRequest } from 'openai';
+import type OpenAI from 'openai';
 import { Logger } from '@kbn/logging';

 export async function getTokenCountFromOpenAIStream({
@@ -25,17 +25,19 @@
   prompt: number;
   completion: number;
 }> {
-  const chatCompletionRequest = JSON.parse(body) as CreateChatCompletionRequest;
+  const chatCompletionRequest = JSON.parse(
+    body
+  ) as OpenAI.ChatCompletionCreateParams.ChatCompletionCreateParamsStreaming;

   // per https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
   const tokensFromMessages = encode(
     chatCompletionRequest.messages
       .map(
         (msg) =>
           `<|start|>${msg.role}\n${msg.content}\n${
-            msg.name
+            'name' in msg
               ? msg.name
-              : msg.function_call
+              : 'function_call' in msg && msg.function_call
               ? msg.function_call.name + '\n' + msg.function_call.arguments
               : ''
           }<|end|>`
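
For readers unfamiliar with this helper: it rebuilds the prompt text along the lines of the linked cookbook recipe and counts tokens with gpt-tokenizer. A self-contained sketch of the same idea, assuming the `in`-based narrowing shown above; note the cookbook adds a few tokens of per-message overhead that this estimate ignores:

import { encode } from 'gpt-tokenizer';
import type OpenAI from 'openai';

// Estimate prompt tokens for a streaming chat request by serializing each
// message with the same <|start|>/<|end|> framing as the patched helper.
function estimatePromptTokens(
  request: OpenAI.ChatCompletionCreateParams.ChatCompletionCreateParamsStreaming
): number {
  return request.messages.reduce((count, msg) => {
    const extra =
      'name' in msg && msg.name
        ? msg.name
        : 'function_call' in msg && msg.function_call
        ? `${msg.function_call.name}\n${msg.function_call.arguments}`
        : '';
    return count + encode(`<|start|>${msg.role}\n${msg.content}\n${extra}<|end|>`).length;
  }, 0);
}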

x-pack/plugins/observability_ai_assistant/common/types.ts (9 changes: 3 additions & 6 deletions)

@@ -6,15 +6,12 @@
  */

 import type { JSONSchema } from 'json-schema-to-ts';
-import type {
-  CreateChatCompletionResponse,
-  CreateChatCompletionResponseChoicesInner,
-} from 'openai';
+import type OpenAI from 'openai';
 import type { Observable } from 'rxjs';

-export type CreateChatCompletionResponseChunk = Omit<CreateChatCompletionResponse, 'choices'> & {
+export type CreateChatCompletionResponseChunk = Omit<OpenAI.ChatCompletionChunk, 'choices'> & {
   choices: Array<
-    Omit<CreateChatCompletionResponseChoicesInner, 'message'> & {
+    Omit<OpenAI.ChatCompletionChunk.Choice, 'message'> & {
       delta: { content?: string; function_call?: { name?: string; arguments?: string } };
     }
   >;
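
The widened chunk type keeps the v3-era delta shape while borrowing the envelope from v4's ChatCompletionChunk. A hedged sketch of how a consumer might fold these deltas into one message (the accumulator shape here is illustrative, not from this PR):

import type OpenAI from 'openai';

type ResponseChunk = Omit<OpenAI.ChatCompletionChunk, 'choices'> & {
  choices: Array<
    Omit<OpenAI.ChatCompletionChunk.Choice, 'message'> & {
      delta: { content?: string; function_call?: { name?: string; arguments?: string } };
    }
  >;
};

// Fold a stream of chunks into final content / function-call arguments.
function appendDelta(
  acc: { content: string; functionArgs: string },
  chunk: ResponseChunk
): { content: string; functionArgs: string } {
  const delta = chunk.choices[0]?.delta;
  return {
    content: acc.content + (delta?.content ?? ''),
    functionArgs: acc.functionArgs + (delta?.function_call?.arguments ?? ''),
  };
}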

@@ -288,6 +288,8 @@ export function registerEsqlFunction({
         delta: {
           content: cachedContent,
         },
+        index: 0,
+        finish_reason: null,
       },
     ],
   });
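
The new `index` and `finish_reason` fields in this fixture (and in the similar fixtures later in the PR) are needed because v4's ChatCompletionChunk.Choice declares both as required, where the v3 chunk choices did not. A minimal literal that typechecks against v4, with placeholder id/model values:

import type OpenAI from 'openai';

const chunk: OpenAI.ChatCompletionChunk = {
  id: 'chatcmpl-placeholder',
  created: Math.floor(Date.now() / 1000),
  model: 'gpt-4',
  object: 'chat.completion.chunk',
  choices: [
    {
      delta: { content: 'cached response' },
      index: 0, // required in v4
      finish_reason: null, // required in v4; null while the stream is open
    },
  ],
};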

@@ -7,7 +7,7 @@
 import { notImplemented } from '@hapi/boom';
 import * as t from 'io-ts';
 import { toBooleanRt } from '@kbn/io-ts-utils';
-import type { CreateChatCompletionResponse } from 'openai';
+import type OpenAI from 'openai';
 import { Readable } from 'stream';
 import { createObservabilityAIAssistantServerRoute } from '../create_observability_ai_assistant_server_route';
 import { messageRt } from '../runtime_types';
@@ -38,7 +38,7 @@ const chatRoute = createObservabilityAIAssistantServerRoute({
     }),
     t.partial({ query: t.type({ stream: toBooleanRt }) }),
   ]),
-  handler: async (resources): Promise<Readable | CreateChatCompletionResponse> => {
+  handler: async (resources): Promise<Readable | OpenAI.ChatCompletion> => {
     const { request, params, service } = resources;

     const client = await service.getClient({ request });
@@ -93,7 +93,7 @@ const chatCompleteRoute = createObservabilityAIAssistantServerRoute({
       }),
     ]),
   }),
-  handler: async (resources): Promise<Readable | CreateChatCompletionResponse> => {
+  handler: async (resources): Promise<Readable | OpenAI.Chat.ChatCompletion> => {
     const { request, params, service } = resources;

     const client = await service.getClient({ request });
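
One route above returns OpenAI.ChatCompletion and the other OpenAI.Chat.ChatCompletion. These appear to be the same type, since v4 re-exports the Chat namespace types at the top level; a compile-time sketch of that equivalence:

import type OpenAI from 'openai';

// Mutual-assignability check: compiles only if the two aliases name the same type.
type Equal<A, B> = [A] extends [B] ? ([B] extends [A] ? true : false) : false;
export const aliasesMatch: Equal<OpenAI.ChatCompletion, OpenAI.Chat.ChatCompletion> = true;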

@@ -9,7 +9,7 @@ import type { ElasticsearchClient, Logger } from '@kbn/core/server';
 import type { DeeplyMockedKeys } from '@kbn/utility-types-jest';
 import { waitFor } from '@testing-library/react';
 import { last, merge, repeat } from 'lodash';
-import { ChatCompletionResponseMessage } from 'openai';
+import type OpenAI from 'openai';
 import { Subject } from 'rxjs';
 import { EventEmitter, PassThrough, type Readable } from 'stream';
 import { finished } from 'stream/promises';
@@ -51,6 +51,8 @@ function createLlmSimulator() {
       choices: [
         {
           delta: msg,
+          index: 0,
+          finish_reason: null,
         },
       ],
     };
@@ -974,6 +976,7 @@ describe('Observability AI Assistant client', () => {
         model: 'gpt-4',
         object: 'chat.completion.chunk',
         choices: [
+          // @ts-expect-error
           {
             delta: {
               content: 'Hello',
@@ -1024,6 +1027,7 @@ describe('Observability AI Assistant client', () => {
         model: 'gpt-4',
         object: 'chat.completion.chunk',
         choices: [
+          // @ts-expect-error
           {
             delta: {
               content: 'Hello',
@@ -1342,9 +1346,9 @@ describe('Observability AI Assistant client', () => {
     it('truncates the message', () => {
       const body = JSON.parse(
         (actionsClientMock.execute.mock.lastCall![0].params as any).subActionParams.body
-      );
+      ) as OpenAI.Chat.ChatCompletionCreateParams;

-      const parsed = JSON.parse(last(body.messages as ChatCompletionResponseMessage[])!.content!);
+      const parsed = JSON.parse(last(body.messages)!.content! as string);

       expect(parsed).toEqual({
         message: 'Function response exceeded the maximum length allowed and was truncated',

@@ -10,11 +10,7 @@ import type { ActionsClient } from '@kbn/actions-plugin/server';
 import type { ElasticsearchClient } from '@kbn/core/server';
 import type { Logger } from '@kbn/logging';
 import type { PublicMethodsOf } from '@kbn/utility-types';
-import type {
-  ChatCompletionRequestMessage,
-  CreateChatCompletionRequest,
-  CreateChatCompletionResponse,
-} from 'openai';
+import type OpenAI from 'openai';
 import { decode, encode } from 'gpt-tokenizer';
 import { compact, isEmpty, last, merge, omit, pick, take } from 'lodash';
 import { isObservable, lastValueFrom } from 'rxjs';
@@ -419,8 +415,12 @@ export class ObservabilityAIAssistantClient {
     functionCall?: string;
     stream?: TStream;
     signal: AbortSignal;
-  }): Promise<TStream extends false ? CreateChatCompletionResponse : Readable> => {
-    const messagesForOpenAI: ChatCompletionRequestMessage[] = compact(
+  }): Promise<TStream extends false ? OpenAI.ChatCompletion : Readable> => {
+    const messagesForOpenAI: Array<
+      Omit<OpenAI.ChatCompletionMessageParam, 'role'> & {
+        role: MessageRole;
+      }
+    > = compact(
       messages
         .filter((message) => message.message.content || message.message.function_call?.name)
         .map((message) => {
@@ -440,8 +440,8 @@

     const functionsForOpenAI = functions;

-    const request: Omit<CreateChatCompletionRequest, 'model'> & { model?: string } = {
-      messages: messagesForOpenAI,
+    const request: Omit<OpenAI.ChatCompletionCreateParams, 'model'> & { model?: string } = {
+      messages: messagesForOpenAI as OpenAI.ChatCompletionCreateParams['messages'],
       ...(stream ? { stream: true } : {}),
       ...(!!functions?.length ? { functions: functionsForOpenAI } : {}),
       temperature: 0,
@@ -468,7 +468,7 @@

     const response = stream
       ? (executeResult.data as Readable)
-      : (executeResult.data as CreateChatCompletionResponse);
+      : (executeResult.data as OpenAI.ChatCompletion);

     if (response instanceof Readable) {
       signal.addEventListener('abort', () => response.destroy());
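
The Omit<'model'> & { model?: string } shape above exists because the connector, not this client, supplies the model. A condensed sketch of the pattern under that assumption (the Kibana-side MessageRole mapping is elided):

import type OpenAI from 'openai';

// The request is typed without a required model; the GenAI connector fills it in.
type ChatRequest = Omit<OpenAI.ChatCompletionCreateParams, 'model'> & { model?: string };

function buildRequest(
  messages: OpenAI.ChatCompletionMessageParam[],
  stream: boolean
): ChatRequest {
  return {
    messages,
    ...(stream ? { stream: true } : {}),
    temperature: 0,
  };
}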

@@ -21,6 +21,8 @@ export function createOpenAiChunk(
     choices: [
       {
         delta: msg,
+        index: 0,
+        finish_reason: null,
       },
     ],
   };

@@ -14,7 +14,7 @@ import {
   StreamingChatResponseEvent,
   StreamingChatResponseEventType,
 } from '@kbn/observability-ai-assistant-plugin/common/conversation_complete';
-import { CreateChatCompletionRequest } from 'openai';
+import type OpenAI from 'openai';
 import { createLlmProxy, LlmProxy } from '../../common/create_llm_proxy';
 import { createOpenAiChunk } from '../../common/create_openai_chunk';
 import { FtrProviderContext } from '../../common/ftr_provider_context';
@@ -145,12 +145,16 @@ export default function ApiTest({ getService }: FtrProviderContext) {
       before(async () => {
         const titleInterceptor = proxy.intercept(
           'title',
-          (body) => (JSON.parse(body) as CreateChatCompletionRequest).messages.length === 1
+          (body) =>
+            (JSON.parse(body) as OpenAI.Chat.ChatCompletionCreateParamsNonStreaming).messages
+              .length === 1
         );

         const conversationInterceptor = proxy.intercept(
           'conversation',
-          (body) => (JSON.parse(body) as CreateChatCompletionRequest).messages.length !== 1
+          (body) =>
+            (JSON.parse(body) as OpenAI.Chat.ChatCompletionCreateParamsNonStreaming).messages
+              .length !== 1
         );

         const responsePromise = new Promise<Response>((resolve, reject) => {

@@ -6,7 +6,7 @@
  */

 import expect from '@kbn/expect';
-import { CreateChatCompletionRequest } from 'openai';
+import type OpenAI from 'openai';
 import {
   createLlmProxy,
   LlmProxy,
@@ -147,12 +147,16 @@ export default function ApiTest({ getService, getPageObjects }: FtrProviderContext) {
       before(async () => {
         const titleInterceptor = proxy.intercept(
           'title',
-          (body) => (JSON.parse(body) as CreateChatCompletionRequest).messages.length === 1
+          (body) =>
+            (JSON.parse(body) as OpenAI.Chat.ChatCompletionCreateParamsNonStreaming)
+              .messages.length === 1
         );

         const conversationInterceptor = proxy.intercept(
           'conversation',
-          (body) => (JSON.parse(body) as CreateChatCompletionRequest).messages.length !== 1
+          (body) =>
+            (JSON.parse(body) as OpenAI.Chat.ChatCompletionCreateParamsNonStreaming)
+              .messages.length !== 1
         );

         await testSubjects.setValue(ui.pages.conversations.chatInput, 'hello');

yarn.lock (25 changes: 5 additions & 20 deletions)

@@ -11794,13 +11794,6 @@ axe-core@^4.8.2:
resolved "https://registry.yarnpkg.com/axe-core/-/axe-core-4.8.2.tgz#2f6f3cde40935825cf4465e3c1c9e77b240ff6ae"
integrity sha512-/dlp0fxyM3R8YW7MFzaHWXrf4zzbr0vaYb23VBFCl83R7nWNPg/yaQw2Dc8jzCMmDVLhSdzH8MjrsuIUuvX+6g==

axios@^0.26.0:
version "0.26.1"
resolved "https://registry.yarnpkg.com/axios/-/axios-0.26.1.tgz#1ede41c51fcf51bbbd6fd43669caaa4f0495aaa9"
integrity sha512-fPwcX4EvnSHuInCMItEhAGnaSEXRBjtzh9fOtsE6E1G6p7vl7edEeZe11QHf18+6+9gR5PbKV/sGKNaD8YaMeA==
dependencies:
follow-redirects "^1.14.8"

axios@^1.3.4, axios@^1.6.0, axios@^1.6.3:
version "1.6.3"
resolved "https://registry.npmjs.org/axios/-/axios-1.6.3.tgz#7f50f23b3aa246eff43c54834272346c396613f4"
@@ -17155,7 +17148,7 @@ folktale@2.3.2:
resolved "https://registry.yarnpkg.com/folktale/-/folktale-2.3.2.tgz#38231b039e5ef36989920cbf805bf6b227bf4fd4"
integrity sha512-+8GbtQBwEqutP0v3uajDDoN64K2ehmHd0cjlghhxh0WpcfPzAIjPA03e1VvHlxL02FVGR0A6lwXsNQKn3H1RNQ==

follow-redirects@^1.0.0, follow-redirects@^1.14.8, follow-redirects@^1.15.0:
follow-redirects@^1.0.0, follow-redirects@^1.15.0:
version "1.15.2"
resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.2.tgz#b460864144ba63f2681096f274c4e57026da2c13"
integrity sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==
@@ -23448,18 +23441,10 @@ open@^8.0.9, open@^8.4.0:
   is-docker "^2.1.1"
   is-wsl "^2.2.0"

-openai@^3.3.0:
-  version "3.3.0"
-  resolved "https://registry.yarnpkg.com/openai/-/openai-3.3.0.tgz#a6408016ad0945738e1febf43f2fccca83a3f532"
-  integrity sha512-uqxI/Au+aPRnsaQRe8CojU0eCR7I0mBiKjD3sNMzY6DaC1ZVrc85u98mtJW6voDug8fgGN+DIZmTDxTthxb7dQ==
-  dependencies:
-    axios "^0.26.0"
-    form-data "^4.0.0"
-
-openai@^4.17.0:
-  version "4.17.5"
-  resolved "https://registry.yarnpkg.com/openai/-/openai-4.17.5.tgz#096655741965656ec969731e97d4bce880112d66"
-  integrity sha512-SDgA933/QOjISCgWRc/JQhY1HweYZ6FOie3bWrCpj09FA5xIlaomldbyzICHNjtkh7SWEmGYFjRHIDtuwr+eTw==
+openai@^4.17.0, openai@^4.24.1:
+  version "4.24.1"
+  resolved "https://registry.yarnpkg.com/openai/-/openai-4.24.1.tgz#3759001eca835228289fcf18c1bd8d35dae538ba"
+  integrity sha512-ezm/O3eiZMnyBqirUnWm9N6INJU1WhNtz+nK/Zj/2oyKvRz9pgpViDxa5wYOtyGYXPn1sIKBV0I/S4BDhtydqw==
   dependencies:
     "@types/node" "^18.11.18"
     "@types/node-fetch" "^2.6.4"