diff --git a/app/controllers/discourse_ai/ai_helper/assistant_controller.rb b/app/controllers/discourse_ai/ai_helper/assistant_controller.rb
index c657c639..ce2ab288 100644
--- a/app/controllers/discourse_ai/ai_helper/assistant_controller.rb
+++ b/app/controllers/discourse_ai/ai_helper/assistant_controller.rb
@@ -10,7 +10,7 @@ module DiscourseAi
def prompts
render json:
ActiveModel::ArraySerializer.new(
- DiscourseAi::AiHelper::OpenAiPrompt.new.available_prompts,
+ DiscourseAi::AiHelper::LlmPrompt.new.available_prompts,
root: false,
),
status: 200
@@ -19,20 +19,21 @@ module DiscourseAi
def suggest
raise Discourse::InvalidParameters.new(:text) if params[:text].blank?
- prompt = CompletionPrompt.find_by(name: params[:mode])
+ prompt = CompletionPrompt.find_by(id: params[:mode])
raise Discourse::InvalidParameters.new(:mode) if !prompt || !prompt.enabled?
RateLimiter.new(current_user, "ai_assistant", 6, 3.minutes).performed!
hijack do
render json:
- DiscourseAi::AiHelper::OpenAiPrompt.new.generate_and_send_prompt(
+ DiscourseAi::AiHelper::LlmPrompt.new.generate_and_send_prompt(
prompt,
params[:text],
),
status: 200
end
- rescue DiscourseAi::Inference::OpenAiCompletions::CompletionFailed
+ rescue ::DiscourseAi::Inference::OpenAiCompletions::CompletionFailed,
+ ::DiscourseAi::Inference::AnthropicCompletions::CompletionFailed => e
render_json_error I18n.t("discourse_ai.ai_helper.errors.completion_request_failed"),
status: 502
end
diff --git a/app/models/completion_prompt.rb b/app/models/completion_prompt.rb
index a6c695df..167f9cbc 100644
--- a/app/models/completion_prompt.rb
+++ b/app/models/completion_prompt.rb
@@ -4,16 +4,17 @@ class CompletionPrompt < ActiveRecord::Base
# TODO(roman): Remove sept 2023.
self.ignored_columns = ["value"]
- VALID_ROLES = %w[system user assistant]
-
enum :prompt_type, { text: 0, list: 1, diff: 2 }
validates :messages, length: { maximum: 20 }
validate :each_message_length
- validate :each_message_role
def messages_with_user_input(user_input)
- self.messages << { role: "user", content: user_input }
+ if ::DiscourseAi::AiHelper::LlmPrompt.new.enabled_provider == "openai"
+ self.messages << { role: "user", content: user_input }
+ else
+ self.messages << { "role" => "Input", "content" => "#{user_input}" }
+ end
end
private
@@ -25,14 +26,6 @@ class CompletionPrompt < ActiveRecord::Base
errors.add(:messages, I18n.t("errors.prompt_message_length", idx: idx + 1))
end
end
-
- def each_message_role
- messages.each_with_index do |msg, idx|
- next if VALID_ROLES.include?(msg["role"])
-
- errors.add(:messages, I18n.t("errors.invalid_prompt_role", idx: idx + 1))
- end
- end
end
# == Schema Information
@@ -46,9 +39,10 @@ end
# enabled :boolean default(TRUE), not null
# created_at :datetime not null
# updated_at :datetime not null
-# messages :jsonb not null
+# messages :jsonb
+# provider :text
#
# Indexes
#
-# index_completion_prompts_on_name (name) UNIQUE
+# index_completion_prompts_on_name (name)
#
diff --git a/assets/javascripts/discourse/components/ai-helper.js b/assets/javascripts/discourse/components/ai-helper.js
index 2f3a59af..47da355c 100644
--- a/assets/javascripts/discourse/components/ai-helper.js
+++ b/assets/javascripts/discourse/components/ai-helper.js
@@ -21,6 +21,7 @@ export default class AiHelper extends Component {
@tracked proofreadDiff = null;
@tracked helperOptions = [];
+ prompts = [];
promptTypes = {};
constructor() {
@@ -29,7 +30,11 @@ export default class AiHelper extends Component {
}
async loadPrompts() {
- const prompts = await ajax("/discourse-ai/ai-helper/prompts");
+ let prompts = await ajax("/discourse-ai/ai-helper/prompts");
+
+ prompts.map((p) => {
+ this.prompts[p.id] = p;
+ });
this.promptTypes = prompts.reduce((memo, p) => {
memo[p.name] = p.prompt_type;
@@ -39,7 +44,7 @@ export default class AiHelper extends Component {
this.helperOptions = prompts.map((p) => {
return {
name: p.translated_name,
- value: p.name,
+ value: p.id,
};
});
}
@@ -53,7 +58,9 @@ export default class AiHelper extends Component {
@computed("selected", "selectedTitle", "translatingText", "proofreadingText")
get canSave() {
return (
- (this.promptTypes[this.selected] === LIST && this.selectedTitle) ||
+ (this.selected &&
+ this.prompts[this.selected].prompt_type === LIST &&
+ this.selectedTitle) ||
this.translatingText ||
this.proofreadingText
);
@@ -62,19 +69,26 @@ export default class AiHelper extends Component {
@computed("selected", "translatedSuggestion")
get translatingText() {
return (
- this.promptTypes[this.selected] === TEXT && this.translatedSuggestion
+ this.selected &&
+ this.prompts[this.selected].prompt_type === TEXT &&
+ this.translatedSuggestion
);
}
@computed("selected", "proofReadSuggestion")
get proofreadingText() {
- return this.promptTypes[this.selected] === DIFF && this.proofReadSuggestion;
+ return (
+ this.selected &&
+ this.prompts[this.selected].prompt_type === DIFF &&
+ this.proofReadSuggestion
+ );
}
@computed("selected", "generatedTitlesSuggestions")
get selectingTopicTitle() {
return (
- this.promptTypes[this.selected] === LIST &&
+ this.selected &&
+ this.prompts[this.selected].prompt_type === LIST &&
this.generatedTitlesSuggestions.length > 0
);
}
diff --git a/assets/javascripts/initializers/composer-ai-helper.js b/assets/javascripts/initializers/composer-ai-helper.js
index d6e4aad6..4b50b7ad 100644
--- a/assets/javascripts/initializers/composer-ai-helper.js
+++ b/assets/javascripts/initializers/composer-ai-helper.js
@@ -60,7 +60,7 @@ export default {
const allowedGroups = settings.ai_helper_allowed_groups
.split("|")
.map(parseInt);
- const canUseAssistant =
+ let canUseAssistant =
user && user.groups.some((g) => allowedGroups.includes(g.id));
if (helperEnabled && canUseAssistant) {
diff --git a/config/settings.yml b/config/settings.yml
index b7b9c80d..e548c4a6 100644
--- a/config/settings.yml
+++ b/config/settings.yml
@@ -87,6 +87,8 @@ plugins:
ai_openai_api_key:
default: ""
+ ai_anthropic_api_key:
+ default: ""
composer_ai_helper_enabled:
default: false
@@ -107,6 +109,7 @@ plugins:
choices:
- gpt-3.5-turbo
- gpt-4
+ - claude-v1
ai_embeddings_enabled:
default: false
@@ -165,4 +168,5 @@ plugins:
- long-t5-tglobal-base-16384-book-summary
- gpt-3.5-turbo
- gpt-4
+ - claude-v1
ai_summarization_rate_limit_minutes: 10
diff --git a/db/fixtures/ai-helper/600_completion_prompts.rb b/db/fixtures/ai-helper/600_openai_completion_prompts.rb
similarity index 97%
rename from db/fixtures/ai-helper/600_completion_prompts.rb
rename to db/fixtures/ai-helper/600_openai_completion_prompts.rb
index dcf33eb9..ec38dc8a 100644
--- a/db/fixtures/ai-helper/600_completion_prompts.rb
+++ b/db/fixtures/ai-helper/600_openai_completion_prompts.rb
@@ -1,6 +1,7 @@
# frozen_string_literal: true
CompletionPrompt.seed do |cp|
cp.id = -1
+ cp.provider = "openai"
cp.name = "translate"
cp.prompt_type = CompletionPrompt.prompt_types[:text]
cp.messages = [{ role: "system", content: <<~TEXT }]
@@ -15,6 +16,7 @@ end
CompletionPrompt.seed do |cp|
cp.id = -2
+ cp.provider = "openai"
cp.name = "generate_titles"
cp.prompt_type = CompletionPrompt.prompt_types[:list]
cp.messages = [{ role: "system", content: <<~TEXT }]
@@ -27,6 +29,7 @@ end
CompletionPrompt.seed do |cp|
cp.id = -3
+ cp.provider = "openai"
cp.name = "proofread"
cp.prompt_type = CompletionPrompt.prompt_types[:diff]
cp.messages = [
@@ -83,6 +86,7 @@ end
CompletionPrompt.seed do |cp|
cp.id = -4
+ cp.provider = "openai"
cp.name = "markdown_table"
cp.prompt_type = CompletionPrompt.prompt_types[:diff]
cp.messages = [
diff --git a/db/fixtures/ai-helper/601_anthropic_completion_prompts.rb b/db/fixtures/ai-helper/601_anthropic_completion_prompts.rb
new file mode 100644
index 00000000..aa40a18d
--- /dev/null
+++ b/db/fixtures/ai-helper/601_anthropic_completion_prompts.rb
@@ -0,0 +1,56 @@
+# frozen_string_literal: true
+CompletionPrompt.seed do |cp|
+ cp.id = -101
+ cp.provider = "anthropic"
+ cp.name = "Translate to English"
+ cp.prompt_type = CompletionPrompt.prompt_types[:text]
+ cp.messages = [{ role: "Human", content: <<~TEXT }]
+ I want you to act as an English translator, spelling corrector and improver. I will speak to you
+ in any language and you will detect the language, translate it and answer in the corrected and
+ improved version of my text, in English. I want you to replace my simplified A0-level words and
+ sentences with more beautiful and elegant, upper level English words and sentences.
+ Keep the meaning same, but make them more literary. I will provide you with a text inside <input></input> tags,
+ please put the translation between <ai></ai> tags.
+ TEXT
+end
+
+CompletionPrompt.seed do |cp|
+ cp.id = -102
+ cp.provider = "anthropic"
+ cp.name = "Suggest topic titles"
+ cp.prompt_type = CompletionPrompt.prompt_types[:list]
+ cp.messages = [{ role: "Human", content: <<~TEXT }]
+ I want you to act as a title generator for written pieces. I will provide you with a text inside <input></input> tags,
+ and you will generate five attention-grabbing titles. Please keep the title concise and under 20 words,
+ and ensure that the meaning is maintained. Replies will utilize the language type of the topic.
+ Please put each suggestion between <ai></ai> tags.
+ TEXT
+end
+
+CompletionPrompt.seed do |cp|
+ cp.id = -103
+ cp.provider = "anthropic"
+ cp.name = "Proofread"
+ cp.prompt_type = CompletionPrompt.prompt_types[:diff]
+ cp.messages = [{ role: "Human", content: <<~TEXT }]
+ You are a markdown proofreader. You correct egregious typos and phrasing issues but keep the user's original voice.
+ You do not touch code blocks. I will provide you with text to proofread. If nothing needs fixing, then you will echo the text back.
+
+ Optionally, a user can specify intensity. Intensity 10 is a pedantic English teacher correcting the text.
+ Intensity 1 is a minimal proofreader. By default, you operate at intensity 1.
+ I will provide you with a text inside <input></input> tags,
+ please reply with the corrected text between <ai></ai> tags.
+ TEXT
+end
+
+CompletionPrompt.seed do |cp|
+ cp.id = -104
+ cp.provider = "anthropic"
+ cp.name = "Convert to table"
+ cp.prompt_type = CompletionPrompt.prompt_types[:diff]
+ cp.messages = [{ role: "Human", content: <<~TEXT }]
+ You are a markdown table formatter, I will provide you text and you will format it into a markdown table.
+ I will provide you with a text inside <input></input> tags,
+ please reply with the corrected text between <ai></ai> tags.
+ TEXT
+end
diff --git a/db/migrate/20230406135943_add_provider_to_completion_prompts.rb b/db/migrate/20230406135943_add_provider_to_completion_prompts.rb
new file mode 100644
index 00000000..516419a7
--- /dev/null
+++ b/db/migrate/20230406135943_add_provider_to_completion_prompts.rb
@@ -0,0 +1,22 @@
+# frozen_string_literal: true
+
+class AddProviderToCompletionPrompts < ActiveRecord::Migration[7.0]
+ def up
+ remove_index :completion_prompts, name: "index_completion_prompts_on_name"
+ add_column :completion_prompts, :provider, :text
+ add_index :completion_prompts, %i[name], unique: false
+
+ # set provider for existing prompts
+ DB.exec <<~SQL
+ UPDATE completion_prompts
+ SET provider = 'openai'
+ WHERE provider IS NULL;
+ SQL
+ end
+
+ def down
+ remove_column :completion_prompts, :provider
+ remove_index :completion_prompts, name: "index_completion_prompts_on_name"
+ add_index :completion_prompts, %i[name], unique: true
+ end
+end
diff --git a/lib/modules/ai_helper/entry_point.rb b/lib/modules/ai_helper/entry_point.rb
index 8f353611..5a7da82b 100644
--- a/lib/modules/ai_helper/entry_point.rb
+++ b/lib/modules/ai_helper/entry_point.rb
@@ -3,7 +3,7 @@ module DiscourseAi
module AiHelper
class EntryPoint
def load_files
- require_relative "open_ai_prompt"
+ require_relative "llm_prompt"
end
def inject_into(plugin)
diff --git a/lib/modules/ai_helper/llm_prompt.rb b/lib/modules/ai_helper/llm_prompt.rb
new file mode 100644
index 00000000..b6149682
--- /dev/null
+++ b/lib/modules/ai_helper/llm_prompt.rb
@@ -0,0 +1,111 @@
+# frozen_string_literal: true
+
+module DiscourseAi
+ module AiHelper
+ class LlmPrompt
+ def available_prompts
+ CompletionPrompt
+ .where(provider: enabled_provider)
+ .where(enabled: true)
+ .map do |prompt|
+ translation =
+ I18n.t("discourse_ai.ai_helper.prompts.#{prompt.name}", default: nil) ||
+ prompt.translated_name || prompt.name
+
+ {
+ id: prompt.id,
+ name: prompt.name,
+ translated_name: translation,
+ prompt_type: prompt.prompt_type,
+ }
+ end
+ end
+
+ def generate_and_send_prompt(prompt, text)
+ if enabled_provider == "openai"
+ openai_call(prompt, text)
+ else
+ anthropic_call(prompt, text)
+ end
+ end
+
+ def enabled_provider
+ if SiteSetting.ai_helper_model.start_with?("gpt")
+ "openai"
+ else
+ "anthropic"
+ end
+ end
+
+ private
+
+ def generate_diff(text, suggestion)
+ cooked_text = PrettyText.cook(text)
+ cooked_suggestion = PrettyText.cook(suggestion)
+
+ DiscourseDiff.new(cooked_text, cooked_suggestion).inline_html
+ end
+
+ def parse_content(prompt, content)
+ return "" if content.blank?
+
+ if enabled_provider == "openai"
+ return content.strip if !prompt.list?
+
+ content.gsub("\"", "").gsub(/\d./, "").split("\n").map(&:strip)
+ else
+ parse_antropic_content(prompt, content)
+ end
+ end
+
+ def openai_call(prompt, text)
+ result = { type: prompt.prompt_type }
+
+ messages = prompt.messages_with_user_input(text)
+
+ result[:suggestions] = DiscourseAi::Inference::OpenAiCompletions
+ .perform!(messages)
+ .dig(:choices)
+ .to_a
+ .flat_map { |choice| parse_content(prompt, choice.dig(:message, :content).to_s) }
+ .compact_blank
+
+ result[:diff] = generate_diff(text, result[:suggestions].first) if prompt.diff?
+
+ result
+ end
+
+ def anthropic_call(prompt, text)
+ result = { type: prompt.prompt_type }
+
+ filled_message = prompt.messages_with_user_input(text)
+
+ message =
+ filled_message.map { |msg| "#{msg["role"]}: #{msg["content"]}" }.join("\n\n") +
+ "Assistant:"
+
+ response = DiscourseAi::Inference::AnthropicCompletions.perform!(message)
+
+ result[:suggestions] = parse_content(prompt, response.dig(:completion))
+
+ result[:diff] = generate_diff(text, result[:suggestions].first) if prompt.diff?
+
+ result
+ end
+
+ def parse_antropic_content(prompt, content)
+ if prompt.list?
+ suggestions = Nokogiri::HTML5.fragment(content).search("ai").map(&:text)
+
+ if suggestions.length > 1
+ suggestions
+ else
+ suggestions.split("\n").map(&:strip)
+ end
+ else
+ [Nokogiri::HTML5.fragment(content).at("ai").text]
+ end
+ end
+ end
+ end
+end
diff --git a/lib/modules/ai_helper/open_ai_prompt.rb b/lib/modules/ai_helper/open_ai_prompt.rb
deleted file mode 100644
index 243adeb9..00000000
--- a/lib/modules/ai_helper/open_ai_prompt.rb
+++ /dev/null
@@ -1,52 +0,0 @@
-# frozen_string_literal: true
-
-module DiscourseAi
- module AiHelper
- class OpenAiPrompt
- def available_prompts
- CompletionPrompt
- .where(enabled: true)
- .map do |prompt|
- translation =
- I18n.t("discourse_ai.ai_helper.prompts.#{prompt.name}", default: nil) ||
- prompt.translated_name
-
- { name: prompt.name, translated_name: translation, prompt_type: prompt.prompt_type }
- end
- end
-
- def generate_and_send_prompt(prompt, text)
- result = { type: prompt.prompt_type }
-
- messages = prompt.messages_with_user_input(text)
-
- result[:suggestions] = DiscourseAi::Inference::OpenAiCompletions
- .perform!(messages)
- .dig(:choices)
- .to_a
- .flat_map { |choice| parse_content(prompt, choice.dig(:message, :content).to_s) }
- .compact_blank
-
- result[:diff] = generate_diff(text, result[:suggestions].first) if prompt.diff?
-
- result
- end
-
- private
-
- def generate_diff(text, suggestion)
- cooked_text = PrettyText.cook(text)
- cooked_suggestion = PrettyText.cook(suggestion)
-
- DiscourseDiff.new(cooked_text, cooked_suggestion).inline_html
- end
-
- def parse_content(prompt, content)
- return "" if content.blank?
- return content.strip if !prompt.list?
-
- content.gsub("\"", "").gsub(/\d./, "").split("\n").map(&:strip)
- end
- end
- end
-end
diff --git a/lib/modules/summarization/summary_generator.rb b/lib/modules/summarization/summary_generator.rb
index fd9c03d2..cce7ecb9 100644
--- a/lib/modules/summarization/summary_generator.rb
+++ b/lib/modules/summarization/summary_generator.rb
@@ -18,7 +18,16 @@ module DiscourseAi
attr_reader :target
def summarization_provider
- model.starts_with?("gpt") ? "openai" : "discourse"
+ case model
+ in "gpt-3.5-turbo"
+ "openai"
+ in "gpt-4"
+ "openai"
+ in "claude-v1"
+ "anthropic"
+ else
+ "discourse"
+ end
end
def get_content(content_since)
@@ -63,6 +72,23 @@ module DiscourseAi
)
end
+ def anthropic_summarization(content)
+ messages =
+ "Human: Summarize the following article that is inside <input> tags.
+ Please include only the summary inside <ai> tags.
+
+ <input>##{content}</input>
+
+
+ Assistant:
+ "
+
+ response =
+ ::DiscourseAi::Inference::AnthropicCompletions.perform!(messages).dig(:completion)
+
+ Nokogiri::HTML5.fragment(response).at("ai").text
+ end
+
def model
SiteSetting.ai_summarization_model
end
diff --git a/lib/shared/inference/anthropic_completions.rb b/lib/shared/inference/anthropic_completions.rb
new file mode 100644
index 00000000..6346452f
--- /dev/null
+++ b/lib/shared/inference/anthropic_completions.rb
@@ -0,0 +1,36 @@
+# frozen_string_literal: true
+
+module ::DiscourseAi
+ module Inference
+ class AnthropicCompletions
+ CompletionFailed = Class.new(StandardError)
+
+ def self.perform!(prompt)
+ headers = {
+ "x-api-key" => SiteSetting.ai_anthropic_api_key,
+ "Content-Type" => "application/json",
+ }
+
+ model = "claude-v1"
+
+ connection_opts = { request: { write_timeout: 60, read_timeout: 60, open_timeout: 60 } }
+
+ response =
+ Faraday.new(nil, connection_opts).post(
+ "https://api.anthropic.com/v1/complete",
+ { model: model, prompt: prompt, max_tokens_to_sample: 300 }.to_json,
+ headers,
+ )
+
+ if response.status != 200
+ Rails.logger.error(
+ "AnthropicCompletions: status: #{response.status} - body: #{response.body}",
+ )
+ raise CompletionFailed
+ end
+
+ JSON.parse(response.body, symbolize_names: true)
+ end
+ end
+ end
+end
diff --git a/plugin.rb b/plugin.rb
index b2dc70e9..17289573 100644
--- a/plugin.rb
+++ b/plugin.rb
@@ -23,6 +23,7 @@ after_initialize do
require_relative "lib/shared/inference/discourse_reranker"
require_relative "lib/shared/inference/openai_completions"
require_relative "lib/shared/inference/openai_embeddings"
+ require_relative "lib/shared/inference/anthropic_completions"
require_relative "lib/shared/classificator"
require_relative "lib/shared/post_classificator"
diff --git a/spec/lib/modules/ai_helper/open_ai_prompt_spec.rb b/spec/lib/modules/ai_helper/llm_prompt_spec.rb
similarity index 97%
rename from spec/lib/modules/ai_helper/open_ai_prompt_spec.rb
rename to spec/lib/modules/ai_helper/llm_prompt_spec.rb
index 5d73a8a0..ea00dcdd 100644
--- a/spec/lib/modules/ai_helper/open_ai_prompt_spec.rb
+++ b/spec/lib/modules/ai_helper/llm_prompt_spec.rb
@@ -2,7 +2,7 @@
require_relative "../../../support/openai_completions_inference_stubs"
-RSpec.describe DiscourseAi::AiHelper::OpenAiPrompt do
+RSpec.describe DiscourseAi::AiHelper::LlmPrompt do
let(:prompt) { CompletionPrompt.find_by(name: mode) }
describe "#generate_and_send_prompt" do
diff --git a/spec/models/completion_prompt_spec.rb b/spec/models/completion_prompt_spec.rb
index b5646884..1eb356df 100644
--- a/spec/models/completion_prompt_spec.rb
+++ b/spec/models/completion_prompt_spec.rb
@@ -17,13 +17,5 @@ RSpec.describe CompletionPrompt do
expect(prompt.valid?).to eq(false)
end
end
-
- context "when the message has invalid roles" do
- it "doesn't accept messages when the role is invalid" do
- prompt = described_class.new(messages: [{ role: "invalid", content: "a" }])
-
- expect(prompt.valid?).to eq(false)
- end
- end
end
end
diff --git a/spec/requests/ai_helper/assistant_controller_spec.rb b/spec/requests/ai_helper/assistant_controller_spec.rb
index 019cc0f3..51b41fce 100644
--- a/spec/requests/ai_helper/assistant_controller_spec.rb
+++ b/spec/requests/ai_helper/assistant_controller_spec.rb
@@ -5,7 +5,7 @@ require_relative "../../support/openai_completions_inference_stubs"
RSpec.describe DiscourseAi::AiHelper::AssistantController do
describe "#suggest" do
let(:text) { OpenAiCompletionsInferenceStubs.translated_response }
- let(:mode) { "proofread" }
+ let(:mode) { "-3" }
context "when not logged in" do
it "returns a 403 response" do
@@ -64,7 +64,7 @@ RSpec.describe DiscourseAi::AiHelper::AssistantController do
end
it "returns a suggestion" do
- OpenAiCompletionsInferenceStubs.stub_prompt(mode)
+ OpenAiCompletionsInferenceStubs.stub_prompt("proofread")
post "/discourse-ai/ai-helper/suggest", params: { mode: mode, text: text }
diff --git a/spec/support/openai_completions_inference_stubs.rb b/spec/support/openai_completions_inference_stubs.rb
index 594c2701..18e9bcda 100644
--- a/spec/support/openai_completions_inference_stubs.rb
+++ b/spec/support/openai_completions_inference_stubs.rb
@@ -6,6 +6,17 @@ class OpenAiCompletionsInferenceStubs
GENERATE_TITLES = "generate_titles"
class << self
+ def text_mode_to_id(mode)
+ case mode
+ when TRANSLATE
+ -1
+ when PROOFREAD
+ -3
+ when GENERATE_TITLES
+ -2
+ end
+ end
+
def spanish_text
<<~STRING
Para que su horror sea perfecto, César, acosado al pie de la estatua por lo impacientes puñales de sus amigos,
@@ -83,7 +94,7 @@ class OpenAiCompletionsInferenceStubs
end
def stub_prompt(type)
- prompt_builder = DiscourseAi::AiHelper::OpenAiPrompt.new
+ prompt_builder = DiscourseAi::AiHelper::LlmPrompt.new
text = type == TRANSLATE ? spanish_text : translated_response
prompt_messages = CompletionPrompt.find_by(name: type).messages_with_user_input(text)
diff --git a/spec/system/ai_helper/ai_composer_helper_spec.rb b/spec/system/ai_helper/ai_composer_helper_spec.rb
index 3c24f8de..12df0db4 100644
--- a/spec/system/ai_helper/ai_composer_helper_spec.rb
+++ b/spec/system/ai_helper/ai_composer_helper_spec.rb
@@ -28,7 +28,7 @@ RSpec.describe "AI Composer helper", type: :system, js: true do
expect(ai_helper_modal).to be_visible
- ai_helper_modal.select_helper_model(mode)
+ ai_helper_modal.select_helper_model(OpenAiCompletionsInferenceStubs.text_mode_to_id(mode))
ai_helper_modal.save_changes
expect(composer.composer_input.value).to eq(
@@ -51,7 +51,7 @@ RSpec.describe "AI Composer helper", type: :system, js: true do
expect(ai_helper_modal).to be_visible
- ai_helper_modal.select_helper_model(mode)
+ ai_helper_modal.select_helper_model(OpenAiCompletionsInferenceStubs.text_mode_to_id(mode))
ai_helper_modal.save_changes
expect(composer.composer_input.value).to eq(
@@ -74,7 +74,7 @@ RSpec.describe "AI Composer helper", type: :system, js: true do
expect(ai_helper_modal).to be_visible
- ai_helper_modal.select_helper_model(mode)
+ ai_helper_modal.select_helper_model(OpenAiCompletionsInferenceStubs.text_mode_to_id(mode))
ai_helper_modal.select_title_suggestion(2)
ai_helper_modal.save_changes