diff --git a/app/models/completion_prompt.rb b/app/models/completion_prompt.rb
index e1b3636c..a6c695df 100644
--- a/app/models/completion_prompt.rb
+++ b/app/models/completion_prompt.rb
@@ -1,7 +1,38 @@
 # frozen_string_literal: true
 
 class CompletionPrompt < ActiveRecord::Base
+  # TODO(roman): Remove sept 2023.
+  self.ignored_columns = ["value"]
+
+  VALID_ROLES = %w[system user assistant]
+
   enum :prompt_type, { text: 0, list: 1, diff: 2 }
+
+  validates :messages, length: { maximum: 20 }
+  validate :each_message_length
+  validate :each_message_role
+
+  def messages_with_user_input(user_input)
+    self.messages << { role: "user", content: user_input }
+  end
+
+  private
+
+  def each_message_length
+    messages.each_with_index do |msg, idx|
+      next if msg["content"].length <= 1000
+
+      errors.add(:messages, I18n.t("errors.prompt_message_length", idx: idx + 1))
+    end
+  end
+
+  def each_message_role
+    messages.each_with_index do |msg, idx|
+      next if VALID_ROLES.include?(msg["role"])
+
+      errors.add(:messages, I18n.t("errors.invalid_prompt_role", idx: idx + 1))
+    end
+  end
 end
 
 # == Schema Information
@@ -12,10 +43,10 @@
 #  name            :string   not null
 #  translated_name :string
 #  prompt_type     :integer  default("text"), not null
-#  value           :text     not null
 #  enabled         :boolean  default(TRUE), not null
 #  created_at      :datetime not null
 #  updated_at      :datetime not null
+#  messages        :jsonb    not null
 #
 # Indexes
 #
diff --git a/config/locales/server.en.yml b/config/locales/server.en.yml
index 0c32972f..a7c54905 100644
--- a/config/locales/server.en.yml
+++ b/config/locales/server.en.yml
@@ -51,6 +51,9 @@ en:
       flagged_by_toxicity: The AI plugin flagged this after classifying it as toxic.
       flagged_by_nsfw: The AI plugin flagged this after classifying at least one of the attached images as NSFW.
 
+  errors:
+    prompt_message_length: The message %{idx} is over the 1000 character limit.
+    invalid_prompt_role: The message %{idx} has an invalid role.
 
   discourse_ai:
     ai_helper:
diff --git a/db/fixtures/ai-helper/600_completion_prompts.rb b/db/fixtures/ai-helper/600_completion_prompts.rb
index 4a43777f..38a0571e 100644
--- a/db/fixtures/ai-helper/600_completion_prompts.rb
+++ b/db/fixtures/ai-helper/600_completion_prompts.rb
@@ -3,35 +3,80 @@ CompletionPrompt.seed do |cp|
   cp.id = -1
   cp.name = "translate"
   cp.prompt_type = CompletionPrompt.prompt_types[:text]
-  cp.value = <<~STRING
+  cp.messages = [{ role: "system", content: <<~TEXT }]
     I want you to act as an English translator, spelling corrector and improver. I will speak to you
     in any language and you will detect the language, translate it and answer in the corrected and
     improved version of my text, in English. I want you to replace my simplified A0-level words and
     sentences with more beautiful and elegant, upper level English words and sentences.
     Keep the meaning same, but make them more literary. I want you to only reply the correction,
     the improvements and nothing else, do not write explanations.
-  STRING
+  TEXT
 end
 
 CompletionPrompt.seed do |cp|
   cp.id = -2
   cp.name = "generate_titles"
   cp.prompt_type = CompletionPrompt.prompt_types[:list]
-  cp.value = <<~STRING
+  cp.messages = [{ role: "system", content: <<~TEXT }]
     I want you to act as a title generator for written pieces. I will provide you with a text,
     and you will generate five attention-grabbing titles. Please keep the title concise and
     under 20 words, and ensure that the meaning is maintained. Replies will utilize the language
     type of the topic. I want you to only reply the list of options and nothing else,
     do not write explanations.
-  STRING
+  TEXT
 end
 
 CompletionPrompt.seed do |cp|
   cp.id = -3
   cp.name = "proofread"
   cp.prompt_type = CompletionPrompt.prompt_types[:diff]
-  cp.value = <<~STRING
-    I want you act as a proofreader. I will provide you with a text and I want you to review them for any spelling,
-    grammar, or punctuation errors. Once you have finished reviewing the text, provide me with any necessary
-    corrections or suggestions for improve the text.
-  STRING
+  cp.messages = [
+    { role: "system", content: <<~TEXT },
+      You are a markdown proofreader. You correct egregious typos and phrasing issues but keep the user's original voice.
+      You do not touch code blocks. I will provide you with text to proofread. If nothing needs fixing, then you will echo the text back.
+
+      Optionally, a user can specify intensity. Intensity 10 is a pedantic English teacher correcting the text.
+      Intensity 1 is a minimal proofreader. By default, you operate at intensity 1.
+    TEXT
+    { role: "user", content: "![amazing car|100x100, 22%](upload://hapy.png)" },
+    { role: "assistant", content: "![Amazing car|100x100, 22%](upload://hapy.png)" },
+    { role: "user", content: <<~TEXT },
+      Intensity 1:
+      The rain in spain stays mainly in the plane.
+    TEXT
+    { role: "assistant", content: "The rain in Spain, stays mainly in the Plane." },
+    { role: "user", content: "The rain in Spain, stays mainly in the Plane." },
+    { role: "assistant", content: "The rain in Spain, stays mainly in the Plane." },
+    { role: "user", content: <<~TEXT },
+      Intensity 1:
+      Hello,
+
+      Sometimes the logo isn't changing automatically when color scheme changes.
+
+      ![Screen Recording 2023-03-17 at 18.04.22|video](upload://2rcVL0ZMxHPNtPWQbZjwufKpWVU.mov)
+    TEXT
+    { role: "assistant", content: <<~TEXT },
+      Hello,
+      Sometimes the logo does not change automatically when the color scheme changes.
+      ![Screen Recording 2023-03-17 at 18.04.22|video](upload://2rcVL0ZMxHPNtPWQbZjwufKpWVU.mov)
+    TEXT
+    { role: "user", content: <<~TEXT },
+      Intensity 1:
+      Any ideas what is wrong with this peace of cod?
+      > This quot contains a typo
+      ```ruby
+      # this has speling mistakes
+      testin.atypo = 11
+      baad = "bad"
+      ```
+    TEXT
+    { role: "assistant", content: <<~TEXT },
+      Any ideas what is wrong with this piece of code?
+      > This quot contains a typo
+      ```ruby
+      # This has spelling mistakes
+      testing.a_typo = 11
+      bad = "bad"
+      ```
+    TEXT
+  ]
 end
diff --git a/db/migrate/20230320185619_multi_message_completion_prompts.rb b/db/migrate/20230320185619_multi_message_completion_prompts.rb
new file mode 100644
index 00000000..2cf46c7e
--- /dev/null
+++ b/db/migrate/20230320185619_multi_message_completion_prompts.rb
@@ -0,0 +1,7 @@
+# frozen_string_literal: true
+
+class MultiMessageCompletionPrompts < ActiveRecord::Migration[7.0]
+  def change
+    add_column :completion_prompts, :messages, :jsonb, null: false
+  end
+end
diff --git a/db/post_migrate/20230320191928_drop_completion_prompt_value.rb b/db/post_migrate/20230320191928_drop_completion_prompt_value.rb
new file mode 100644
index 00000000..a7864fd0
--- /dev/null
+++ b/db/post_migrate/20230320191928_drop_completion_prompt_value.rb
@@ -0,0 +1,7 @@
+# frozen_string_literal: true
+
+class DropCompletionPromptValue < ActiveRecord::Migration[7.0]
+  def change
+    remove_column :completion_prompts, :value, :text
+  end
+end
diff --git a/lib/modules/ai_helper/open_ai_prompt.rb b/lib/modules/ai_helper/open_ai_prompt.rb
index 7b5476d6..3234cf13 100644
--- a/lib/modules/ai_helper/open_ai_prompt.rb
+++ b/lib/modules/ai_helper/open_ai_prompt.rb
@@ -23,10 +23,10 @@ module DiscourseAi
       def generate_and_send_prompt(prompt, text)
         result = { type: prompt.prompt_type }
 
-        ai_messages = [{ role: "system", content: prompt.value }, { role: "user", content: text }]
+        messages = prompt.messages_with_user_input(text)
         result[:suggestions] =
           DiscourseAi::Inference::OpenAiCompletions
-            .perform!(ai_messages)
+            .perform!(messages)
             .dig(:choices)
             .to_a
             .flat_map { |choice| parse_content(prompt, choice.dig(:message, :content).to_s) }
diff --git a/lib/shared/inference/openai_completions.rb b/lib/shared/inference/openai_completions.rb
index 097b6340..c5ef7916 100644
--- a/lib/shared/inference/openai_completions.rb
+++ b/lib/shared/inference/openai_completions.rb
@@ -3,7 +3,7 @@ module ::DiscourseAi
   module Inference
     class OpenAiCompletions
-      def self.perform!(content, model = "gpt-3.5-turbo")
+      def self.perform!(messages, model = "gpt-3.5-turbo")
         headers = {
           "Authorization" => "Bearer #{SiteSetting.ai_openai_api_key}",
           "Content-Type" => "application/json",
         }
@@ -14,7 +14,7 @@ module ::DiscourseAi
         response =
           Faraday.new(nil, connection_opts).post(
             "https://api.openai.com/v1/chat/completions",
-            { model: model, messages: content }.to_json,
+            { model: model, messages: messages }.to_json,
             headers,
           )
 
diff --git a/spec/models/completion_prompt_spec.rb b/spec/models/completion_prompt_spec.rb
new file mode 100644
index 00000000..b5646884
--- /dev/null
+++ b/spec/models/completion_prompt_spec.rb
@@ -0,0 +1,29 @@
+# frozen_string_literal: true
+
+RSpec.describe CompletionPrompt do
+  describe "validations" do
+    context "when there are too many messages" do
+      it "doesn't accept more than 20 messages" do
+        prompt = described_class.new(messages: [{ role: "system", content: "a" }] * 21)
+
+        expect(prompt.valid?).to eq(false)
+      end
+    end
+
+    context "when the message is over the max length" do
+      it "doesn't accept messages when the length is more than 1000 characters" do
+        prompt = described_class.new(messages: [{ role: "system", content: "a" * 1001 }])
+
+        expect(prompt.valid?).to eq(false)
+      end
+    end
+
+    context "when the message has invalid roles" do
+      it "doesn't accept messages when the role is invalid" do
+        prompt = described_class.new(messages: [{ role: "invalid", content: "a" }])
+
+        expect(prompt.valid?).to eq(false)
+      end
+    end
+  end
+end
diff --git a/spec/support/openai_completions_inference_stubs.rb b/spec/support/openai_completions_inference_stubs.rb
index 886e48d9..c71d8e73 100644
--- a/spec/support/openai_completions_inference_stubs.rb
+++ b/spec/support/openai_completions_inference_stubs.rb
@@ -83,12 +83,11 @@ class OpenAiCompletionsInferenceStubs
     text =
       type == DiscourseAi::AiHelper::OpenAiPrompt::TRANSLATE ? spanish_text : translated_response
 
-    used_prompt = CompletionPrompt.find_by(name: type)
-    prompt = [{ role: "system", content: used_prompt.value }, { role: "user", content: text }]
+    prompt_messages = CompletionPrompt.find_by(name: type).messages_with_user_input(text)
 
     WebMock
       .stub_request(:post, "https://api.openai.com/v1/chat/completions")
-      .with(body: JSON.dump(model: "gpt-3.5-turbo", messages: prompt))
+      .with(body: { model: "gpt-3.5-turbo", messages: prompt_messages }.to_json)
       .to_return(status: 200, body: JSON.dump(response(response_text_for(type))))
   end
 end