diff --git a/config/settings.yml b/config/settings.yml
index 8ab7cc4c..7240fa06 100644
--- a/config/settings.yml
+++ b/config/settings.yml
@@ -1,5 +1,5 @@
 plugins:
-  ai_enabled:
+  discourse_ai_enabled:
     default: true
     client: true

@@ -64,3 +64,10 @@ plugins:
     choices:
       - sentiment
       - emotion
+
+  ai_nsfw_live_detection_enabled: false
+  ai_nsfw_inference_service_api_endpoint:
+    default: "https://nsfw-testing.demo-by-discourse.com"
+  ai_nsfw_inference_service_api_key:
+    default: ""
+  ai_nsfw_probability_threshold: 60
diff --git a/lib/modules/nsfw/entry_point.rb b/lib/modules/nsfw/entry_point.rb
new file mode 100644
index 00000000..a0eeb280
--- /dev/null
+++ b/lib/modules/nsfw/entry_point.rb
@@ -0,0 +1,16 @@
+# frozen_string_literal: true
+
+module DiscourseAI
+  module NSFW
+    class EntryPoint
+      def inject_into(plugin)
+        require_relative "evaluation.rb"
+        require_relative "jobs/regular/evaluate_content.rb"
+
+        plugin.add_model_callback(Upload, :after_create) do
+          Jobs.enqueue(:evaluate_content, upload_id: self.id)
+        end
+      end
+    end
+  end
+end
diff --git a/lib/modules/nsfw/evaluation.rb b/lib/modules/nsfw/evaluation.rb
new file mode 100644
index 00000000..286734fc
--- /dev/null
+++ b/lib/modules/nsfw/evaluation.rb
@@ -0,0 +1,38 @@
+# frozen_string_literal: true
+
+module DiscourseAI
+  module NSFW
+    class Evaluation
+      AVAILABLE_MODELS = %w[opennsfw2 nsfw_detector]
+
+      def perform(upload)
+        result = { verdict: false, evaluation: {} }
+
+        AVAILABLE_MODELS.each do |model|
+          model_result = evaluate_with_model(model, upload).symbolize_keys!
+
+          model_result.values.each do |classification_prob|
+            if classification_prob.to_i >= SiteSetting.ai_nsfw_probability_threshold
+              result[:verdict] = true
+            end
+          end
+
+          result[:evaluation][model.to_sym] = model_result
+        end
+
+        result
+      end
+
+      private
+
+      def evaluate_with_model(model, upload)
+        DiscourseAI::InferenceManager.perform!(
+          "#{SiteSetting.ai_nsfw_inference_service_api_endpoint}/api/v1/classify",
+          model,
+          Discourse.store.cdn_url(upload.url),
+          SiteSetting.ai_nsfw_inference_service_api_key,
+        )
+      end
+    end
+  end
+end
diff --git a/lib/modules/nsfw/jobs/regular/evaluate_content.rb b/lib/modules/nsfw/jobs/regular/evaluate_content.rb
new file mode 100644
index 00000000..4976df83
--- /dev/null
+++ b/lib/modules/nsfw/jobs/regular/evaluate_content.rb
@@ -0,0 +1,17 @@
+# frozen_string_literal: true
+
+module Jobs
+  class EvaluateContent < ::Jobs::Base
+    def execute(args)
+      upload = Upload.find_by_id(args[:upload_id])
+
+      return unless upload
+
+      result = DiscourseAI::NSFW::Evaluation.new.perform(upload)
+
+      # FIXME(roman): This is a simplistic action just to create
+      # the basic flow. We'll introduce flagging capabilities in the future.
+      upload.destroy! if result[:verdict]
+    end
+  end
+end
diff --git a/plugin.rb b/plugin.rb
index 755f5a49..afbad8d8 100644
--- a/plugin.rb
+++ b/plugin.rb
@@ -7,10 +7,10 @@
 # url: TODO
 # required_version: 2.7.0

-enabled_site_setting :ai_enabled
+enabled_site_setting :discourse_ai_enabled

 after_initialize do
-  module ::Disorder
+  module ::DiscourseAI
     PLUGIN_NAME = "discourse-ai"
   end

@@ -40,4 +40,7 @@ after_initialize do
   on(:chat_message_edited) do |chat_message|
     DiscourseAI::Toxicity::EventHandler.handle_chat_async(chat_message)
   end
+
+  require_relative "lib/modules/nsfw/entry_point.rb"
+  DiscourseAI::NSFW::EntryPoint.new.inject_into(self)
 end
diff --git a/spec/.gitkeep b/spec/.gitkeep
deleted file mode 100644
index e69de29b..00000000
diff --git a/spec/lib/modules/nsfw/evaluation_spec.rb b/spec/lib/modules/nsfw/evaluation_spec.rb
new file mode 100644
index 00000000..2ca9dbd0
--- /dev/null
+++ b/spec/lib/modules/nsfw/evaluation_spec.rb
@@ -0,0 +1,46 @@
+# frozen_string_literal: true
+
+require "rails_helper"
+require_relative "../../../support/nsfw_inference_stubs"
+
+describe DiscourseAI::NSFW::Evaluation do
+  before { SiteSetting.ai_nsfw_live_detection_enabled = true }
+
+  fab!(:image) { Fabricate(:s3_image_upload) }
+
+  let(:available_models) { DiscourseAI::NSFW::Evaluation::AVAILABLE_MODELS }
+
+  describe "perform" do
+    context "when we determine content is NSFW" do
+      before { NSFWInferenceStubs.positive(image) }
+
+      it "returns true alongside the evaluation" do
+        result = subject.perform(image)
+
+        expect(result[:verdict]).to eq(true)
+
+        available_models.each do |model|
+          expect(result.dig(:evaluation, model.to_sym)).to eq(
+            NSFWInferenceStubs.positive_result(model),
+          )
+        end
+      end
+    end
+
+    context "when we determine content is safe" do
+      before { NSFWInferenceStubs.negative(image) }
+
+      it "returns false alongside the evaluation" do
+        result = subject.perform(image)
+
+        expect(result[:verdict]).to eq(false)
+
+        available_models.each do |model|
+          expect(result.dig(:evaluation, model.to_sym)).to eq(
+            NSFWInferenceStubs.negative_result(model),
+          )
+        end
+      end
+    end
+  end
+end
diff --git a/spec/lib/modules/nsfw/jobs/regular/evaluate_content_spec.rb b/spec/lib/modules/nsfw/jobs/regular/evaluate_content_spec.rb
new file mode 100644
index 00000000..c74c0a68
--- /dev/null
+++ b/spec/lib/modules/nsfw/jobs/regular/evaluate_content_spec.rb
@@ -0,0 +1,30 @@
+# frozen_string_literal: true
+
+require "rails_helper"
+require_relative "../../../../../support/nsfw_inference_stubs"
+
+describe Jobs::EvaluateContent do
+  fab!(:image) { Fabricate(:s3_image_upload) }
+
+  describe "#execute" do
+    context "when we conclude content is NSFW" do
+      before { NSFWInferenceStubs.positive(image) }
+
+      it "deletes the upload" do
+        subject.execute(upload_id: image.id)
+
+        expect { image.reload }.to raise_error(ActiveRecord::RecordNotFound)
+      end
+    end
+
+    context "when we conclude content is not NSFW" do
+      before { NSFWInferenceStubs.negative(image) }
+
+      it "does nothing" do
+        subject.execute(upload_id: image.id)
+
+        expect(image.reload).to be_present
+      end
+    end
+  end
+end
diff --git a/spec/support/nsfw_inference_stubs.rb b/spec/support/nsfw_inference_stubs.rb
new file mode 100644
index 00000000..6d43cabb
--- /dev/null
+++ b/spec/support/nsfw_inference_stubs.rb
@@ -0,0 +1,45 @@
+class NSFWInferenceStubs
+  class << self
+    def endpoint
+      "#{SiteSetting.ai_nsfw_inference_service_api_endpoint}/api/v1/classify"
+    end
+
+    def upload_url(upload)
+      Discourse.store.cdn_url(upload.url)
+    end
+
+    def positive_result(model)
+      return { nsfw_probability: 90 } if model == "opennsfw2"
+      { drawings: 1, hentai: 2, neutral: 0, porn: 90, sexy: 79 }
+    end
+
+    def negative_result(model)
+      return { nsfw_probability: 3 } if model == "opennsfw2"
+      { drawings: 1, hentai: 2, neutral: 0, porn: 3, sexy: 1 }
+    end
+
+    def positive(upload)
+      WebMock
+        .stub_request(:post, endpoint)
+        .with(body: JSON.dump(model: "nsfw_detector", content: upload_url(upload)))
+        .to_return(status: 200, body: JSON.dump(positive_result("nsfw_detector")))
+
+      WebMock
+        .stub_request(:post, endpoint)
+        .with(body: JSON.dump(model: "opennsfw2", content: upload_url(upload)))
+        .to_return(status: 200, body: JSON.dump(positive_result("opennsfw2")))
+    end
+
+    def negative(upload)
+      WebMock
+        .stub_request(:post, endpoint)
+        .with(body: JSON.dump(model: "nsfw_detector", content: upload_url(upload)))
+        .to_return(status: 200, body: JSON.dump(negative_result("nsfw_detector")))
+
+      WebMock
+        .stub_request(:post, endpoint)
+        .with(body: JSON.dump(model: "opennsfw2", content: upload_url(upload)))
+        .to_return(status: 200, body: JSON.dump(negative_result("opennsfw2")))
+    end
+  end
+end
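
For context when reading the patch, a minimal, illustrative sketch of the flow it wires up. This is not part of the diff; it leans on the spec helpers introduced above (the s3_image_upload fabricator, NSFWInferenceStubs, WebMock), so it assumes a test environment rather than production code:

# Illustrative only; reuses the spec stubs, so this runs in a test context.
SiteSetting.ai_nsfw_live_detection_enabled = true # enabled by the specs, though not yet consulted by this patch
SiteSetting.ai_nsfw_probability_threshold = 60    # the default from settings.yml

image = Fabricate(:s3_image_upload)
NSFWInferenceStubs.positive(image) # stub both models to report NSFW probabilities at or above the threshold

# Upload's after_create callback enqueues :evaluate_content; run the job inline here.
Jobs::EvaluateContent.new.execute(upload_id: image.id)
Upload.exists?(image.id) # => false, the upload was destroyed

# The evaluation can also be called directly to inspect the verdict and per-model scores.
safe_image = Fabricate(:s3_image_upload)
NSFWInferenceStubs.negative(safe_image)
result = DiscourseAI::NSFW::Evaluation.new.perform(safe_image)
result[:verdict]                # => false
result[:evaluation][:opennsfw2] # => { nsfw_probability: 3 }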