api/pkg/apis/inference/v1/grpc_service_grpc.pb.go

// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v3.21.6
// source: pkg/apis/inference/v1/grpc_service.proto
package inference
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7
// GRPCInferenceServiceClient is the client API for GRPCInferenceService service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type GRPCInferenceServiceClient interface {
// @@ .. cpp:var:: rpc ServerLive(ServerLiveRequest) returns
// @@ (ServerLiveResponse)
// @@
// @@ Check liveness of the inference server.
// @@
ServerLive(ctx context.Context, in *ServerLiveRequest, opts ...grpc.CallOption) (*ServerLiveResponse, error)
// @@ .. cpp:var:: rpc ServerReady(ServerReadyRequest) returns
// @@ (ServerReadyResponse)
// @@
// @@ Check readiness of the inference server.
// @@
ServerReady(ctx context.Context, in *ServerReadyRequest, opts ...grpc.CallOption) (*ServerReadyResponse, error)
// @@ .. cpp:var:: rpc ModelReady(ModelReadyRequest) returns
// @@ (ModelReadyResponse)
// @@
// @@ Check readiness of a model in the inference server.
// @@
ModelReady(ctx context.Context, in *ModelReadyRequest, opts ...grpc.CallOption) (*ModelReadyResponse, error)
// @@ .. cpp:var:: rpc ServerMetadata(ServerMetadataRequest) returns
// @@ (ServerMetadataResponse)
// @@
// @@ Get server metadata.
// @@
ServerMetadata(ctx context.Context, in *ServerMetadataRequest, opts ...grpc.CallOption) (*ServerMetadataResponse, error)
// @@ .. cpp:var:: rpc ModelMetadata(ModelMetadataRequest) returns
// @@ (ModelMetadataResponse)
// @@
// @@ Get model metadata.
// @@
ModelMetadata(ctx context.Context, in *ModelMetadataRequest, opts ...grpc.CallOption) (*ModelMetadataResponse, error)
// @@ .. cpp:var:: rpc ModelInfer(ModelInferRequest) returns
// @@ (ModelInferResponse)
// @@
// @@ Perform inference using a specific model.
// @@
ModelInfer(ctx context.Context, in *ModelInferRequest, opts ...grpc.CallOption) (*ModelInferResponse, error)
// @@ .. cpp:var:: rpc ModelStreamInfer(stream ModelInferRequest) returns
// @@ (stream ModelStreamInferResponse)
// @@
// @@ Perform streaming inference.
// @@
ModelStreamInfer(ctx context.Context, opts ...grpc.CallOption) (GRPCInferenceService_ModelStreamInferClient, error)
// @@ .. cpp:var:: rpc ModelConfig(ModelConfigRequest) returns
// @@ (ModelConfigResponse)
// @@
// @@ Get model configuration.
// @@
ModelConfig(ctx context.Context, in *ModelConfigRequest, opts ...grpc.CallOption) (*ModelConfigResponse, error)
// @@ .. cpp:var:: rpc ModelStatistics(
// @@ ModelStatisticsRequest)
// @@ returns (ModelStatisticsResponse)
// @@
// @@ Get the cumulative inference statistics for a model.
// @@
ModelStatistics(ctx context.Context, in *ModelStatisticsRequest, opts ...grpc.CallOption) (*ModelStatisticsResponse, error)
// @@ .. cpp:var:: rpc RepositoryIndex(RepositoryIndexRequest) returns
// @@ (RepositoryIndexResponse)
// @@
// @@ Get the index of model repository contents.
// @@
RepositoryIndex(ctx context.Context, in *RepositoryIndexRequest, opts ...grpc.CallOption) (*RepositoryIndexResponse, error)
// @@ .. cpp:var:: rpc RepositoryModelLoad(RepositoryModelLoadRequest) returns
// @@ (RepositoryModelLoadResponse)
// @@
// @@ Load or reload a model from a repository.
// @@
RepositoryModelLoad(ctx context.Context, in *RepositoryModelLoadRequest, opts ...grpc.CallOption) (*RepositoryModelLoadResponse, error)
// @@ .. cpp:var:: rpc RepositoryModelUnload(RepositoryModelUnloadRequest)
// @@ returns (RepositoryModelUnloadResponse)
// @@
// @@ Unload a model.
// @@
RepositoryModelUnload(ctx context.Context, in *RepositoryModelUnloadRequest, opts ...grpc.CallOption) (*RepositoryModelUnloadResponse, error)
// @@ .. cpp:var:: rpc SystemSharedMemoryStatus(
// @@ SystemSharedMemoryStatusRequest)
// @@     returns (SystemSharedMemoryStatusResponse)
// @@
// @@ Get the status of all registered system-shared-memory regions.
// @@
SystemSharedMemoryStatus(ctx context.Context, in *SystemSharedMemoryStatusRequest, opts ...grpc.CallOption) (*SystemSharedMemoryStatusResponse, error)
// @@ .. cpp:var:: rpc SystemSharedMemoryRegister(
// @@ SystemSharedMemoryRegisterRequest)
// @@ returns (SystemSharedMemoryRegisterResponse)
// @@
// @@ Register a system-shared-memory region.
// @@
SystemSharedMemoryRegister(ctx context.Context, in *SystemSharedMemoryRegisterRequest, opts ...grpc.CallOption) (*SystemSharedMemoryRegisterResponse, error)
// @@ .. cpp:var:: rpc SystemSharedMemoryUnregister(
// @@ SystemSharedMemoryUnregisterRequest)
// @@ returns (SystemSharedMemoryUnregisterResponse)
// @@
// @@ Unregister a system-shared-memory region.
// @@
SystemSharedMemoryUnregister(ctx context.Context, in *SystemSharedMemoryUnregisterRequest, opts ...grpc.CallOption) (*SystemSharedMemoryUnregisterResponse, error)
// @@ .. cpp:var:: rpc CudaSharedMemoryStatus(
// @@ CudaSharedMemoryStatusRequest)
// @@     returns (CudaSharedMemoryStatusResponse)
// @@
// @@ Get the status of all registered CUDA-shared-memory regions.
// @@
CudaSharedMemoryStatus(ctx context.Context, in *CudaSharedMemoryStatusRequest, opts ...grpc.CallOption) (*CudaSharedMemoryStatusResponse, error)
// @@ .. cpp:var:: rpc CudaSharedMemoryRegister(
// @@ CudaSharedMemoryRegisterRequest)
// @@ returns (CudaSharedMemoryRegisterResponse)
// @@
// @@ Register a CUDA-shared-memory region.
// @@
CudaSharedMemoryRegister(ctx context.Context, in *CudaSharedMemoryRegisterRequest, opts ...grpc.CallOption) (*CudaSharedMemoryRegisterResponse, error)
// @@ .. cpp:var:: rpc CudaSharedMemoryUnregister(
// @@ CudaSharedMemoryUnregisterRequest)
// @@ returns (CudaSharedMemoryUnregisterResponse)
// @@
// @@ Unregister a CUDA-shared-memory region.
// @@
CudaSharedMemoryUnregister(ctx context.Context, in *CudaSharedMemoryUnregisterRequest, opts ...grpc.CallOption) (*CudaSharedMemoryUnregisterResponse, error)
// @@ .. cpp:var:: rpc TraceSetting(TraceSettingRequest)
// @@ returns (TraceSettingResponse)
// @@
// @@ Update and get the trace setting of the Triton server.
// @@
TraceSetting(ctx context.Context, in *TraceSettingRequest, opts ...grpc.CallOption) (*TraceSettingResponse, error)
// @@ .. cpp:var:: rpc LogSettings(LogSettingsRequest)
// @@ returns (LogSettingsResponse)
// @@
// @@ Update and get the log settings of the Triton server.
// @@
LogSettings(ctx context.Context, in *LogSettingsRequest, opts ...grpc.CallOption) (*LogSettingsResponse, error)
}
type gRPCInferenceServiceClient struct {
cc grpc.ClientConnInterface
}
func NewGRPCInferenceServiceClient(cc grpc.ClientConnInterface) GRPCInferenceServiceClient {
return &gRPCInferenceServiceClient{cc}
}
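// Example (not generated): a minimal unary-call sketch. It assumes the caller
// already holds an established connection (any grpc.ClientConnInterface, such
// as a *grpc.ClientConn); the helper name is illustrative only.
func exampleServerLive(ctx context.Context, conn grpc.ClientConnInterface) (*ServerLiveResponse, error) {
	// NewGRPCInferenceServiceClient only wraps the connection; deadlines,
	// credentials, and retries are supplied by the caller via ctx and CallOptions.
	client := NewGRPCInferenceServiceClient(conn)
	return client.ServerLive(ctx, &ServerLiveRequest{})
}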
func (c *gRPCInferenceServiceClient) ServerLive(ctx context.Context, in *ServerLiveRequest, opts ...grpc.CallOption) (*ServerLiveResponse, error) {
out := new(ServerLiveResponse)
err := c.cc.Invoke(ctx, "/inference.v1.GRPCInferenceService/ServerLive", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *gRPCInferenceServiceClient) ServerReady(ctx context.Context, in *ServerReadyRequest, opts ...grpc.CallOption) (*ServerReadyResponse, error) {
out := new(ServerReadyResponse)
err := c.cc.Invoke(ctx, "/inference.v1.GRPCInferenceService/ServerReady", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *gRPCInferenceServiceClient) ModelReady(ctx context.Context, in *ModelReadyRequest, opts ...grpc.CallOption) (*ModelReadyResponse, error) {
out := new(ModelReadyResponse)
err := c.cc.Invoke(ctx, "/inference.v1.GRPCInferenceService/ModelReady", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *gRPCInferenceServiceClient) ServerMetadata(ctx context.Context, in *ServerMetadataRequest, opts ...grpc.CallOption) (*ServerMetadataResponse, error) {
out := new(ServerMetadataResponse)
err := c.cc.Invoke(ctx, "/inference.v1.GRPCInferenceService/ServerMetadata", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *gRPCInferenceServiceClient) ModelMetadata(ctx context.Context, in *ModelMetadataRequest, opts ...grpc.CallOption) (*ModelMetadataResponse, error) {
out := new(ModelMetadataResponse)
err := c.cc.Invoke(ctx, "/inference.v1.GRPCInferenceService/ModelMetadata", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *gRPCInferenceServiceClient) ModelInfer(ctx context.Context, in *ModelInferRequest, opts ...grpc.CallOption) (*ModelInferResponse, error) {
out := new(ModelInferResponse)
err := c.cc.Invoke(ctx, "/inference.v1.GRPCInferenceService/ModelInfer", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *gRPCInferenceServiceClient) ModelStreamInfer(ctx context.Context, opts ...grpc.CallOption) (GRPCInferenceService_ModelStreamInferClient, error) {
stream, err := c.cc.NewStream(ctx, &GRPCInferenceService_ServiceDesc.Streams[0], "/inference.v1.GRPCInferenceService/ModelStreamInfer", opts...)
if err != nil {
return nil, err
}
x := &gRPCInferenceServiceModelStreamInferClient{stream}
return x, nil
}
type GRPCInferenceService_ModelStreamInferClient interface {
Send(*ModelInferRequest) error
Recv() (*ModelStreamInferResponse, error)
grpc.ClientStream
}
type gRPCInferenceServiceModelStreamInferClient struct {
grpc.ClientStream
}
func (x *gRPCInferenceServiceModelStreamInferClient) Send(m *ModelInferRequest) error {
return x.ClientStream.SendMsg(m)
}
func (x *gRPCInferenceServiceModelStreamInferClient) Recv() (*ModelStreamInferResponse, error) {
m := new(ModelStreamInferResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
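// Example (not generated): a minimal sketch of driving the bidirectional
// ModelStreamInfer stream. Request construction is elided because the tensor
// layout depends on the target model, and the one-response-per-request read
// loop assumes the backing model is not decoupled; the helper name is
// illustrative only.
func exampleModelStreamInfer(ctx context.Context, client GRPCInferenceServiceClient, reqs []*ModelInferRequest) ([]*ModelStreamInferResponse, error) {
	stream, err := client.ModelStreamInfer(ctx)
	if err != nil {
		return nil, err
	}
	// Send every queued request, then half-close so the server knows no more
	// requests are coming. A production client would usually send and receive
	// concurrently to avoid flow-control stalls.
	for _, req := range reqs {
		if err := stream.Send(req); err != nil {
			return nil, err
		}
	}
	if err := stream.CloseSend(); err != nil {
		return nil, err
	}
	out := make([]*ModelStreamInferResponse, 0, len(reqs))
	for range reqs {
		resp, err := stream.Recv()
		if err != nil {
			return nil, err
		}
		out = append(out, resp)
	}
	return out, nil
}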
func (c *gRPCInferenceServiceClient) ModelConfig(ctx context.Context, in *ModelConfigRequest, opts ...grpc.CallOption) (*ModelConfigResponse, error) {
out := new(ModelConfigResponse)
err := c.cc.Invoke(ctx, "/inference.v1.GRPCInferenceService/ModelConfig", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *gRPCInferenceServiceClient) ModelStatistics(ctx context.Context, in *ModelStatisticsRequest, opts ...grpc.CallOption) (*ModelStatisticsResponse, error) {
out := new(ModelStatisticsResponse)
err := c.cc.Invoke(ctx, "/inference.v1.GRPCInferenceService/ModelStatistics", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *gRPCInferenceServiceClient) RepositoryIndex(ctx context.Context, in *RepositoryIndexRequest, opts ...grpc.CallOption) (*RepositoryIndexResponse, error) {
out := new(RepositoryIndexResponse)
err := c.cc.Invoke(ctx, "/inference.v1.GRPCInferenceService/RepositoryIndex", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *gRPCInferenceServiceClient) RepositoryModelLoad(ctx context.Context, in *RepositoryModelLoadRequest, opts ...grpc.CallOption) (*RepositoryModelLoadResponse, error) {
out := new(RepositoryModelLoadResponse)
err := c.cc.Invoke(ctx, "/inference.v1.GRPCInferenceService/RepositoryModelLoad", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *gRPCInferenceServiceClient) RepositoryModelUnload(ctx context.Context, in *RepositoryModelUnloadRequest, opts ...grpc.CallOption) (*RepositoryModelUnloadResponse, error) {
out := new(RepositoryModelUnloadResponse)
err := c.cc.Invoke(ctx, "/inference.v1.GRPCInferenceService/RepositoryModelUnload", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *gRPCInferenceServiceClient) SystemSharedMemoryStatus(ctx context.Context, in *SystemSharedMemoryStatusRequest, opts ...grpc.CallOption) (*SystemSharedMemoryStatusResponse, error) {
out := new(SystemSharedMemoryStatusResponse)
err := c.cc.Invoke(ctx, "/inference.v1.GRPCInferenceService/SystemSharedMemoryStatus", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *gRPCInferenceServiceClient) SystemSharedMemoryRegister(ctx context.Context, in *SystemSharedMemoryRegisterRequest, opts ...grpc.CallOption) (*SystemSharedMemoryRegisterResponse, error) {
out := new(SystemSharedMemoryRegisterResponse)
err := c.cc.Invoke(ctx, "/inference.v1.GRPCInferenceService/SystemSharedMemoryRegister", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *gRPCInferenceServiceClient) SystemSharedMemoryUnregister(ctx context.Context, in *SystemSharedMemoryUnregisterRequest, opts ...grpc.CallOption) (*SystemSharedMemoryUnregisterResponse, error) {
out := new(SystemSharedMemoryUnregisterResponse)
err := c.cc.Invoke(ctx, "/inference.v1.GRPCInferenceService/SystemSharedMemoryUnregister", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *gRPCInferenceServiceClient) CudaSharedMemoryStatus(ctx context.Context, in *CudaSharedMemoryStatusRequest, opts ...grpc.CallOption) (*CudaSharedMemoryStatusResponse, error) {
out := new(CudaSharedMemoryStatusResponse)
err := c.cc.Invoke(ctx, "/inference.v1.GRPCInferenceService/CudaSharedMemoryStatus", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *gRPCInferenceServiceClient) CudaSharedMemoryRegister(ctx context.Context, in *CudaSharedMemoryRegisterRequest, opts ...grpc.CallOption) (*CudaSharedMemoryRegisterResponse, error) {
out := new(CudaSharedMemoryRegisterResponse)
err := c.cc.Invoke(ctx, "/inference.v1.GRPCInferenceService/CudaSharedMemoryRegister", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *gRPCInferenceServiceClient) CudaSharedMemoryUnregister(ctx context.Context, in *CudaSharedMemoryUnregisterRequest, opts ...grpc.CallOption) (*CudaSharedMemoryUnregisterResponse, error) {
out := new(CudaSharedMemoryUnregisterResponse)
err := c.cc.Invoke(ctx, "/inference.v1.GRPCInferenceService/CudaSharedMemoryUnregister", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *gRPCInferenceServiceClient) TraceSetting(ctx context.Context, in *TraceSettingRequest, opts ...grpc.CallOption) (*TraceSettingResponse, error) {
out := new(TraceSettingResponse)
err := c.cc.Invoke(ctx, "/inference.v1.GRPCInferenceService/TraceSetting", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *gRPCInferenceServiceClient) LogSettings(ctx context.Context, in *LogSettingsRequest, opts ...grpc.CallOption) (*LogSettingsResponse, error) {
out := new(LogSettingsResponse)
err := c.cc.Invoke(ctx, "/inference.v1.GRPCInferenceService/LogSettings", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// GRPCInferenceServiceServer is the server API for GRPCInferenceService service.
// All implementations should embed UnimplementedGRPCInferenceServiceServer
// for forward compatibility
type GRPCInferenceServiceServer interface {
// @@ .. cpp:var:: rpc ServerLive(ServerLiveRequest) returns
// @@ (ServerLiveResponse)
// @@
// @@ Check liveness of the inference server.
// @@
ServerLive(context.Context, *ServerLiveRequest) (*ServerLiveResponse, error)
// @@ .. cpp:var:: rpc ServerReady(ServerReadyRequest) returns
// @@ (ServerReadyResponse)
// @@
// @@ Check readiness of the inference server.
// @@
ServerReady(context.Context, *ServerReadyRequest) (*ServerReadyResponse, error)
// @@ .. cpp:var:: rpc ModelReady(ModelReadyRequest) returns
// @@ (ModelReadyResponse)
// @@
// @@ Check readiness of a model in the inference server.
// @@
ModelReady(context.Context, *ModelReadyRequest) (*ModelReadyResponse, error)
// @@ .. cpp:var:: rpc ServerMetadata(ServerMetadataRequest) returns
// @@ (ServerMetadataResponse)
// @@
// @@ Get server metadata.
// @@
ServerMetadata(context.Context, *ServerMetadataRequest) (*ServerMetadataResponse, error)
// @@ .. cpp:var:: rpc ModelMetadata(ModelMetadataRequest) returns
// @@ (ModelMetadataResponse)
// @@
// @@ Get model metadata.
// @@
ModelMetadata(context.Context, *ModelMetadataRequest) (*ModelMetadataResponse, error)
// @@ .. cpp:var:: rpc ModelInfer(ModelInferRequest) returns
// @@ (ModelInferResponse)
// @@
// @@ Perform inference using a specific model.
// @@
ModelInfer(context.Context, *ModelInferRequest) (*ModelInferResponse, error)
// @@ .. cpp:var:: rpc ModelStreamInfer(stream ModelInferRequest) returns
// @@ (stream ModelStreamInferResponse)
// @@
// @@ Perform streaming inference.
// @@
ModelStreamInfer(GRPCInferenceService_ModelStreamInferServer) error
// @@ .. cpp:var:: rpc ModelConfig(ModelConfigRequest) returns
// @@ (ModelConfigResponse)
// @@
// @@ Get model configuration.
// @@
ModelConfig(context.Context, *ModelConfigRequest) (*ModelConfigResponse, error)
// @@ .. cpp:var:: rpc ModelStatistics(
// @@ ModelStatisticsRequest)
// @@ returns (ModelStatisticsResponse)
// @@
// @@ Get the cumulative inference statistics for a model.
// @@
ModelStatistics(context.Context, *ModelStatisticsRequest) (*ModelStatisticsResponse, error)
// @@ .. cpp:var:: rpc RepositoryIndex(RepositoryIndexRequest) returns
// @@ (RepositoryIndexResponse)
// @@
// @@ Get the index of model repository contents.
// @@
RepositoryIndex(context.Context, *RepositoryIndexRequest) (*RepositoryIndexResponse, error)
// @@ .. cpp:var:: rpc RepositoryModelLoad(RepositoryModelLoadRequest) returns
// @@ (RepositoryModelLoadResponse)
// @@
// @@ Load or reload a model from a repository.
// @@
RepositoryModelLoad(context.Context, *RepositoryModelLoadRequest) (*RepositoryModelLoadResponse, error)
// @@ .. cpp:var:: rpc RepositoryModelUnload(RepositoryModelUnloadRequest)
// @@ returns (RepositoryModelUnloadResponse)
// @@
// @@ Unload a model.
// @@
RepositoryModelUnload(context.Context, *RepositoryModelUnloadRequest) (*RepositoryModelUnloadResponse, error)
// @@ .. cpp:var:: rpc SystemSharedMemoryStatus(
// @@ SystemSharedMemoryStatusRequest)
// @@     returns (SystemSharedMemoryStatusResponse)
// @@
// @@ Get the status of all registered system-shared-memory regions.
// @@
SystemSharedMemoryStatus(context.Context, *SystemSharedMemoryStatusRequest) (*SystemSharedMemoryStatusResponse, error)
// @@ .. cpp:var:: rpc SystemSharedMemoryRegister(
// @@ SystemSharedMemoryRegisterRequest)
// @@ returns (SystemSharedMemoryRegisterResponse)
// @@
// @@ Register a system-shared-memory region.
// @@
SystemSharedMemoryRegister(context.Context, *SystemSharedMemoryRegisterRequest) (*SystemSharedMemoryRegisterResponse, error)
// @@ .. cpp:var:: rpc SystemSharedMemoryUnregister(
// @@ SystemSharedMemoryUnregisterRequest)
// @@ returns (SystemSharedMemoryUnregisterResponse)
// @@
// @@ Unregister a system-shared-memory region.
// @@
SystemSharedMemoryUnregister(context.Context, *SystemSharedMemoryUnregisterRequest) (*SystemSharedMemoryUnregisterResponse, error)
// @@ .. cpp:var:: rpc CudaSharedMemoryStatus(
// @@ CudaSharedMemoryStatusRequest)
// @@     returns (CudaSharedMemoryStatusResponse)
// @@
// @@ Get the status of all registered CUDA-shared-memory regions.
// @@
CudaSharedMemoryStatus(context.Context, *CudaSharedMemoryStatusRequest) (*CudaSharedMemoryStatusResponse, error)
// @@ .. cpp:var:: rpc CudaSharedMemoryRegister(
// @@ CudaSharedMemoryRegisterRequest)
// @@ returns (CudaSharedMemoryRegisterResponse)
// @@
// @@ Register a CUDA-shared-memory region.
// @@
CudaSharedMemoryRegister(context.Context, *CudaSharedMemoryRegisterRequest) (*CudaSharedMemoryRegisterResponse, error)
// @@ .. cpp:var:: rpc CudaSharedMemoryUnregister(
// @@ CudaSharedMemoryUnregisterRequest)
// @@ returns (CudaSharedMemoryUnregisterResponse)
// @@
// @@ Unregister a CUDA-shared-memory region.
// @@
CudaSharedMemoryUnregister(context.Context, *CudaSharedMemoryUnregisterRequest) (*CudaSharedMemoryUnregisterResponse, error)
// @@ .. cpp:var:: rpc TraceSetting(TraceSettingRequest)
// @@ returns (TraceSettingResponse)
// @@
// @@ Update and get the trace setting of the Triton server.
// @@
TraceSetting(context.Context, *TraceSettingRequest) (*TraceSettingResponse, error)
// @@ .. cpp:var:: rpc LogSettings(LogSettingsRequest)
// @@ returns (LogSettingsResponse)
// @@
// @@ Update and get the log settings of the Triton server.
// @@
LogSettings(context.Context, *LogSettingsRequest) (*LogSettingsResponse, error)
}
// UnimplementedGRPCInferenceServiceServer should be embedded to have forward compatible implementations.
type UnimplementedGRPCInferenceServiceServer struct {
}
func (UnimplementedGRPCInferenceServiceServer) ServerLive(context.Context, *ServerLiveRequest) (*ServerLiveResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ServerLive not implemented")
}
func (UnimplementedGRPCInferenceServiceServer) ServerReady(context.Context, *ServerReadyRequest) (*ServerReadyResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ServerReady not implemented")
}
func (UnimplementedGRPCInferenceServiceServer) ModelReady(context.Context, *ModelReadyRequest) (*ModelReadyResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ModelReady not implemented")
}
func (UnimplementedGRPCInferenceServiceServer) ServerMetadata(context.Context, *ServerMetadataRequest) (*ServerMetadataResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ServerMetadata not implemented")
}
func (UnimplementedGRPCInferenceServiceServer) ModelMetadata(context.Context, *ModelMetadataRequest) (*ModelMetadataResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ModelMetadata not implemented")
}
func (UnimplementedGRPCInferenceServiceServer) ModelInfer(context.Context, *ModelInferRequest) (*ModelInferResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ModelInfer not implemented")
}
func (UnimplementedGRPCInferenceServiceServer) ModelStreamInfer(GRPCInferenceService_ModelStreamInferServer) error {
return status.Errorf(codes.Unimplemented, "method ModelStreamInfer not implemented")
}
func (UnimplementedGRPCInferenceServiceServer) ModelConfig(context.Context, *ModelConfigRequest) (*ModelConfigResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ModelConfig not implemented")
}
func (UnimplementedGRPCInferenceServiceServer) ModelStatistics(context.Context, *ModelStatisticsRequest) (*ModelStatisticsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method ModelStatistics not implemented")
}
func (UnimplementedGRPCInferenceServiceServer) RepositoryIndex(context.Context, *RepositoryIndexRequest) (*RepositoryIndexResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method RepositoryIndex not implemented")
}
func (UnimplementedGRPCInferenceServiceServer) RepositoryModelLoad(context.Context, *RepositoryModelLoadRequest) (*RepositoryModelLoadResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method RepositoryModelLoad not implemented")
}
func (UnimplementedGRPCInferenceServiceServer) RepositoryModelUnload(context.Context, *RepositoryModelUnloadRequest) (*RepositoryModelUnloadResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method RepositoryModelUnload not implemented")
}
func (UnimplementedGRPCInferenceServiceServer) SystemSharedMemoryStatus(context.Context, *SystemSharedMemoryStatusRequest) (*SystemSharedMemoryStatusResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SystemSharedMemoryStatus not implemented")
}
func (UnimplementedGRPCInferenceServiceServer) SystemSharedMemoryRegister(context.Context, *SystemSharedMemoryRegisterRequest) (*SystemSharedMemoryRegisterResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SystemSharedMemoryRegister not implemented")
}
func (UnimplementedGRPCInferenceServiceServer) SystemSharedMemoryUnregister(context.Context, *SystemSharedMemoryUnregisterRequest) (*SystemSharedMemoryUnregisterResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method SystemSharedMemoryUnregister not implemented")
}
func (UnimplementedGRPCInferenceServiceServer) CudaSharedMemoryStatus(context.Context, *CudaSharedMemoryStatusRequest) (*CudaSharedMemoryStatusResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method CudaSharedMemoryStatus not implemented")
}
func (UnimplementedGRPCInferenceServiceServer) CudaSharedMemoryRegister(context.Context, *CudaSharedMemoryRegisterRequest) (*CudaSharedMemoryRegisterResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method CudaSharedMemoryRegister not implemented")
}
func (UnimplementedGRPCInferenceServiceServer) CudaSharedMemoryUnregister(context.Context, *CudaSharedMemoryUnregisterRequest) (*CudaSharedMemoryUnregisterResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method CudaSharedMemoryUnregister not implemented")
}
func (UnimplementedGRPCInferenceServiceServer) TraceSetting(context.Context, *TraceSettingRequest) (*TraceSettingResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method TraceSetting not implemented")
}
func (UnimplementedGRPCInferenceServiceServer) LogSettings(context.Context, *LogSettingsRequest) (*LogSettingsResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method LogSettings not implemented")
}
// UnsafeGRPCInferenceServiceServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to GRPCInferenceServiceServer will
// result in compilation errors.
type UnsafeGRPCInferenceServiceServer interface {
mustEmbedUnimplementedGRPCInferenceServiceServer()
}
func RegisterGRPCInferenceServiceServer(s grpc.ServiceRegistrar, srv GRPCInferenceServiceServer) {
s.RegisterService(&GRPCInferenceService_ServiceDesc, srv)
}
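// Example (not generated): a minimal server sketch. The concrete type embeds
// UnimplementedGRPCInferenceServiceServer so every method it does not override
// answers with codes.Unimplemented; the type name and the unconditional
// liveness answer are illustrative only.
type exampleInferenceServer struct {
	UnimplementedGRPCInferenceServiceServer
}

func (s *exampleInferenceServer) ServerLive(ctx context.Context, in *ServerLiveRequest) (*ServerLiveResponse, error) {
	// Live is the field generated from the proto's `bool live`; a real server
	// would consult its own state instead of always reporting true.
	return &ServerLiveResponse{Live: true}, nil
}

// exampleRegisterInference wires the sketch above into any grpc.ServiceRegistrar,
// typically a *grpc.Server.
func exampleRegisterInference(s grpc.ServiceRegistrar) {
	RegisterGRPCInferenceServiceServer(s, &exampleInferenceServer{})
}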
func _GRPCInferenceService_ServerLive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ServerLiveRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GRPCInferenceServiceServer).ServerLive(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/inference.v1.GRPCInferenceService/ServerLive",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GRPCInferenceServiceServer).ServerLive(ctx, req.(*ServerLiveRequest))
}
return interceptor(ctx, in, info, handler)
}
func _GRPCInferenceService_ServerReady_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ServerReadyRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GRPCInferenceServiceServer).ServerReady(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/inference.v1.GRPCInferenceService/ServerReady",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GRPCInferenceServiceServer).ServerReady(ctx, req.(*ServerReadyRequest))
}
return interceptor(ctx, in, info, handler)
}
func _GRPCInferenceService_ModelReady_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ModelReadyRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GRPCInferenceServiceServer).ModelReady(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/inference.v1.GRPCInferenceService/ModelReady",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GRPCInferenceServiceServer).ModelReady(ctx, req.(*ModelReadyRequest))
}
return interceptor(ctx, in, info, handler)
}
func _GRPCInferenceService_ServerMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ServerMetadataRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GRPCInferenceServiceServer).ServerMetadata(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/inference.v1.GRPCInferenceService/ServerMetadata",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GRPCInferenceServiceServer).ServerMetadata(ctx, req.(*ServerMetadataRequest))
}
return interceptor(ctx, in, info, handler)
}
func _GRPCInferenceService_ModelMetadata_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ModelMetadataRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GRPCInferenceServiceServer).ModelMetadata(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/inference.v1.GRPCInferenceService/ModelMetadata",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GRPCInferenceServiceServer).ModelMetadata(ctx, req.(*ModelMetadataRequest))
}
return interceptor(ctx, in, info, handler)
}
func _GRPCInferenceService_ModelInfer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ModelInferRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GRPCInferenceServiceServer).ModelInfer(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/inference.v1.GRPCInferenceService/ModelInfer",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GRPCInferenceServiceServer).ModelInfer(ctx, req.(*ModelInferRequest))
}
return interceptor(ctx, in, info, handler)
}
func _GRPCInferenceService_ModelStreamInfer_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(GRPCInferenceServiceServer).ModelStreamInfer(&gRPCInferenceServiceModelStreamInferServer{stream})
}
type GRPCInferenceService_ModelStreamInferServer interface {
Send(*ModelStreamInferResponse) error
Recv() (*ModelInferRequest, error)
grpc.ServerStream
}
type gRPCInferenceServiceModelStreamInferServer struct {
grpc.ServerStream
}
func (x *gRPCInferenceServiceModelStreamInferServer) Send(m *ModelStreamInferResponse) error {
return x.ServerStream.SendMsg(m)
}
func (x *gRPCInferenceServiceModelStreamInferServer) Recv() (*ModelInferRequest, error) {
m := new(ModelInferRequest)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
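// Example (not generated): a sketch of a server-side ModelStreamInfer handler,
// shown as a comment because it needs the standard library io package, which
// this generated file does not import. The empty response is a placeholder,
// not a real inference result.
//
//	func (s *exampleInferenceServer) ModelStreamInfer(stream GRPCInferenceService_ModelStreamInferServer) error {
//		for {
//			req, err := stream.Recv()
//			if err == io.EOF {
//				return nil // client half-closed the stream; end the RPC cleanly
//			}
//			if err != nil {
//				return err
//			}
//			_ = req // a real handler would run inference on req here
//			if err := stream.Send(&ModelStreamInferResponse{}); err != nil {
//				return err
//			}
//		}
//	}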
func _GRPCInferenceService_ModelConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ModelConfigRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GRPCInferenceServiceServer).ModelConfig(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/inference.v1.GRPCInferenceService/ModelConfig",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GRPCInferenceServiceServer).ModelConfig(ctx, req.(*ModelConfigRequest))
}
return interceptor(ctx, in, info, handler)
}
func _GRPCInferenceService_ModelStatistics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ModelStatisticsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GRPCInferenceServiceServer).ModelStatistics(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/inference.v1.GRPCInferenceService/ModelStatistics",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GRPCInferenceServiceServer).ModelStatistics(ctx, req.(*ModelStatisticsRequest))
}
return interceptor(ctx, in, info, handler)
}
func _GRPCInferenceService_RepositoryIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(RepositoryIndexRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GRPCInferenceServiceServer).RepositoryIndex(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/inference.v1.GRPCInferenceService/RepositoryIndex",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GRPCInferenceServiceServer).RepositoryIndex(ctx, req.(*RepositoryIndexRequest))
}
return interceptor(ctx, in, info, handler)
}
func _GRPCInferenceService_RepositoryModelLoad_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(RepositoryModelLoadRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GRPCInferenceServiceServer).RepositoryModelLoad(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/inference.v1.GRPCInferenceService/RepositoryModelLoad",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GRPCInferenceServiceServer).RepositoryModelLoad(ctx, req.(*RepositoryModelLoadRequest))
}
return interceptor(ctx, in, info, handler)
}
func _GRPCInferenceService_RepositoryModelUnload_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(RepositoryModelUnloadRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GRPCInferenceServiceServer).RepositoryModelUnload(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/inference.v1.GRPCInferenceService/RepositoryModelUnload",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GRPCInferenceServiceServer).RepositoryModelUnload(ctx, req.(*RepositoryModelUnloadRequest))
}
return interceptor(ctx, in, info, handler)
}
func _GRPCInferenceService_SystemSharedMemoryStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SystemSharedMemoryStatusRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GRPCInferenceServiceServer).SystemSharedMemoryStatus(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/inference.v1.GRPCInferenceService/SystemSharedMemoryStatus",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GRPCInferenceServiceServer).SystemSharedMemoryStatus(ctx, req.(*SystemSharedMemoryStatusRequest))
}
return interceptor(ctx, in, info, handler)
}
func _GRPCInferenceService_SystemSharedMemoryRegister_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SystemSharedMemoryRegisterRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GRPCInferenceServiceServer).SystemSharedMemoryRegister(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/inference.v1.GRPCInferenceService/SystemSharedMemoryRegister",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GRPCInferenceServiceServer).SystemSharedMemoryRegister(ctx, req.(*SystemSharedMemoryRegisterRequest))
}
return interceptor(ctx, in, info, handler)
}
func _GRPCInferenceService_SystemSharedMemoryUnregister_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(SystemSharedMemoryUnregisterRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GRPCInferenceServiceServer).SystemSharedMemoryUnregister(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/inference.v1.GRPCInferenceService/SystemSharedMemoryUnregister",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GRPCInferenceServiceServer).SystemSharedMemoryUnregister(ctx, req.(*SystemSharedMemoryUnregisterRequest))
}
return interceptor(ctx, in, info, handler)
}
func _GRPCInferenceService_CudaSharedMemoryStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CudaSharedMemoryStatusRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GRPCInferenceServiceServer).CudaSharedMemoryStatus(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/inference.v1.GRPCInferenceService/CudaSharedMemoryStatus",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GRPCInferenceServiceServer).CudaSharedMemoryStatus(ctx, req.(*CudaSharedMemoryStatusRequest))
}
return interceptor(ctx, in, info, handler)
}
func _GRPCInferenceService_CudaSharedMemoryRegister_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CudaSharedMemoryRegisterRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GRPCInferenceServiceServer).CudaSharedMemoryRegister(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/inference.v1.GRPCInferenceService/CudaSharedMemoryRegister",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GRPCInferenceServiceServer).CudaSharedMemoryRegister(ctx, req.(*CudaSharedMemoryRegisterRequest))
}
return interceptor(ctx, in, info, handler)
}
func _GRPCInferenceService_CudaSharedMemoryUnregister_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CudaSharedMemoryUnregisterRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GRPCInferenceServiceServer).CudaSharedMemoryUnregister(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/inference.v1.GRPCInferenceService/CudaSharedMemoryUnregister",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GRPCInferenceServiceServer).CudaSharedMemoryUnregister(ctx, req.(*CudaSharedMemoryUnregisterRequest))
}
return interceptor(ctx, in, info, handler)
}
func _GRPCInferenceService_TraceSetting_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(TraceSettingRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GRPCInferenceServiceServer).TraceSetting(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/inference.v1.GRPCInferenceService/TraceSetting",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GRPCInferenceServiceServer).TraceSetting(ctx, req.(*TraceSettingRequest))
}
return interceptor(ctx, in, info, handler)
}
func _GRPCInferenceService_LogSettings_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(LogSettingsRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(GRPCInferenceServiceServer).LogSettings(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/inference.v1.GRPCInferenceService/LogSettings",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(GRPCInferenceServiceServer).LogSettings(ctx, req.(*LogSettingsRequest))
}
return interceptor(ctx, in, info, handler)
}
// GRPCInferenceService_ServiceDesc is the grpc.ServiceDesc for GRPCInferenceService service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var GRPCInferenceService_ServiceDesc = grpc.ServiceDesc{
ServiceName: "inference.v1.GRPCInferenceService",
HandlerType: (*GRPCInferenceServiceServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "ServerLive",
Handler: _GRPCInferenceService_ServerLive_Handler,
},
{
MethodName: "ServerReady",
Handler: _GRPCInferenceService_ServerReady_Handler,
},
{
MethodName: "ModelReady",
Handler: _GRPCInferenceService_ModelReady_Handler,
},
{
MethodName: "ServerMetadata",
Handler: _GRPCInferenceService_ServerMetadata_Handler,
},
{
MethodName: "ModelMetadata",
Handler: _GRPCInferenceService_ModelMetadata_Handler,
},
{
MethodName: "ModelInfer",
Handler: _GRPCInferenceService_ModelInfer_Handler,
},
{
MethodName: "ModelConfig",
Handler: _GRPCInferenceService_ModelConfig_Handler,
},
{
MethodName: "ModelStatistics",
Handler: _GRPCInferenceService_ModelStatistics_Handler,
},
{
MethodName: "RepositoryIndex",
Handler: _GRPCInferenceService_RepositoryIndex_Handler,
},
{
MethodName: "RepositoryModelLoad",
Handler: _GRPCInferenceService_RepositoryModelLoad_Handler,
},
{
MethodName: "RepositoryModelUnload",
Handler: _GRPCInferenceService_RepositoryModelUnload_Handler,
},
{
MethodName: "SystemSharedMemoryStatus",
Handler: _GRPCInferenceService_SystemSharedMemoryStatus_Handler,
},
{
MethodName: "SystemSharedMemoryRegister",
Handler: _GRPCInferenceService_SystemSharedMemoryRegister_Handler,
},
{
MethodName: "SystemSharedMemoryUnregister",
Handler: _GRPCInferenceService_SystemSharedMemoryUnregister_Handler,
},
{
MethodName: "CudaSharedMemoryStatus",
Handler: _GRPCInferenceService_CudaSharedMemoryStatus_Handler,
},
{
MethodName: "CudaSharedMemoryRegister",
Handler: _GRPCInferenceService_CudaSharedMemoryRegister_Handler,
},
{
MethodName: "CudaSharedMemoryUnregister",
Handler: _GRPCInferenceService_CudaSharedMemoryUnregister_Handler,
},
{
MethodName: "TraceSetting",
Handler: _GRPCInferenceService_TraceSetting_Handler,
},
{
MethodName: "LogSettings",
Handler: _GRPCInferenceService_LogSettings_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "ModelStreamInfer",
Handler: _GRPCInferenceService_ModelStreamInfer_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "pkg/apis/inference/v1/grpc_service.proto",
}