feat: Add MCP service configuration and related environment variables

parent 73d2cab46e
commit 83107857bd

9 changed files with 256 additions and 4 deletions
@@ -131,6 +131,8 @@ Usage:
    value: {{ $.Release.Name }}-admin-dashboard.{{ $.Release.Namespace }}.svc.{{ $.Values.global.clusterDomain }}
  - name: SERVER_DOCS_HOSTNAME
    value: {{ $.Release.Name }}-docs.{{ $.Release.Namespace }}.svc.{{ $.Values.global.clusterDomain }}
  - name: SERVER_MCP_HOSTNAME
    value: {{ $.Release.Name }}-mcp.{{ $.Release.Namespace }}.svc.{{ $.Values.global.clusterDomain }}

  - name: APP_PORT
    value: {{ $.Values.app.ports.http | squote }}

@@ -164,6 +166,8 @@ Usage:
    value: {{ $.Values.apiReference.ports.http | squote }}
  - name: DOCS_PORT
    value: {{ $.Values.docs.ports.http | squote }}
  - name: MCP_PORT
    value: {{ $.Values.mcp.ports.http | squote }}
{{- end }}
HelmChart/Public/oneuptime/templates/mcp.yaml (new file, 19 lines)

@@ -0,0 +1,19 @@
{{- if $.Values.mcp.enabled }}
# OneUptime MCP Deployment
{{- $mcpEnv := dict "PORT" $.Values.mcp.ports.http "DISABLE_TELEMETRY" $.Values.mcp.disableTelemetryCollection -}}
{{- $mcpPorts := $.Values.mcp.ports -}}
{{- $mcpDeploymentArgs := dict "ServiceName" "mcp" "Ports" $mcpPorts "Release" $.Release "Values" $.Values "Env" $mcpEnv "Resources" $.Values.mcp.resources "NodeSelector" $.Values.mcp.nodeSelector "PodSecurityContext" $.Values.mcp.podSecurityContext "ContainerSecurityContext" $.Values.mcp.containerSecurityContext "DisableAutoscaler" $.Values.mcp.disableAutoscaler "ReplicaCount" $.Values.mcp.replicaCount -}}
{{- include "oneuptime.deployment" $mcpDeploymentArgs }}
---

# OneUptime MCP Service
{{- $mcpPorts := $.Values.mcp.ports -}}
{{- $mcpServiceArgs := dict "ServiceName" "mcp" "Ports" $mcpPorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $mcpServiceArgs }}
---

# OneUptime MCP autoscaler
{{- $mcpAutoScalerArgs := dict "ServiceName" "mcp" "Release" $.Release "Values" $.Values "DisableAutoscaler" $.Values.mcp.disableAutoscaler -}}
{{- include "oneuptime.autoscaler" $mcpAutoScalerArgs }}
---
{{- end }}
@@ -1921,6 +1921,45 @@
      },
      "additionalProperties": false
    },
    "mcp": {
      "type": "object",
      "properties": {
        "enabled": {
          "type": "boolean"
        },
        "replicaCount": {
          "type": "integer"
        },
        "disableTelemetryCollection": {
          "type": "boolean"
        },
        "disableAutoscaler": {
          "type": "boolean"
        },
        "ports": {
          "type": "object",
          "properties": {
            "http": {
              "type": "integer"
            }
          },
          "additionalProperties": false
        },
        "resources": {
          "type": ["object", "null"]
        },
        "nodeSelector": {
          "type": "object"
        },
        "podSecurityContext": {
          "type": "object"
        },
        "containerSecurityContext": {
          "type": "object"
        }
      },
      "additionalProperties": false
    },
    "serverMonitorIngest": {
      "type": "object",
      "properties": {
@@ -765,6 +765,18 @@ isolatedVM:
  podSecurityContext: {}
  containerSecurityContext: {}

mcp:
  enabled: true
  replicaCount: 1
  disableTelemetryCollection: false
  disableAutoscaler: false
  ports:
    http: 3405
  resources:
  nodeSelector: {}
  podSecurityContext: {}
  containerSecurityContext: {}

serverMonitorIngest:
  enabled: true
  replicaCount: 1
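The mcp block in the values schema mirrors the defaults added to values.yaml above. As an illustration of how the two fit together (not part of this commit), here is a small TypeScript sketch that checks the default values against a subset of that schema using the Ajv JSON Schema validator; the Ajv dependency and the inlined schema subset are assumptions made purely for this example.

// Illustrative only: validate the mcp defaults from values.yaml against the
// mcp property schema added to the values schema. Ajv is not part of this
// commit; it stands in for any JSON Schema validator.
import Ajv from "ajv";

const mcpSchema = {
  type: "object",
  properties: {
    enabled: { type: "boolean" },
    replicaCount: { type: "integer" },
    disableTelemetryCollection: { type: "boolean" },
    disableAutoscaler: { type: "boolean" },
    ports: {
      type: "object",
      properties: { http: { type: "integer" } },
      additionalProperties: false,
    },
    // "resources" is left empty in values.yaml (parses as null), hence the null variant.
    resources: { type: ["object", "null"] },
    nodeSelector: { type: "object" },
    podSecurityContext: { type: "object" },
    containerSecurityContext: { type: "object" },
  },
  additionalProperties: false,
};

// Defaults taken from the values.yaml hunk above.
const mcpValues: unknown = {
  enabled: true,
  replicaCount: 1,
  disableTelemetryCollection: false,
  disableAutoscaler: false,
  ports: { http: 3405 },
  resources: null,
  nodeSelector: {},
  podSecurityContext: {},
  containerSecurityContext: {},
};

const ajv = new Ajv();
const validate = ajv.compile(mcpSchema);
console.log(validate(mcpValues) ? "mcp values match schema" : validate.errors);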
MCP/Index.ts (125 changed lines)

@@ -2,6 +2,7 @@

import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { SSEServerTransport } from "@modelcontextprotocol/sdk/server/sse.js";
import {
  CallToolRequestSchema,
  ErrorCode,

@@ -9,6 +10,13 @@ import {
  McpError,
} from "@modelcontextprotocol/sdk/types.js";
import dotenv from "dotenv";
import Express, {
  ExpressApplication,
  ExpressRequest,
  ExpressResponse,
  NextFunction,
  ExpressJson,
} from "Common/Server/Utils/Express";
import DynamicToolGenerator from "./Utils/DynamicToolGenerator";
import OneUptimeApiService, {
  OneUptimeApiConfig,

@@ -16,6 +24,7 @@ import OneUptimeApiService, {
import { McpToolInfo, OneUptimeToolCallArgs } from "./Types/McpTypes";
import OneUptimeOperation from "./Types/OneUptimeOperation";
import MCPLogger from "./Utils/MCPLogger";
import http from "http";

// Load environment variables
dotenv.config();

@@ -214,10 +223,22 @@ class OneUptimeMCPServer {
  }

  public async run(): Promise<void> {
    const port: string | undefined = process.env["PORT"];

    if (port) {
      // HTTP mode - run as web server with SSE transport
      await this.runHttpServer(parseInt(port, 10));
    } else {
      // Stdio mode - for CLI usage
      await this.runStdioServer();
    }
  }

  private async runStdioServer(): Promise<void> {
    const transport: StdioServerTransport = new StdioServerTransport();
    await this.server.connect(transport);

    MCPLogger.info("OneUptime MCP Server is running!");
    MCPLogger.info("OneUptime MCP Server is running in stdio mode!");
    MCPLogger.info(`Available tools: ${this.tools.length} total`);

    // Log some example tools

@@ -228,6 +249,108 @@ class OneUptimeMCPServer {
    });
    MCPLogger.info(`Example tools: ${exampleTools.join(", ")}`);
  }

  private async runHttpServer(port: number): Promise<void> {
    Express.setupExpress();
    const app: ExpressApplication = Express.getExpressApp();

    // Store active SSE transports
    const transports: Map<string, SSEServerTransport> = new Map();

    // Health check endpoint
    app.get("/health", (_req: ExpressRequest, res: ExpressResponse) => {
      res.json({
        status: "healthy",
        service: "oneuptime-mcp",
        tools: this.tools.length,
      });
    });

    // SSE endpoint for MCP connections
    app.get("/sse", async (req: ExpressRequest, res: ExpressResponse) => {
      MCPLogger.info("New SSE connection established");

      // Set SSE headers
      res.setHeader("Content-Type", "text/event-stream");
      res.setHeader("Cache-Control", "no-cache");
      res.setHeader("Connection", "keep-alive");
      res.setHeader("Access-Control-Allow-Origin", "*");

      // Create SSE transport
      const transport: SSEServerTransport = new SSEServerTransport(
        "/message",
        res,
      );

      // Store transport with session ID
      const sessionId: string = `session-${Date.now()}-${Math.random().toString(36).substring(7)}`;
      transports.set(sessionId, transport);

      // Handle connection close
      req.on("close", () => {
        MCPLogger.info(`SSE connection closed: ${sessionId}`);
        transports.delete(sessionId);
      });

      // Connect server to transport
      await this.server.connect(transport);
    });

    // Message endpoint for client-to-server messages
    app.post(
      "/message",
      ExpressJson(),
      async (req: ExpressRequest, res: ExpressResponse, next: NextFunction) => {
        try {
          // Find the transport for this session
          // In a real implementation, you'd use session management
          const transport: SSEServerTransport | undefined = Array.from(
            transports.values(),
          )[0];
          if (transport) {
            await transport.handlePostMessage(req, res);
          } else {
            res.status(400).json({ error: "No active SSE connection" });
          }
        } catch (error) {
          next(error);
        }
      },
    );

    // List tools endpoint (REST API)
    app.get("/tools", (_req: ExpressRequest, res: ExpressResponse) => {
      const toolsList: Array<{
        name: string;
        description: string;
      }> = this.tools.map((tool: McpToolInfo) => {
        return {
          name: tool.name,
          description: tool.description,
        };
      });
      res.json({ tools: toolsList, count: toolsList.length });
    });

    // Create HTTP server
    const httpServer: http.Server = http.createServer(app);

    httpServer.listen(port, () => {
      MCPLogger.info(`OneUptime MCP Server is running in HTTP mode on port ${port}`);
      MCPLogger.info(`Available tools: ${this.tools.length} total`);
      MCPLogger.info(`Health check: http://localhost:${port}/health`);
      MCPLogger.info(`Tools list: http://localhost:${port}/tools`);
      MCPLogger.info(`SSE endpoint: http://localhost:${port}/sse`);

      // Log some example tools
      const exampleTools: string[] = this.tools
        .slice(0, 5)
        .map((t: McpToolInfo) => {
          return t.name;
        });
      MCPLogger.info(`Example tools: ${exampleTools.join(", ")}`);
    });
  }
}

// Start the server
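For context, here is a minimal sketch of how a client could exercise the new HTTP/SSE mode added in MCP/Index.ts above. It assumes the server is running in HTTP mode on port 3405 (the MCP_PORT default introduced by this change) and uses the client classes from the same @modelcontextprotocol/sdk package; the client code itself is illustrative and not part of this commit.

// Illustrative client, not part of this commit: connect to the MCP server's
// SSE endpoint and list the exposed tools over the MCP protocol.
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { SSEClientTransport } from "@modelcontextprotocol/sdk/client/sse.js";

async function main(): Promise<void> {
  // Assumes the server was started with PORT=3405, so it is in HTTP mode.
  const transport = new SSEClientTransport(new URL("http://localhost:3405/sse"));
  const client = new Client(
    { name: "oneuptime-mcp-smoke-test", version: "1.0.0" },
    { capabilities: {} },
  );

  await client.connect(transport);

  // Same data the /tools REST endpoint returns, but over MCP.
  const tools = await client.listTools();
  console.log(`Server exposes ${tools.tools.length} tools`);

  await client.close();
}

main().catch((err: unknown) => {
  console.error(err);
  process.exit(1);
});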
@@ -72,6 +72,10 @@ upstream opentelemetry-collector-grpc {
  server ${SERVER_OTEL_COLLECTOR_HOSTNAME}:4317;
}

upstream mcp {
  server ${SERVER_MCP_HOSTNAME}:${MCP_PORT} weight=10 max_fails=3 fail_timeout=30s;
}

# Status Pages

server {

@@ -915,12 +919,12 @@ ${PROVISION_SSL_CERTIFICATE_KEY_DIRECTIVE}
  }

  location /workers {
    # This is for nginx not to crash when service is not available.
    resolver 127.0.0.1 valid=30s;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;

    # enable WebSockets (for ws://sockjs not connected error in the accounts source: https://stackoverflow.com/questions/41381444/websocket-connection-failed-error-during-websocket-handshake-unexpected-respon)
    proxy_http_version 1.1;

@@ -928,4 +932,31 @@ ${PROVISION_SSL_CERTIFICATE_KEY_DIRECTIVE}
    proxy_set_header Connection "upgrade";
    proxy_pass http://app/api/workers;
  }

  location /mcp/ {
    # This is for nginx not to crash when service is not available.
    resolver 127.0.0.1 valid=30s;
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;

    # enable WebSockets and SSE (for MCP Server-Sent Events)
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";

    # SSE specific settings for long-lived connections
    proxy_buffering off;
    proxy_cache off;
    proxy_read_timeout 86400s;
    proxy_send_timeout 86400s;
    chunked_transfer_encoding on;

    proxy_pass http://mcp/;
  }

  location = /mcp {
    return 301 /mcp/;
  }
}
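Because the new location block proxies /mcp/ to http://mcp/ with a trailing slash, nginx strips the /mcp prefix before forwarding, so /mcp/health and /mcp/tools reach the server's /health and /tools handlers. A quick way to confirm the proxy path is wired through is sketched below; the https://oneuptime.example.com host is a placeholder, global fetch assumes Node 18+, and none of this is part of the commit.

// Illustrative smoke test of the /mcp/ reverse-proxy path, not part of this commit.
const baseUrl: string = "https://oneuptime.example.com"; // placeholder host

async function checkMcpProxy(): Promise<void> {
  // Hits the MCP service's /health handler through nginx.
  const health = await fetch(`${baseUrl}/mcp/health`);
  console.log("health:", health.status, await health.json());

  // Hits the MCP service's /tools handler through nginx.
  const tools = await fetch(`${baseUrl}/mcp/tools`);
  const body = (await tools.json()) as { count: number };
  console.log(`tools exposed through the proxy: ${body.count}`);
}

checkMcpProxy().catch((err: unknown) => {
  console.error("MCP proxy check failed:", err);
});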
@@ -109,6 +109,7 @@ SERVER_OTEL_COLLECTOR_HOSTNAME=otel-collector
SERVER_API_REFERENCE_HOSTNAME=reference
SERVER_WORKER_HOSTNAME=worker
SERVER_DOCS_HOSTNAME=docs
SERVER_MCP_HOSTNAME=mcp

#Ports. Usually they don't need to change.

@@ -129,6 +130,7 @@ WORKER_PORT=1445
WORKFLOW_PORT=3099
API_REFERENCE_PORT=1446
DOCS_PORT=1447
MCP_PORT=3405

# Plans
# This is in the format of PlanName,PlanIdFromBillingProvider,MonthlySubscriptionPlanAmountInUSD,YearlySubscriptionPlanAmountInUSD,Order,TrialPeriodInDays

@@ -314,6 +316,7 @@ DISABLE_TELEMETRY_FOR_ISOLATED_VM=true
DISABLE_TELEMETRY_FOR_INGRESS=true
DISABLE_TELEMETRY_FOR_WORKER=true
DISABLE_TELEMETRY_FOR_SERVER_MONITOR_INGEST=true
DISABLE_TELEMETRY_FOR_MCP=true


# OPENTELEMETRY_COLLECTOR env vars
@@ -46,6 +46,7 @@ x-common-variables: &common-variables
  SERVER_API_REFERENCE_HOSTNAME: api-reference
  SERVER_DOCS_HOSTNAME: docs
  SERVER_SERVER_MONITOR_INGEST_HOSTNAME: server-monitor-ingest
  SERVER_MCP_HOSTNAME: mcp

  #Ports. Usually they don't need to change.
  APP_PORT: ${APP_PORT}

@@ -64,6 +65,7 @@ x-common-variables: &common-variables
  API_REFERENCE_PORT: ${API_REFERENCE_PORT}
  DOCS_PORT: ${DOCS_PORT}
  SERVER_MONITOR_INGEST_PORT: ${SERVER_MONITOR_INGEST_PORT}
  MCP_PORT: ${MCP_PORT}

  OPENTELEMETRY_EXPORTER_OTLP_ENDPOINT: ${OPENTELEMETRY_EXPORTER_OTLP_ENDPOINT}
  OPENTELEMETRY_EXPORTER_OTLP_HEADERS: ${OPENTELEMETRY_EXPORTER_OTLP_HEADERS}

@@ -508,9 +510,22 @@ services:
      options:
        max-size: "1000m"

  mcp:
    networks:
      - oneuptime
    restart: always
    environment:
      <<: *common-runtime-variables
      PORT: ${MCP_PORT}
      DISABLE_TELEMETRY: ${DISABLE_TELEMETRY_FOR_MCP}
    logging:
      driver: "local"
      options:
        max-size: "1000m"

  e2e:
    restart: "no"
    network_mode: host # This is needed to access the host network,
    environment:
      <<: *common-variables
      E2E_TEST_IS_USER_REGISTERED: ${E2E_TEST_IS_USER_REGISTERED}
@@ -132,6 +132,12 @@ services:
      file: ./docker-compose.base.yml
      service: isolated-vm

  mcp:
    image: oneuptime/mcp:${APP_TAG}
    extends:
      file: ./docker-compose.base.yml
      service: mcp

  ingress:
    image: oneuptime/nginx:${APP_TAG}
    extends: