fix: all examples use test endpoints (deck-api-test, sless-api.kube5s.ru) 2026-03-17

This commit is contained in:
Naeel 2026-03-17 18:53:49 +03:00
parent bc0f688874
commit a768454c08
22 changed files with 666 additions and 12 deletions

View File

@ -0,0 +1,3 @@
# 2026-03-17 00:00
# requirements.txt — dependencies for the SQL runner function.
psycopg2-binary==2.9.9

View File

@ -0,0 +1,39 @@
# 2026-03-17 00:00
# sql_runner.py — функция для выполнения SQL-операторов из входного события.
import os
import psycopg2
def run_sql(event):
    """Execute the SQL statements from *event* inside a single transaction.

    Connection parameters come from the PG* environment variables and are
    passed as separate keyword arguments (not a DSN string) to avoid
    DSN-parsing problems with special characters in the password.

    Args:
        event: dict whose "statements" key holds a list of SQL strings.

    Returns:
        {"ok": True, "executed": N} on success, or {"error": msg} when the
        input is empty or any statement fails (the whole batch is rolled
        back, making schema initialization atomic).
    """
    # Validate the input before touching the environment or the network so
    # an empty event fails fast with the intended message instead of a
    # KeyError from a missing PG* variable.
    statements = event.get("statements", [])
    if not statements:
        return {"error": "no statements provided"}
    pg_host = os.environ["PGHOST"]
    pg_port = os.environ.get("PGPORT", "5432")
    pg_database = os.environ["PGDATABASE"]
    pg_user = os.environ["PGUSER"]
    pg_password = os.environ["PGPASSWORD"]
    pg_sslmode = os.environ.get("PGSSLMODE", "require")
    connection = psycopg2.connect(
        host=pg_host,
        port=pg_port,
        dbname=pg_database,
        user=pg_user,
        password=pg_password,
        sslmode=pg_sslmode,
    )
    try:
        # The cursor context manager closes the cursor even when a
        # statement raises, instead of leaking it until connection close.
        with connection.cursor() as cursor:
            for statement in statements:
                cursor.execute(statement)
        connection.commit()
        return {"ok": True, "executed": len(statements)}
    except Exception as error:
        # All-or-nothing: one failing statement undoes the whole batch.
        connection.rollback()
        return {"error": str(error)}
    finally:
        connection.close()

73
POSTGRES/luceUNDnode.tf Normal file
View File

@ -0,0 +1,73 @@
# resource "nubes_lucee" "app1" {
# # Lucee-приложение, зависит от Postgres
# resource_name = "lucy_teststand_0"
# # resource_realm = "k8s-3.ext.nubes.ru"
# resource_realm = nubes_postgres.db2.resource_realm
# # resource_realm = "k8s-4-sandbox-nubes-ru"
# domain = "web-test-stand"
# git_path = "https://gitea-naeel.giteak8s.services.ngcloud.ru/naeel/testlucee"
# json_env = jsonencode({
# # 🔗 Настройки Data Source 'testds' для Lucee (Application.cfc)
# testds_class = "org.postgresql.Driver" # 📂 Драйвер БД
# testds_bundleName = "org.postgresql.jdbc" # 📦 Имя бандла JDBC
# testds_bundleVersion = "42.6.0" # 🔢 Версия драйвера
# testds_connectionString = "jdbc:postgresql://${nubes_postgres.db2.state_out_flat["internalConnect.master"]}:5432/postgres?sslmode=require" # 🚀 Строка подключения
# testds_username = nubes_postgres_user.db2_user.username # 👤 Логин
# testds_password = jsondecode(nubes_postgres.db2.vault_secrets["users"])[nubes_postgres_user.db2_user.username]["password"] # 🔑 Пароль
# testds_connectionLimit = "5" # 🚦 Лимит соединений
# testds_liveTimeout = "15" # Таймаут жизни
# testds_validate = "false" # Валидация при запросе
# })
# resource_c_p_u = 300
# resource_memory = 512
# resource_instances = 1
# app_version = "5.4"
# depends_on = [nubes_postgres.db2]
# }
# resource "nubes_nodejs" "app3" {
# # NodeJS демо, работающий с тем же Postgres.
# resource_name = "node_01"
# resource_realm = nubes_postgres.db2.resource_realm
# domain = "node07"
# git_path = "https://gitea-naeel.giteak8s.services.ngcloud.ru/naeel/testnode.git"
# health_path = "/healthz"
# app_version = "23"
# json_env = jsonencode({
# # Переменные подключения к Postgres.
# PGHOST = nubes_postgres.db2.state_out_flat["internalConnect.master"]
# PGPORT = "5432"
# PGUSER = nubes_postgres_user.db2_user.username
# PGPASSWORD = jsondecode(nubes_postgres.db2.vault_secrets["users"])[nubes_postgres_user.db2_user.username]["password"]
# PGDATABASE = nubes_postgres_database.db2_app.db_name
# PGSSLMODE = "require"
# DATABASE_URL = format(
# "postgresql://%s:%s@%s:5432/%s?sslmode=require",
# nubes_postgres_user.db2_user.username,
# jsondecode(nubes_postgres.db2.vault_secrets["users"])[nubes_postgres_user.db2_user.username]["password"],
# nubes_postgres.db2.state_out_flat["internalConnect.master"],
# nubes_postgres_database.db2_app.db_name
# )
# })
# resource_c_p_u = 300
# resource_memory = 256
# resource_instances = 1
# depends_on = [nubes_postgres.db2]
# }
# output "pg_vault_secrets" {
# value = nubes_postgres.db2.vault_secrets
# sensitive = true
# }
# terraform output -json pg_vault_secrets

54
POSTGRES/main.tf Normal file
View File

@ -0,0 +1,54 @@
// 2026-03-17 17:05
// main.tf — providers and variables for Nubes + sless.
terraform {
  required_providers {
    nubes = {
      source  = "terra.k8c.ru/nubes/nubes"
      version = "5.0.19"
    }
    sless = {
      source  = "terra.k8c.ru/naeel/sless"
      version = "~> 0.1.18"
    }
  }
}

variable "api_token" {
  type        = string
  sensitive   = true
  description = "Nubes API token"
}

variable "s3_uid" {
  type        = string
  sensitive   = true
  description = "Nubes S3 UID"
}

variable "realm" {
  type        = string
  sensitive   = true
  description = "resource_realm parameter for nubes_postgres resource"
}

variable "pg_user" {
  type        = string
  sensitive   = true
  description = "PostgreSQL username used by sless SQL runner"
}

variable "pg_password" {
  type        = string
  sensitive   = true
  description = "PostgreSQL password used by sless SQL runner"
}

provider "nubes" {
  api_token = var.api_token
  # Test endpoint (this commit switches all examples to test endpoints).
  api_endpoint = "https://deck-api-test.ngcloud.ru/api/v1/index.cfm"
}

provider "sless" {
  endpoint = "https://sless-api.kube5s.ru"
  # The sless provider authenticates with the same Nubes API token.
  token          = var.api_token
  nubes_endpoint = "https://deck-api-test.ngcloud.ru/api/v1"
}

81
POSTGRES/resources.tf Normal file
View File

@ -0,0 +1,81 @@
// 2026-03-17 17:30
// resources.tf — Postgres resources and a one-shot SQL schema initialization run via sless_job.
resource "nubes_postgres" "npg" {
  resource_name = "teststand-pg-2"
  # s3_uid = "s01325"
  s3_uid             = var.s3_uid
  resource_realm     = var.realm
  resource_instances = 1
  resource_memory    = 512
  resource_c_p_u     = 500
  resource_disk      = "1"
  app_version        = "17"
  json_parameters = jsonencode({
    log_connections    = "off"
    log_disconnections = "off"
  })
  enable_pg_pooler_master      = false
  enable_pg_pooler_slave       = false
  allow_no_s_s_l               = false
  auto_scale                   = false
  auto_scale_percentage        = 10
  auto_scale_tech_window       = 0
  auto_scale_quota_gb          = "1"
  need_external_address_master = false
  # suspend_on_destroy = false
  operation_timeout = "11m"
  # Adopt a pre-existing cluster instead of failing on create.
  adopt_existing_on_create = true
}

resource "nubes_postgres_user" "pg_user" {
  postgres_id              = nubes_postgres.npg.id
  username                 = "u-user0"
  role                     = "ddl_user"
  adopt_existing_on_create = true
}

resource "nubes_postgres_database" "db" {
  postgres_id              = nubes_postgres.npg.id
  db_name                  = "db_terra"
  db_owner                 = nubes_postgres_user.pg_user.username
  adopt_existing_on_create = true
  # suspend_on_destroy = false
}

# Service function that executes the SQL statements supplied in event_json.
resource "sless_function" "postgres_sql_runner_create_table" {
  name        = "pg-create-table-runner"
  runtime     = "python3.11"
  entrypoint  = "sql_runner.run_sql"
  memory_mb   = 128
  timeout_sec = 30
  env_vars = {
    # Connection details the runner reads at execution time.
    PGHOST     = nubes_postgres.npg.state_out_flat["internalConnect.master"]
    PGPORT     = "5432"
    PGDATABASE = nubes_postgres_database.db.db_name
    PGUSER     = var.pg_user
    PGPASSWORD = var.pg_password
    PGSSLMODE  = "require"
  }
  source_dir = "${path.module}/code/sql-runner"
}

resource "sless_job" "postgres_table_init_job" {
  name             = "pg-create-table-job-main-v12"
  function         = sless_function.postgres_sql_runner_create_table.name
  wait_timeout_sec = 180
  # Bump run_id to force a re-run of the initialization job.
  run_id = 12
  event_json = jsonencode({
    statements = [
      "CREATE TABLE IF NOT EXISTS terraform_demo_table (id serial PRIMARY KEY, title text NOT NULL, created_at timestamp DEFAULT now())"
    ]
  })
  depends_on = [
    nubes_postgres_database.db
  ]
}

View File

@ -0,0 +1,40 @@
# 2026-03-17 13:05
# read_pg_user_secret.py — читает пароль пользователя managed PostgreSQL из k8s Secret.
# Используется из Terraform external data source, чтобы apply сам получал актуальный пароль
# даже для уже существующего пользователя, созданного вне текущего state.
import base64
import json
import subprocess
import sys
def main():
    """Emit {"password": ...} for a managed-PostgreSQL user as JSON.

    Reads a Terraform external-provider query ({"namespace", "secret"})
    from stdin, fetches the Secret's data.password field with kubectl
    (already configured on the host), base64-decodes it, and writes the
    result to stdout in the shape Terraform expects.
    """
    query = json.load(sys.stdin)
    kubectl_cmd = [
        "kubectl",
        "get",
        "secret",
        "-n",
        query["namespace"],
        query["secret"],
        "-o",
        # Read exactly the data.password field, nothing else.
        "jsonpath={.data.password}",
    ]
    completed = subprocess.run(
        kubectl_cmd,
        check=True,
        capture_output=True,
        text=True,
    )
    password = base64.b64decode(completed.stdout.strip()).decode()
    json.dump({"password": password}, sys.stdout)


if __name__ == "__main__":
    main()

View File

@ -0,0 +1,45 @@
# Изменено: 2026-03-14
# Функция event-cleaner: удаляет N самых старых строк из таблицы events.
# Вызывается через HTTP POST из Node-RED (который слушает RabbitMQ).
# Env: POSTGRES_DSN — строка подключения к PostgreSQL.
import os
import json
import psycopg2
def handle(request):
    """Delete the N oldest rows from the events table.

    Called via HTTP POST from Node-RED. The batch size comes from the JSON
    body key "delete_n" (default 10, capped at 100); the connection string
    comes from the POSTGRES_DSN environment variable.

    Returns:
        (json_body, 200, headers) with the number of deleted and remaining rows.
    """
    dsn = os.environ["POSTGRES_DSN"]
    payload = {}
    raw_body = request.get_data()
    if raw_body:
        try:
            payload = json.loads(raw_body)
        except Exception:
            # Malformed JSON falls back to the defaults below.
            pass
    # Cap the batch size so one bad request cannot wipe the table.
    requested = int(payload.get("delete_n", 10))
    delete_n = min(requested, 100)
    conn = psycopg2.connect(dsn)
    try:
        with conn.cursor() as cur:
            cur.execute("""
                DELETE FROM events
                WHERE id IN (
                    SELECT id FROM events ORDER BY created_at ASC LIMIT %s
                )
            """, (delete_n,))
            deleted = cur.rowcount
            cur.execute("SELECT COUNT(*) FROM events")
            remaining = cur.fetchone()[0]
            conn.commit()
        summary = {
            "ok": True,
            "deleted": deleted,
            "remaining": remaining
        }
        return json.dumps(summary), 200, {"Content-Type": "application/json"}
    finally:
        conn.close()

View File

@ -0,0 +1,47 @@
# Изменено: 2026-03-14
# Функция event-monitor: считает строки в events.
# Если больше 50 — публикует сообщение в RabbitMQ queue "cleanup-needed".
# Запускается по cron (каждую минуту).
# Env:
# POSTGRES_DSN — строка подключения к PostgreSQL
# RABBITMQ_URL — amqp://sless:sless123@rabbitmq.sless.svc.cluster.local:5672/
import os
import json
import psycopg2
import pika
# Row count above which a cleanup is requested.
THRESHOLD = 50


def handle(request):
    """Watch the events table and request a cleanup when it grows too large.

    Runs on a cron trigger. Counts rows in events; when the count exceeds
    THRESHOLD, publishes a persistent message to the RabbitMQ queue
    "cleanup-needed" so event-cleaner can delete the oldest rows.

    Returns:
        (json_body, 200, headers) reporting the count and the action taken.
    """
    dsn = os.environ["POSTGRES_DSN"]
    rabbit_url = os.environ["RABBITMQ_URL"]
    pg_conn = psycopg2.connect(dsn)
    try:
        with pg_conn.cursor() as cursor:
            cursor.execute("SELECT COUNT(*) FROM events")
            row_count = cursor.fetchone()[0]
    finally:
        pg_conn.close()
    report = {"count": row_count, "threshold": THRESHOLD, "action": "none"}
    if row_count > THRESHOLD:
        # Ask event-cleaner (fed from this queue) to drop the oldest rows.
        amqp = pika.BlockingConnection(pika.URLParameters(rabbit_url))
        channel = amqp.channel()
        channel.queue_declare(queue="cleanup-needed", durable=True)
        channel.basic_publish(
            exchange="",
            routing_key="cleanup-needed",
            body=json.dumps({"count": row_count, "delete_n": 10}),
            # delivery_mode=2 marks the message persistent.
            properties=pika.BasicProperties(delivery_mode=2)
        )
        amqp.close()
        report["action"] = "cleanup_requested"
    return json.dumps(report), 200, {"Content-Type": "application/json"}

View File

@ -0,0 +1,49 @@
# Изменено: 2026-03-14
# Функция event-writer: принимает HTTP POST, пишет одну строку в таблицу events.
# Таблица создаётся автоматически при первом запуске.
# Env: POSTGRES_DSN — строка подключения к PostgreSQL.
import os
import json
import psycopg2
from datetime import datetime, timezone
def handle(request):
    """Insert one row into the events table and return it as JSON.

    Called via HTTP POST; "source" and "message" come from the JSON body
    with defaults "node-red" / "ping". The table is created on demand so
    the function works against a fresh database. POSTGRES_DSN supplies
    the connection string.

    Returns:
        (json_body, 200, headers) with the new row's id and created_at.
    """
    dsn = os.environ["POSTGRES_DSN"]
    payload = {}
    raw_body = request.get_data()
    if raw_body:
        try:
            payload = json.loads(raw_body)
        except Exception:
            # Malformed JSON falls back to the defaults below.
            pass
    source = payload.get("source", "node-red")
    message = payload.get("message", "ping")
    conn = psycopg2.connect(dsn)
    try:
        with conn.cursor() as cur:
            # Idempotent DDL — safe to run on every request.
            cur.execute("""
                CREATE TABLE IF NOT EXISTS events (
                    id SERIAL PRIMARY KEY,
                    source VARCHAR(100) NOT NULL DEFAULT 'unknown',
                    message TEXT NOT NULL DEFAULT '',
                    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
                )
            """)
            cur.execute(
                "INSERT INTO events (source, message) VALUES (%s, %s) RETURNING id, created_at",
                (source, message)
            )
            inserted_id, created_at = cur.fetchone()
            conn.commit()
        response = {
            "ok": True,
            "id": inserted_id,
            "created_at": created_at.isoformat()
        }
        return json.dumps(response), 200, {"Content-Type": "application/json"}
    finally:
        conn.close()

View File

@ -0,0 +1,26 @@
# Изменено: 2026-03-14
# event-cleaner: HTTP функция для демонстрации контролируемого изменения логики.
# Работает без внешних пакетов.
import json
from datetime import datetime, timezone
def event_cleaner_handle(event_cleaner_event):
    """Accept a delete_n parameter and return a simulated-cleanup receipt.

    Demo HTTP handler with no external packages: it does not touch any
    database, it only echoes back how many rows *would* be deleted so the
    reaction to input parameters is visible right after a deploy.

    Args:
        event_cleaner_event: dict payload; only "delete_n" is read.
            Non-dict events and non-numeric values fall back to the default.

    Returns:
        (json_body, 200, headers); accepted_delete_n is clamped to [1, 100].
    """
    payload = event_cleaner_event if isinstance(event_cleaner_event, dict) else {}
    try:
        requested = int(payload.get("delete_n", 10))
    except (TypeError, ValueError):
        # A malformed delete_n must not crash the demo endpoint.
        requested = 10
    accepted = max(1, min(requested, 100))
    generated_at = datetime.now(timezone.utc).isoformat()
    return json.dumps(
        {
            "ok": True,
            "accepted_delete_n": accepted,
            "status": "simulated-cleanup",
            "generated_at": generated_at,
        }
    ), 200, {"Content-Type": "application/json"}

View File

@ -0,0 +1,25 @@
# Изменено: 2026-03-14
# event-monitor: cron-функция для демонстрации расписания и управления кодом.
# Работает без внешних библиотек и возвращает диагностический JSON.
import json
import os
from datetime import datetime, timezone
# Threshold reported in the heartbeat payload.
EVENT_MONITOR_THRESHOLD = 50


def event_monitor_handle(event_monitor_event):
    """Heartbeat handler for the cron trigger.

    Returns a (json_body, 200, headers) tuple reporting that the monitor
    ran, the configured threshold, and whether RABBITMQ_URL is set —
    enough to visually verify a deploy without any external services.
    """
    rabbitmq_url = os.environ.get("RABBITMQ_URL", "not-set")
    heartbeat = {
        "ok": True,
        "monitor": "alive",
        "threshold": EVENT_MONITOR_THRESHOLD,
        "rabbitmq_configured": rabbitmq_url != "not-set",
        "generated_at": datetime.now(timezone.utc).isoformat(),
    }
    return json.dumps(heartbeat), 200, {"Content-Type": "application/json"}

View File

@ -0,0 +1,28 @@
# Изменено: 2026-03-14
# event-writer: простая HTTP функция без внешних зависимостей.
# Правка поля response_tag в коде сразу видна в ответе после terraform apply.
import json
import os
from datetime import datetime, timezone
def event_writer_handle(event_writer_event):
    """Build the JSON response used to eyeball a code rollout.

    Echoes "source" and "message" from the event, falling back to
    "event-writer" and the DEFAULT_MESSAGE environment variable
    (or "writer-default"), plus a response_tag that is edited to
    demonstrate hot code updates via terraform apply.
    """
    if isinstance(event_writer_event, dict):
        payload = event_writer_event
    else:
        payload = {}
    fallback_message = os.environ.get("DEFAULT_MESSAGE", "writer-default")
    # Change this tag to make a terraform apply visibly roll out new code.
    response_tag = "writer-v2"
    return {
        "ok": True,
        "source": payload.get("source", "event-writer"),
        "message": payload.get("message", fallback_message),
        "response_tag": response_tag,
        "generated_at": datetime.now(timezone.utc).isoformat(),
    }

View File

@ -0,0 +1,77 @@
# 2026-03-14
# function.tf — managed serverless function resources for the demo.
# Edit the code under code/* and run terraform apply — the provider rebuilds and redeploys the functions itself.

# HTTP function that writes one event row per request.
resource "sless_function" "event_writer" {
  name        = "event-writer"
  runtime     = "python3.11"
  entrypoint  = "event_writer_handler.event_writer_handle"
  memory_mb   = 128
  timeout_sec = 20
  source_dir  = "${path.module}/code/event-writer"
  env_vars = {
    POSTGRES_DSN    = var.pg_dsn
    DEFAULT_MESSAGE = var.writer_message
  }
}

resource "sless_trigger" "event_writer_http" {
  name     = "event-writer-http"
  type     = "http"
  function = sless_function.event_writer.name
  enabled  = true
}

# Cron-driven function that watches the events table.
resource "sless_function" "event_monitor" {
  name        = "event-monitor"
  runtime     = "python3.11"
  entrypoint  = "event_monitor_handler.event_monitor_handle"
  memory_mb   = 128
  timeout_sec = 20
  source_dir  = "${path.module}/code/event-monitor"
  env_vars = {
    POSTGRES_DSN = var.pg_dsn
    RABBITMQ_URL = var.rabbitmq_url
  }
}

resource "sless_trigger" "event_monitor_cron" {
  name     = "event-monitor-cron"
  type     = "cron"
  function = sless_function.event_monitor.name
  enabled  = true
  # Every minute.
  schedule = "*/1 * * * *"
}

# HTTP function that deletes old event rows on demand.
resource "sless_function" "event_cleaner" {
  name        = "event-cleaner"
  runtime     = "python3.11"
  entrypoint  = "event_cleaner_handler.event_cleaner_handle"
  memory_mb   = 128
  timeout_sec = 20
  source_dir  = "${path.module}/code/event-cleaner"
  env_vars = {
    POSTGRES_DSN = var.pg_dsn
  }
}

resource "sless_trigger" "event_cleaner_http" {
  name     = "event-cleaner-http"
  type     = "http"
  function = sless_function.event_cleaner.name
  enabled  = true
}

# Invocation URLs of the HTTP triggers, for quick manual testing.
output "event_writer_url" {
  value = sless_trigger.event_writer_http.url
}

output "event_cleaner_url" {
  value = sless_trigger.event_cleaner_http.url
}

View File

@ -0,0 +1,18 @@
# 2026-03-14
# Terraform demo: managed serverless functions.
# ONLY functions/triggers are managed here; external services are assumed to be already running.
terraform {
  required_providers {
    sless = {
      source  = "terra.k8c.ru/naeel/sless"
      version = "~> 0.1.18"
    }
  }
}

provider "sless" {
  endpoint       = var.sless_endpoint
  token          = var.token
  nubes_endpoint = var.nubes_endpoint
}

View File

@ -0,0 +1,11 @@
# 2026-03-14
# Copy this file to terraform.tfvars and substitute a current token.
token = "PUT_TOKEN_HERE"
sless_endpoint = "http://sless-api.185.247.187.147.nip.io"
nubes_endpoint = "https://deck-test.ngcloud.ru/api/v1"
pg_dsn = "postgres://sless:sless-pg-password@postgres.sless.svc.cluster.local:5432/sless?sslmode=disable"
rabbitmq_url = "amqp://sless:sless123@rabbitmq.sless.svc.cluster.local:5672/"
writer_message = "writer-default-v1"

View File

@ -0,0 +1,38 @@
# 2026-03-14
# Variables for demo-managed-functions.
# Defaults for pg_dsn and rabbitmq_url point at in-cluster services (svc.cluster.local).
variable "token" {
  description = "JWT токен API"
  type        = string
  sensitive   = true
}

variable "sless_endpoint" {
  description = "Endpoint sless API"
  type        = string
  default     = "https://sless-api.kube5s.ru"
}

variable "nubes_endpoint" {
  description = "Nubes endpoint (нужен провайдеру)"
  type        = string
  default     = "https://deck-api-test.ngcloud.ru/api/v1"
}

variable "pg_dsn" {
  description = "PostgreSQL DSN для функций"
  type        = string
  default     = "postgres://sless:sless-pg-password@postgres.sless.svc.cluster.local:5432/sless?sslmode=disable"
}

variable "rabbitmq_url" {
  description = "RabbitMQ URL для функций"
  type        = string
  default     = "amqp://sless:sless123@rabbitmq.sless.svc.cluster.local:5672/"
}

variable "writer_message" {
  description = "Сообщение по умолчанию, которое пишет event-writer"
  type        = string
  default     = "writer-default-v1"
}

View File

@ -5,7 +5,7 @@ terraform {
required_providers {
sless = {
source = "terra.k8c.ru/naeel/sless"
version = "~> 0.1.16"
version = "~> 0.1.18"
}
}
}
@ -13,5 +13,5 @@ terraform {
provider "sless" {
endpoint = "https://sless-api.kube5s.ru"
token = var.token
nubes_endpoint = "https://deck-api.ngcloud.ru/api/v1"
nubes_endpoint = "https://deck-api-test.ngcloud.ru/api/v1"
}

View File

@ -11,7 +11,7 @@ terraform {
required_providers {
sless = {
source = "terra.k8c.ru/naeel/sless"
version = "~> 0.1.16"
version = "~> 0.1.18"
}
}
}
@ -19,6 +19,6 @@ terraform {
provider "sless" {
endpoint = "https://sless-api.kube5s.ru"
token = var.token
nubes_endpoint = "https://deck-api.ngcloud.ru/api/v1"
nubes_endpoint = "https://deck-api-test.ngcloud.ru/api/v1"
}

View File

@ -14,7 +14,7 @@ terraform {
# Провайдер для управления serverless функциями через sless API
sless = {
source = "terra.k8c.ru/naeel/sless"
version = "~> 0.1.16"
version = "~> 0.1.18"
}
}
}
@ -23,5 +23,5 @@ terraform {
provider "sless" {
endpoint = "https://sless-api.kube5s.ru"
token = var.token
nubes_endpoint = "https://deck-api.ngcloud.ru/api/v1"
nubes_endpoint = "https://deck-api-test.ngcloud.ru/api/v1"
}

View File

@ -5,7 +5,7 @@ terraform {
required_providers {
sless = {
source = "terra.k8c.ru/naeel/sless"
version = "~> 0.1.16"
version = "~> 0.1.18"
}
}
}
@ -13,5 +13,5 @@ terraform {
provider "sless" {
endpoint = "https://sless-api.kube5s.ru"
token = var.token
nubes_endpoint = "https://deck-api.ngcloud.ru/api/v1"
nubes_endpoint = "https://deck-api-test.ngcloud.ru/api/v1"
}

View File

@ -19,7 +19,7 @@ terraform {
required_providers {
sless = {
source = "terra.k8c.ru/naeel/sless"
version = "~> 0.1.16"
version = "~> 0.1.18"
}
}
}
@ -27,5 +27,5 @@ terraform {
provider "sless" {
endpoint = "https://sless-api.kube5s.ru"
token = var.token
nubes_endpoint = "https://deck-api.ngcloud.ru/api/v1"
nubes_endpoint = "https://deck-api-test.ngcloud.ru/api/v1"
}

View File

@ -18,7 +18,7 @@ terraform {
required_providers {
sless = {
source = "terra.k8c.ru/naeel/sless"
version = "~> 0.1.16"
version = "~> 0.1.18"
}
}
}
@ -26,5 +26,5 @@ terraform {
provider "sless" {
endpoint = "https://sless-api.kube5s.ru"
token = var.token
nubes_endpoint = "https://deck-api.ngcloud.ru/api/v1"
nubes_endpoint = "https://deck-api-test.ngcloud.ru/api/v1"
}