first commit

This commit is contained in:
2026-04-05 15:07:53 +08:00
commit d1bac85f28
30 changed files with 1071 additions and 0 deletions
+20
View File
@@ -0,0 +1,20 @@
# Provision the Azure data disk: partition, format XFS, mount, persist in fstab.
lsblk -o NAME,HCTL,SIZE,MOUNTPOINT | grep -i "sd"
# NOTE(review): assumes /dev/sdb is the empty data disk — confirm before running;
# this destroys any existing partition table on it.
sudo parted /dev/sdb --script mklabel gpt mkpart xfspart xfs 0% 100%
sudo partprobe /dev/sdb
sudo mkfs.xfs /dev/sdb1
sudo mkdir -p /BlockVolume1
sudo mount /dev/sdb1 /BlockVolume1
df -h | grep BlockVolume1
lsblk
# Back up fstab, then mount by UUID (sdX names can change across reboots).
sudo cp /etc/fstab /etc/fstab.bak
UUID=$(sudo blkid -s UUID -o value /dev/sdb1)
# nofail: boot continues even if the disk is missing.
echo "UUID=$UUID /BlockVolume1 xfs defaults,nofail 1 2" | sudo tee -a /etc/fstab
sudo mount -a
df -h | grep BlockVolume1
lsblk
+34
View File
@@ -0,0 +1,34 @@
# Install Docker CE from the official apt repo, then relocate its data-root
# onto the mounted block volume.
sudo apt update
sudo apt install -y ca-certificates curl
sudo install -m 0755 -d /etc/apt/keyrings
sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
sudo chmod a+r /etc/apt/keyrings/docker.asc
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
$(. /etc/os-release && echo "${UBUNTU_CODENAME:-$VERSION_CODENAME}") stable" | \
sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt update
sudo apt install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
sudo usermod -aG docker eujin
# NOTE(review): 'newgrp docker' spawns an interactive subshell and has no effect
# on the remainder of a non-interactive script — log out and back in instead.
newgrp docker
# Point the daemon's data-root at the block volume.
sudo mkdir -p /etc/docker
cat <<'EOF' | sudo tee /etc/docker/daemon.json
{
"data-root": "/BlockVolume1/docker"
}
EOF
sudo systemctl enable docker
# FIX: stop Docker BEFORE migrating /var/lib/docker. The original restarted the
# daemon first, which initialized a fresh empty data-root that the rsync then
# merged over — stopping first keeps the copy consistent.
sudo systemctl stop docker
sudo rsync -aP /var/lib/docker/ /BlockVolume1/docker/
sudo systemctl start docker
docker info | grep "Docker Root Dir"
View File
+24
View File
@@ -0,0 +1,24 @@
# Global options.
{
	# NOTE(review): binding the admin API to 0.0.0.0:2019 (and publishing 2019 in
	# compose) exposes config read/write to the network — confirm it is firewalled.
	admin 0.0.0.0:2019
}
# Plain-HTTP catch-all — simple liveness response.
:80 {
	handle {
		respond "Hello from Caddy!" 200
	}
}
# Portainer UI.
lejin2000.ooguy.com {
	handle {
		reverse_proxy portainer:9000
	}
}
# NOTE(review): code-server's compose sets PROXY_DOMAIN=lejin82.ooguy.com, yet
# this host is routed to openclaw — confirm which service should own lejin82.
lejin82.ooguy.com {
	reverse_proxy openclaw-openclaw-1:8080
}
# LiteLLM proxy.
ejlai.ooguy.com {
	reverse_proxy litellm:4000
}
+53
View File
@@ -0,0 +1,53 @@
services:
  # Reverse proxy / TLS terminator for the whole stack.
  caddy:
    image: caddy:latest
    container_name: caddy
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
      # NOTE(review): publishing 2019 exposes the Caddy admin API on the host —
      # confirm it is firewalled, or bind it to 127.0.0.1 instead.
      - "2019:2019"
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile
      - caddy_data:/data
      - caddy_config:/config
    networks:
      - caddy_net
  portainer:
    image: portainer/portainer-ce:latest
    container_name: portainer
    restart: unless-stopped
    ports:
      - "9000:9000"
    volumes:
      # The docker socket grants Portainer full control of the host daemon.
      - /var/run/docker.sock:/var/run/docker.sock
      - portainer_data:/data
    networks:
      - caddy_net
volumes:
  # NOTE(review): the 'local' driver passes 'device' verbatim to the bind mount;
  # relative paths like ./caddy/data typically fail ("path must be absolute") —
  # confirm, or use plain bind-mount service volumes instead.
  caddy_data:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: ./caddy/data
  caddy_config:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: ./caddy/config
  portainer_data:
    driver: local
    driver_opts:
      type: none
      o: bind
      device: ./portainer
networks:
  caddy_net:
    driver: bridge
+4
View File
@@ -0,0 +1,4 @@
# FIX: the original created /BlockVolume1/projects, which does not match the
# directory the chown/chmod lines (and the code-server compose volumes) use.
sudo mkdir -p /BlockVolume1/dockercompose/code-server/projects
# Give the container user (PUID/PGID 1000 → eujin) ownership of the workspace.
sudo chown -R eujin:eujin /BlockVolume1/dockercompose/code-server/projects
# Directories traversable/readable, files readable.
sudo find /BlockVolume1/dockercompose/code-server/projects -type d -exec chmod 755 {} \;
sudo find /BlockVolume1/dockercompose/code-server/projects -type f -exec chmod 644 {} \;
@@ -0,0 +1,26 @@
services:
  code-server:
    image: lscr.io/linuxserver/code-server:latest
    container_name: code-server
    restart: unless-stopped
    ports:
      - "8443:8443"
    environment:
      # Match the host user that owns the bind-mounted project directories.
      - PUID=1000
      - PGID=1000
      # NOTE(review): plaintext passwords committed to the repo — rotate them and
      # move to an untracked .env (or the hashed password variants).
      - PASSWORD=lejin2000
      - SUDO_PASSWORD=lejin2000
      - DEFAULT_WORKSPACE=/projects
      # NOTE(review): the Caddyfile routes lejin82.ooguy.com to openclaw, not to
      # code-server — confirm PROXY_DOMAIN matches the actual vhost.
      - PROXY_DOMAIN=lejin82.ooguy.com
      - TZ=Asia/Kuala_Lumpur
    volumes:
      - /BlockVolume1/dockercompose/code-server/config:/config
      - /BlockVolume1/dockercompose/code-server/projects:/projects
    networks:
      - caddy_caddy_net
# Join the external network created by the caddy stack.
networks:
  caddy_caddy_net:
    external: true
    name: caddy_caddy_net
+5
View File
@@ -0,0 +1,5 @@
# Hermes agent environment — OpenAI-compatible client pointed at local Ollama.
OPENAI_BASE_URL=http://ollama:11434/v1
# Ollama ignores the API key; any non-empty value satisfies the client.
OPENAI_API_KEY=ollama
# NOTE(review): "qwen3.5:0.8b" must match a tag actually pulled in Ollama — confirm.
LLM_MODEL=qwen3.5:0.8b
TERMINAL_LOCAL_PERSISTENT=true
TZ=Asia/Kuala_Lumpur
+25
View File
@@ -0,0 +1,25 @@
# Self-contained hermes-agent image on slim Python.
FROM python:3.11-slim

ENV PYTHONUNBUFFERED=1 \
    VIRTUAL_ENV=/opt/venv \
    PATH=/opt/venv/bin:/root/.local/bin:$PATH

# Build/runtime tools; clean apt lists in the same layer to keep the image small.
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential ca-certificates curl ffmpeg git ripgrep \
    && rm -rf /var/lib/apt/lists/*

# Install uv. NOTE(review): curl | sh runs an unpinned remote script — consider
# pinning a uv release and verifying a checksum for reproducible builds.
RUN curl -LsSf https://astral.sh/uv/install.sh | sh \
    && ln -sf /root/.local/bin/uv /usr/local/bin/uv

WORKDIR /opt
# NOTE(review): unpinned clone of the default branch — builds are not
# reproducible; consider cloning a specific tag or commit.
RUN git clone --recurse-submodules https://github.com/NousResearch/hermes-agent.git
WORKDIR /opt/hermes-agent

# FIX: RUN executes under /bin/sh (dash), which does NOT perform {a,b} brace
# expansion — the original 'mkdir -p /root/.hermes/{cron,...}' created a single
# literal directory named "{cron,sessions,...}". Create each directory explicitly.
RUN uv venv /opt/venv --python 3.11 \
    && uv pip install --python /opt/venv/bin/python -e ".[all]" \
    && mkdir -p \
        /root/.hermes/cron \
        /root/.hermes/sessions \
        /root/.hermes/logs \
        /root/.hermes/memories \
        /root/.hermes/skills \
        /root/.hermes/pairing \
        /root/.hermes/hooks \
        /root/.hermes/image_cache \
        /root/.hermes/audio_cache \
        /root/.hermes/whatsapp/session \
    && cp cli-config.yaml.example /root/.hermes/config.yaml \
    && touch /root/.hermes/.env

WORKDIR /workspace
# Container idles; hermes commands are run via `docker exec`.
CMD ["sleep", "infinity"]
+26
View File
@@ -0,0 +1,26 @@
services:
  hermes:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: hermes-agent
    restart: unless-stopped
    # init: reaps zombies / forwards signals for the long-lived PID 1.
    init: true
    working_dir: /workspace
    env_file:
      - ./.env
    volumes:
      # Persistent agent state (config, sessions, caches).
      - hermes_data:/root/.hermes
      - ./workspace:/workspace
    networks:
      - caddy_caddy_net
    # Keep stdin/TTY so interactive `docker exec` / attach sessions behave.
    stdin_open: true
    tty: true
    # Container idles; hermes commands are run via docker exec.
    command: ["sleep", "infinity"]
volumes:
  hermes_data:
networks:
  caddy_caddy_net:
    external: true
+24
View File
@@ -0,0 +1,24 @@
# Build and start the Hermes stack, then smoke-test connectivity to Ollama.
mkdir -p ~/hermes-prod/workspace
cd ~/hermes-prod
sudo docker compose -f docker-compose.yml up -d --build
docker exec -it hermes-agent hermes doctor
docker exec -it hermes-agent hermes status
docker exec -it hermes-agent hermes chat -q "Hello, confirm you are connected to Ollama"
# Verify the Ollama API is reachable from inside the compose network.
docker exec -it hermes-agent bash -lc 'curl -s http://ollama:11434/api/tags'
## install
# Interactive steps inside the container: pull submodules, install extra dep.
docker exec -it hermes-agent bash
cd /opt/hermes-agent
git submodule update --init --recursive
uv pip install -e "./tinker-atropos"
hermes doctor
# NOTE(review): model tag must exist in Ollama ('ollama list') — confirm qwen3.5:0.8b.
docker exec -it hermes-agent hermes chat \
--provider custom \
--model qwen3.5:0.8b \
-q "Reply with exactly: connected"
sudo docker exec -it hermes-agent hermes chat --provider auto -m qwen3.5:0.8b -q "Reply with exactly: connected"
+54
View File
@@ -0,0 +1,54 @@
# =============================================================================
# LiteLLM Stack — .env.example
# Copy to .env and fill in values
# =============================================================================
# ── Postgres (DB for UI, users, virtual keys, spend) ─────────────────────────
POSTGRES_DB=litellm
POSTGRES_USER=llmproxy
# NOTE(review): real credentials are committed in this example file — rotate
# them and keep only placeholders under version control.
POSTGRES_PASSWORD=lejin2000
# ── LiteLLM Logging ──────────────────────────────────────────────────────────
LITELLM_LOG=INFO
# ── Admin UI / Security ──────────────────────────────────────────────────────
# Master key used by LiteLLM for admin auth (UI + admin APIs).
# Must start with "sk-" and match general_settings.master_key.
LITELLM_MASTER_KEY=sk-lejin2000
# Salt key used to encrypt provider API keys & secrets in the DB.
# Generate once, keep secret, NEVER change after first run (or you lose decryption).
LITELLM_SALT_KEY=sk-lejin2000
# Admin UI credentials (fallback / simple login) per UI quick start docs.
UI_USERNAME=admin
UI_PASSWORD=lejin2000
# Disable admin UI? (set True if you want API-only mode)
DISABLE_ADMIN_UI=False
# ── Provider API keys ────────────────────────────────────────────────────────
# OpenAI
#OPENAI_API_KEY=sk-...
# Anthropic
# ANTHROPIC_API_KEY=sk-ant-...
# Azure OpenAI
# NOTE(review): this looks like a LIVE Azure key committed to an .env.example —
# rotate it immediately and replace with a placeholder.
AZURE_API_KEY=1kUm7k9xWjSKv9BkGTODlUKUYelonOKovMORtGHPpntJ8WhkkomGJQQJ99CCACHYHv6XJ3w3AAAAACOGF9jZ
AZURE_API_BASE=https://eujin-mmt0hj4x-eastus2.cognitiveservices.azure.com/
AZURE_API_VERSION=2024-12-01-preview
# Groq
# GROQ_API_KEY=gsk_...
# OpenRouter
# OPENROUTER_API_KEY=sk-or-...
# Google Gemini
# GEMINI_API_KEY=AIza...
# HuggingFace
# HUGGINGFACE_API_KEY=hf_...
# ── Optional: allow Caddy to forward headers, etc. (no special vars needed) ─
@@ -0,0 +1,120 @@
# =============================================================================
# LiteLLM Proxy Config (DB-backed UI, no Redis)
# - Single-node deployment (1 CPU / 2 GB RAM)
# - Postgres used for:
# - Users / login
# - Virtual keys & teams
# - Spend tracking (optional)
# - No Redis caching (cache: false)
#
# Docs:
# https://docs.litellm.ai/docs/proxy/configs
# https://docs.litellm.ai/docs/proxy/config_settings
# https://docs.litellm.ai/docs/proxy/virtual_keys
# =============================================================================
# -----------------------------------------------------------------------------#
# Models exposed by the proxy
# -----------------------------------------------------------------------------#
model_list:
  # NOTE(review): every entry below is commented out, so model_list is empty —
  # the proxy exposes no models until one is uncommented here or models are
  # added through the DB/UI (store_model_in_db is enabled below). Confirm intent.
  # --- OpenAI examples -------------------------------------------------------
  #- model_name: gpt-4o
  #  litellm_params:
  #    model: openai/gpt-4o
  #    api_key: os.environ/OPENAI_API_KEY
  #- model_name: gpt-4o-mini
  #  litellm_params:
  #    model: openai/gpt-4o-mini
  #    api_key: os.environ/OPENAI_API_KEY
  # --- Anthropic example -----------------------------------------------------
  # - model_name: claude-3-5-sonnet
  #   litellm_params:
  #     model: anthropic/claude-3-5-sonnet-20241022
  #     api_key: os.environ/ANTHROPIC_API_KEY
  # --- Groq example ----------------------------------------------------------
  # - model_name: groq-llama-3.3-70b
  #   litellm_params:
  #     model: groq/llama-3.3-70b-versatile
  #     api_key: os.environ/GROQ_API_KEY
  # --- Azure OpenAI example --------------------------------------------------
  # - model_name: azure-gpt-4o
  #   litellm_params:
  #     model: azure/my_azure_deployment
  #     api_base: os.environ/AZURE_API_BASE
  #     api_key: os.environ/AZURE_API_KEY
  #     api_version: "2025-01-01-preview"
  # --- Local Ollama example --------------------------------------------------
  # - model_name: ollama-llama3
  #   litellm_params:
  #     model: ollama/llama3
  #     api_base: http://host.docker.internal:11434
# -----------------------------------------------------------------------------#
# Core LiteLLM behavior (no Redis cache)
# -----------------------------------------------------------------------------#
litellm_settings:
  # Retries & timeouts
  num_retries: 2
  request_timeout: 60 # seconds
  # Disable caching entirely to avoid Redis
  cache: false # <- IMPORTANT: no Redis in your stack
  # Drop unsupported provider-specific params instead of erroring
  drop_params: true
  # If you later enable caching, add cache_params here with Redis config.
  # cache_params:
  #   type: redis
  #   host: litellm-redis
  #   port: 6379
  #   password: os.environ/REDIS_PASSWORD
# -----------------------------------------------------------------------------#
# Router settings (all in-memory, single-node)
# -----------------------------------------------------------------------------#
router_settings:
  routing_strategy: "simple-shuffle" # simple-shuffle | least-busy | latency-based-routing
  num_retries: 1
  timeout: 30
  retry_after: 5
  # No Redis routing / transaction buffer on this small single-node setup
  # redis_host: litellm-redis
  # redis_port: 6379
  # redis_password: os.environ/REDIS_PASSWORD
# -----------------------------------------------------------------------------#
# General settings (DB-backed UI, no Redis)
# -----------------------------------------------------------------------------#
general_settings:
  # Admin master key (used for UI + admin API).
  # Must match LITELLM_MASTER_KEY from .env and start with "sk-".
  master_key: os.environ/LITELLM_MASTER_KEY
  # Postgres connection (from DATABASE_URL env var).
  # Required for:
  #   - /ui login and user management
  #   - virtual keys
  #   - spend tracking
  database_url: os.environ/DATABASE_URL
  # Store model definitions in DB (used by the UI)
  store_model_in_db: true
  # Telemetry & metrics
  telemetry: false
  # NOTE(review): confirm 'enable_prometheus' is a supported key for this
  # LiteLLM version — newer releases configure Prometheus via callbacks.
  enable_prometheus: true
  # On a small box, you may want to reduce some DB-heavy features.
  # You can still track spend, but disable some advanced cost mgmt.
  disable_end_user_cost_tracking: false
  disable_reset_budget: false
  # No Redis transaction buffer here
  # use_redis_transaction_buffer: false
@@ -0,0 +1,18 @@
# =============================================================================
# Prometheus Configuration — scrapes LiteLLM /metrics
# =============================================================================
global:
  scrape_interval: 15s # how often to scrape targets
  evaluation_interval: 15s # how often to evaluate rules
  scrape_timeout: 10s
scrape_configs:
  # LiteLLM proxy metrics (service name + port on the shared compose network).
  - job_name: "litellm"
    static_configs:
      - targets: ["litellm:4000"] # internal service name + port
    metrics_path: "/metrics"
    # Redundant with the global default; kept for explicitness.
    scrape_interval: 15s
  # Prometheus self-scrape.
  - job_name: "prometheus"
    static_configs:
      - targets: ["localhost:9090"]
+99
View File
@@ -0,0 +1,99 @@
name: litellm-stack
services:
  litellm:
    # NOTE(review): 'main-stable' is a moving tag — pin a release tag or digest
    # for reproducible deploys.
    image: ghcr.io/berriai/litellm:main-stable
    container_name: litellm
    restart: unless-stopped
    # Caddy will reverse proxy to this container on caddy_caddy_net.
    # For local debugging without Caddy, you can uncomment:
    # ports:
    #   - "4000:4000"
    command: ["--config", "/app/config.yaml", "--port", "4000", "--num_workers", "1"]
    env_file:
      - .env
    environment:
      # Logging
      LITELLM_LOG: "${LITELLM_LOG:-INFO}"
      # DB connection string used by LiteLLM for UI, users, virtual keys, spend, etc.
      # LiteLLM reads this via general_settings.database_url: os.environ/DATABASE_URL
      DATABASE_URL: "postgresql://${POSTGRES_USER:-llmproxy}:${POSTGRES_PASSWORD:-dbpassword9090}@litellm-db:5432/${POSTGRES_DB:-litellm}"
    volumes:
      - ./config/litellm_config.yaml:/app/config.yaml:ro
      - litellm_logs:/app/logs
    depends_on:
      litellm-db:
        condition: service_healthy
    networks:
      - caddy_caddy_net
    healthcheck:
      # LiteLLM image is minimal (no curl/wget) → use Python
      test: ["CMD-SHELL", "python -c \"import urllib.request; urllib.request.urlopen('http://localhost:4000/health/liveliness')\""]
      interval: 30s
      timeout: 10s
      retries: 5
      start_period: 120s
    logging:
      driver: "json-file"
      options:
        max-size: "20m"
        max-file: "3"
  litellm-db:
    image: postgres:16-alpine
    container_name: litellm-db
    restart: unless-stopped
    environment:
      POSTGRES_DB: "${POSTGRES_DB:-litellm}"
      POSTGRES_USER: "${POSTGRES_USER:-llmproxy}"
      POSTGRES_PASSWORD: "${POSTGRES_PASSWORD:-dbpassword9090}"
      PGDATA: /var/lib/postgresql/data/pgdata
    volumes:
      - litellm_postgres_data:/var/lib/postgresql/data
    # No host port mapped → only other containers on the network can reach it.
    # For debugging from host, you can temporarily uncomment:
    # ports:
    #   - "127.0.0.1:5432:5432"
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -d ${POSTGRES_DB:-litellm} -U ${POSTGRES_USER:-llmproxy}"]
      interval: 5s
      timeout: 5s
      retries: 10
      start_period: 10s
    networks:
      - caddy_caddy_net
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
networks:
  # External network created/owned by your Caddy stack
  caddy_caddy_net:
    external: true
volumes:
  litellm_logs:
    name: litellm_logs
    driver: local
  litellm_postgres_data:
    name: litellm_postgres_data
    driver: local
+30
View File
@@ -0,0 +1,30 @@
services:
  ollama:
    image: ollama/ollama:latest
    container_name: ollama
    restart: unless-stopped
    environment:
      # Listen on all interfaces so Caddy can route to the container.
      - OLLAMA_HOST=0.0.0.0
      # NOTE(review): OLLAMA_NOHIST is not a documented Ollama variable and does
      # not control VRAM/CPU placement — confirm the intended setting (kept
      # as-is to avoid a behavior change).
      - OLLAMA_NOHIST=1
      # Tune these based on the server's available resources.
      - OLLAMA_NUM_PARALLEL=1 # Process one request at a time to prevent CPU thrashing
      - OLLAMA_KEEP_ALIVE=5m # Keep the model in memory for 5 mins after a request
      - OLLAMA_CONTEXT_LENGTH=1024 # Limit the context length to reduce memory usage
    cpus: 0.50
    mem_limit: 2500m
    mem_reservation: 2g
    volumes:
      # Persistent storage for downloaded LLM models
      - ollama_data:/root/.ollama
    networks:
      - caddy_caddy_net
networks:
  # FIX: removed the stray space before ':' in the network key.
  caddy_caddy_net:
    external: true
volumes:
  ollama_data:
    name: ollama_data
+1
View File
@@ -0,0 +1 @@
docker compose exec ollama bash
+23
View File
@@ -0,0 +1,23 @@
PORT=8080
AUTH_USERNAME=admin
# NOTE(review): real credentials and tokens are committed below — rotate them
# all and keep secrets out of version control.
AUTH_PASSWORD=P@ssw0rd123!@#
OPENCLAW_GATEWAY_TOKEN=ogt_cc2a54883345b1dfc409f716b08d4104d6dfd94cea47da80ee99c98fda1a1384
TZ=Asia/Kuala_Lumpur
# FIX: removed the leading space after '=' — docker compose env_file parsing
# preserves it in the value, which would break OpenRouter authentication.
OPENROUTER_API_KEY=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
OPENCLAW_PRIMARY_MODEL=openrouter/free
# Optional alternatives
# OPENAI_API_KEY=
# ANTHROPIC_API_KEY=
# OPENCODE_API_KEY=replace-with-your-key
# GEMINI_API_KEY=
# GROQ_API_KEY=
# OLLAMA_BASE_URL=http://host.docker.internal:11434
TELEGRAM_BOT_TOKEN=8602405814:AAF09-zauk8VnPYpRw9GNIwCCMFJYoMz6vI
TELEGRAM_DM_POLICY=pairing
+97
View File
@@ -0,0 +1,97 @@
########################### skills/brevo/SKILL.md
---
name: brevo
description: Manage Brevo contacts, lists, transactional emails, and campaigns.
---
# Brevo Email Marketing API
Use this skill when you need to manage Brevo contacts, lists, transactional emails, and campaigns.
## Authentication
Use this shell pattern:
`BREVO_KEY=$(cat ~/.config/brevo/api_key)`
All Brevo API requests require:
`api-key: $BREVO_KEY`
## Base URL
`https://api.brevo.com/v3`
## Common endpoints
- Create contact: `POST /contacts`
- Get contact: `GET /contacts/{email}`
- Update contact: `PUT /contacts/{email}`
- List contacts: `GET /contacts?limit=50&offset=0`
- Get all lists: `GET /contacts/lists`
- Add contacts to list: `POST /contacts/lists/{listId}/contacts/add`
- Send transactional email: `POST /smtp/email`
- Get templates: `GET /smtp/templates`
## Best practices
- Always check blacklisted or unsubscribed contacts before importing.
- Use `updateEnabled: true` when creating contacts.
- Prefer templates for transactional emails.
- Store list IDs in config, not hardcoded prompts.
########################### skills/brevo/SKILL.md
##########################/data/state/.config/brevo/api_key
{
"skills": {
"entries": {
"brevo": {
"enabled": true,
"env": {
"BREVO_KEY": "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"
}
}
}
}
}
##########################/data/state/.config/brevo/api_key
export BREVO_KEY="$(cat ~/.config/brevo/api_key)"
curl "https://api.brevo.com/v3/account" \
-H "api-key: $BREVO_KEY"
curl "https://api.brevo.com/v3/contacts/lists?limit=50" \
-H "api-key: $BREVO_KEY"
################################### config
📧 Brevo Email Configuration
SMTP Relay
• Server: smtp-relay.brevo.com
• Port: 587
• Login: a5d823001@smtp-brevo.com
• Password: xsmtpsib-dc1bd72ebb127d5b026e510adeaa2261b3044585afaef2fe0c12b7868beaf445-eQi9LSnFiKX6WXyg
API (v3)
• Base URL: https://api.brevo.com/v3
• API Key: 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
Test Send (Working)
• Sender: eujin.lai.82@gmail.com
• Recipient: lejin2000@hotmail.com
• Last Message ID: <202603300718.31101727365@smtp-relay.mailin.fr>
• Status: ✅ Verified
Reusable Scripts
• send_brevo_api.py: Basic test
• send_test_email_custom.py: Custom to/from
Notes
• Stored in /data/state/.config/brevo/api_key for secure access
• Local backup: /data/workspace/MEMORY.md and /data/workspace/brevo_notes.md
########################################
+53
View File
@@ -0,0 +1,53 @@
services:
  openclaw:
    image: coollabsio/openclaw:latest
    restart: unless-stopped
    env_file:
      - .env
    environment:
      PORT: "${PORT:-8080}"
      AUTH_USERNAME: "${AUTH_USERNAME}"
      AUTH_PASSWORD: "${AUTH_PASSWORD}"
      OPENCLAW_GATEWAY_TOKEN: "${OPENCLAW_GATEWAY_TOKEN}"
      OPENROUTER_API_KEY: "${OPENROUTER_API_KEY}"
      OPENCLAW_PRIMARY_MODEL: "${OPENCLAW_PRIMARY_MODEL}"
      # NOTE(review): this targets port 9223 while the browser service sets
      # --remote-debugging-port=9222 — confirm the browser image proxies
      # 9223 → 9222; otherwise these two settings disagree.
      BROWSER_CDP_URL: "http://browser:9223"
      BROWSER_DEFAULT_PROFILE: "openclaw"
      BROWSER_EVALUATE_ENABLED: "true"
      OPENCLAW_STATE_DIR: "/data/.openclaw"
      OPENCLAW_WORKSPACE_DIR: "/data/workspace"
      OPENCLAW_CONFIG_JSON: '{"gateway":{"mode":"local"}}'
    volumes:
      - openclaw-data:/data
    depends_on:
      - browser
    networks:
      - internal
      - caddy_caddy_net
    # Optional: only keep this if you also want direct host access for testing
    # ports:
    #   - "${PORT:-8080}:${PORT:-8080}"
  browser:
    image: coollabsio/openclaw-browser:latest
    restart: unless-stopped
    environment:
      PUID: "1000"
      PGID: "1000"
      TZ: "${TZ:-Asia/Kuala_Lumpur}"
      CHROME_CLI: "--remote-debugging-port=9222"
    volumes:
      - browser-data:/config
    # Chrome needs a large /dev/shm to avoid renderer crashes.
    shm_size: 2g
    networks:
      - internal
volumes:
  openclaw-data:
  browser-data:
networks:
  internal:
    driver: bridge
  caddy_caddy_net:
    external: true
+29
View File
@@ -0,0 +1,29 @@
####################### /data/state/openclaw.json
"messages": {
"tts": {
"auto": "inbound",
"provider": "elevenlabs",
"providers": {
"elevenlabs": {
"apiKey": "cd1d07445c67ac457d87ab44d0a677fe684810f6ea8923944147939d7f5a7427",
"voiceId": "tMvyQtpCVQ0DkixuYm6J",
"modelId": "eleven_multilingual_v2",
"baseUrl": "https://api.elevenlabs.io",
"languageCode": "en",
"voiceSettings": {
"stability": 0.5,
"similarityBoost": 0.75,
"style": 0.0,
"useSpeakerBoost": true,
"speed": 1.0
}
}
}
}
}
###############################
export ELEVENLABS_API_KEY=cd1d07445c67ac457d87ab44d0a677fe684810f6ea8923944147939d7f5a7427
+117
View File
@@ -0,0 +1,117 @@
############ /data/workspace/skills/notion/SKILL.md
---
name: notion
description: Work with Notion pages and databases via the official Notion API.
homepage: https://developers.notion.com
metadata:
clawdbot:
emoji: 🧠
requires:
env:
- NOTION_API_KEY
install:
- id: node
kind: note
label: "Requires notion-cli (Node.js) or notion-cli-py (Python). See docs below."
---
# Notion
This skill lets the agent work with **Notion pages and databases** using the official Notion API.
The skill is declarative: it documents **safe, recommended operations** and assumes a local CLI
(`notion-cli`) that actually performs API calls.
## Authentication
- Create a Notion Integration at https://www.notion.so/my-integrations
- Copy the Internal Integration Token.
- Export it as:
```bash
export NOTION_API_KEY=secret_xxx
```
Share the integration with the pages or databases you want to access.
Unshared content is invisible to the API.
## Profiles (personal / work)
You may define multiple profiles (e.g. personal, work) via env or config.
Default profile: personal
Override via:
```bash
export NOTION_PROFILE=work
```
## Pages
**Read page:**
```bash
notion-cli page get <page_id>
```
**Append blocks:**
```bash
notion-cli block append <page_id> --markdown "..."
```
Prefer appending over rewriting content.
**Create page:**
```bash
notion-cli page create --parent <page_id> --title "..."
```
## Databases
**Inspect schema:**
```bash
notion-cli db get <database_id>
```
**Query database:**
```bash
notion-cli db query <database_id> --filter <json> --sort <json>
```
**Create row:**
```bash
notion-cli page create --database <database_id> --props <json>
```
**Update row:**
```bash
notion-cli page update <page_id> --props <json>
```
## Schema changes (advanced)
Always inspect diffs before applying schema changes.
Never modify database schema without explicit confirmation.
Recommended flow:
```bash
notion-cli db schema diff <database_id> --desired <json>
notion-cli db schema apply <database_id> --desired <json>
```
## Safety notes
- Notion API is rate-limited; batch carefully.
- Prefer append and updates over destructive operations.
- IDs are opaque; store them explicitly, do not infer from URLs.
############ /data/workspace/skills/notion/SKILL.md
+76
View File
@@ -0,0 +1,76 @@
# OpenClaw configuration runbook — run against the live container.
# NOTE(review): real tokens/keys are committed in this runbook — rotate them.
sudo docker exec -it openclaw-openclaw-1 /bin/bash
docker exec -it openclaw /bin/bash
docker exec openclaw openclaw config set gateway.auth.token "ogt_cc2a54883345b1dfc409f716b08d4104d6dfd94cea47da80ee99c98fda1a1384"
docker exec openclaw openclaw config set agents.defaults.model.primary "openrouter/free"
docker exec openclaw openclaw config get agents.defaults.model.primary
docker exec openclaw openclaw config set gateway.controlUi.allowInsecureAuth false
docker compose logs openclaw --tail=25
docker exec -it openclaw openclaw onboard --auth-choice gemini-api-key
# Step 1 — List pending pairing requests first
docker exec -it openclaw openclaw pairing list telegram
# Step 2 — Approve with your code
docker exec -it openclaw openclaw pairing approve telegram PW864DGY
docker exec -it --user root openclaw bash -c "apt-get update && apt-get install -y at cron"
# NOTE(review): Debian has no package named 'sh' — this command fails; /bin/sh
# already exists in the image.
docker exec -it --user root openclaw bash -c "apt-get update && apt-get install -y sh"
### use nvidia
# Set NVIDIA API key
docker exec openclaw openclaw config set models.providers.nvidia.baseUrl "https://integrate.api.nvidia.com/v1"
docker exec openclaw openclaw config set models.providers.nvidia.apiKey "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"
docker exec openclaw openclaw config set models.providers.nvidia.api "openai-completions"
# Set as primary model
docker exec openclaw openclaw config set agents.defaults.model.primary "nvidia/meta/llama-4-scout-17b-16e-instruct"
# If accessing via IP
docker exec openclaw openclaw config set gateway.controlUi.allowedOrigins '["*"]'
# OR specific domain (more secure)
docker exec openclaw openclaw config set gateway.controlUi.allowedOrigins '["https://lejin82.ooguy.com/", "http://openclaw:8080"]'
# NOTE(review): bare URL, not a command — in-network Ollama endpoint reference.
http://ollama:11434
openclaw configure
openclaw gateway restart
openclaw onboard
openclaw models status
# remove heartbeat
openclaw config set agents.defaults.heartbeat.every "0m"
openclaw config set channels.defaults.heartbeat.showOk false
openclaw config set channels.defaults.heartbeat.showAlerts false
openclaw config set channels.defaults.heartbeat.useIndicator false
openclaw gateway restart
openclaw config set gateway.controlUi.dangerouslyAllowHostHeaderOriginFallback true
openclaw gateway restart
openclaw skills list --eligible
openclaw config set gateway.bind lan
openclaw gateway status
openclaw status
openclaw config set gateway.trustedProxies '["127.0.0.1","::1"]'
openclaw config set channels.telegram.groupAllowFrom '[795409110]'
openclaw devices list
openclaw devices approve 30310d90-e843-48b8-a32c-f44a73c94409
openclaw config set gateway.mode local
openclaw doctor --fix
openclaw gateway probe
+8
View File
@@ -0,0 +1,8 @@
TAVILY_API_KEY=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
export TAVILY_API_KEY="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"
"tavily-search": {
"enabled": true,
"apiKey": "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"
}
@@ -0,0 +1,24 @@
# NOTE(review): the top-level 'version' key is obsolete in Compose v2 — it is
# ignored with a warning and can be removed.
version: "3.8"
services:
  centos-openclaw:
    image: quay.io/centos/centos:stream9
    container_name: centos-openclaw
    restart: unless-stopped
    networks:
      - caddy_caddy_net
    ports:
      - "18789:18789"
    volumes:
      - openclaw_data:/root/openclaw
    # Keep the stock CentOS image alive; software is installed via docker exec.
    command: /bin/bash -c "tail -f /dev/null"
volumes:
  openclaw_data:
networks:
  caddy_caddy_net:
    external: true
+15
View File
@@ -0,0 +1,15 @@
# Configure OpenClaw inside the CentOS container.
docker compose exec centos-openclaw bash
docker exec -it centos-openclaw /bin/bash
openclaw config set gateway.controlUi.allowedOrigins '["https://lejin82.ooguy.com/"]'
openclaw config set gateway.controlUi.dangerouslyAllowHostHeaderOriginFallback true
openclaw gateway restart
openclaw devices list
openclaw devices approve <THE_DEVICE_ID>
# FIX: 'Notion' was a bare word — it would execute as a failing command if this
# file were sourced; kept as a section note instead.
# Notion
mkdir -p ~/.config/notion
# NOTE(review): a real Notion API key is committed below — rotate it and keep
# the secret out of version control.
echo "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" > ~/.config/notion/api_key
+18
View File
@@ -0,0 +1,18 @@
scp -i /home/eujin/.azuressh/vm-data-lej002_key.pem -r /mnt/d/WindowsPartition/AzureVM/dockercompose/caddy eujin@104.214.184.38:/home/eujin/BlockVolume1/dockercompose/
scp -i /home/eujin/.azuressh/vm-data-lej002_key.pem -r /mnt/d/WindowsPartition/AzureVM/dockercompose/openclaw eujin@104.214.184.38:/home/eujin/BlockVolume1/dockercompose/
scp -i /home/eujin/.azuressh/vm-data-lej002_key.pem -r /mnt/d/WindowsPartition/AzureVM/dockercompose/openclawinstall eujin@104.214.184.38:/home/eujin/BlockVolume1/dockercompose/
scp -i /home/eujin/.azuressh/vm-data-lej002_key.pem -r /mnt/d/WindowsPartition/AzureVM/dockercompose/ollama eujin@104.214.184.38:/home/eujin/BlockVolume1/dockercompose/
scp -i /home/eujin/.azuressh/vm-data-lej002_key.pem -r /mnt/d/WindowsPartition/AzureVM/dockercompose/hermes eujin@104.214.184.38:/home/eujin/BlockVolume1/dockercompose/
scp -i /home/eujin/.azuressh/vm-data-lej002_key.pem -r /mnt/d/WindowsPartition/AzureVM/dockercompose/litellm eujin@104.214.184.38:/home/eujin/BlockVolume1/dockercompose/
#Remote
sudo chown -R eujin:eujin /BlockVolume1/dockercompose
sudo chown eujin:eujin /BlockVolume1/dockercompose
sudo chmod 700 /BlockVolume1/dockercompose
+9
View File
@@ -0,0 +1,9 @@
# SSH into the Azure VM with the data key (chmod 400 required by ssh).
ssh -i /home/eujin/.azuressh/vm-data-lej002_key.pem eujin@104.214.184.38
chmod 400 /home/eujin/.azuressh/vm-data-lej002_key.pem
# FIX: 'portainer' was a bare word — it would execute as a failing command if
# this file were sourced; kept as a section note instead.
# portainer
# Generate a bcrypt hash for Caddy basic_auth (rotate this plaintext password).
docker exec -it caddy caddy hash-password --plaintext "lejin2000"
+39
View File
@@ -0,0 +1,39 @@
-----BEGIN RSA PRIVATE KEY-----
MIIG4gIBAAKCAYEAp5E0Qu5pBvDtuZA5w69sSeb3TK5YXLoC9MwoAP57BF7v9z4F
VxheuVyn/ezwhZPDcR8H3UQh4GqLEn0af9l8sXgCXyaJhuWI8a5wCDn/kE5UhQ7T
On+X5XQM7MblFhh66hJm27G7jziM9g2HPNKL7Toujt1+/ajLOSZW3j4hBmszJW4w
c7jnugLClnndSxE1oxbRgmV/Jph/HqyCmOroMIfraZdKd+MnNcSyG4iop65L5vXU
JJ7kDAhbhy7FdWqpYvmWLZqBi4I9xsP51ETnZiPi/SpCh1K+h0x7AFf0RuBJ6Ob6
/lX3Ov3N+agPbICEZaf3c3ycX+e56PgFg+HqvajBmNk/6XhjSKmMkZDvW3LXp10Y
b27EauhI6PXmW814G+1T+KFhLPYvw5LA+W2eJm9bW1mYqbPs6ihUJHLskHedkoyu
PV4cQGLmYQrPpbC3kqTDyRwZGEp6Mc0P7cL/qQGFgbBMo4ALldsQkpJbbCJZEZcT
QRbPvf75RuJHtqaJAgMBAAECggGAK6plCgQCjnJ+IFr7EolGGMP9MZhFInS+jHge
/JNUD0GGGtGoppeNJ5SDmmICE2z8AyBo9jno8ggRkZHUM/jCy6RQSedK80IhO4mP
XDVw6XPgBoRketwBAyQIsjLqQ6XBcfm1oA5ip+G+qefAlBLxr7H3q/cAPq2bt5rM
DEH4bSnGPDzIl39Zs1lAx+Gdzw//AC7UDbzRM4eNqLqflaWxN+RTbkKkfzVMZHCL
US19daSv1tG8pIZbOt96qnF/tydB7LxXzBAcqJ9F9r2eKR5TYDd9SeitZODlONCf
wL4FpLQbXc7erRTg03xXwj9OMFkDVuMHlsGcVUr1AEYYt8Xr2AI9x1uqHvLlOoR3
zdl5LtEnZ0wtiG5oyy65gEU8UenE7qutKnQOCKn5VI/zgweA1lDbxPtVUUGukUYg
BZ4+kIA+F7cuvMVX19vLg7+sMVnADvz9XEwoC8DKRnT9vZWfgkg83xN17aLx//s1
XCJ9my/fmI8s2sqqqU4KL3bTF6Y5AoHBANB39UNtOzjgsszhscWlAFE/BaqJesp1
Yi0CBIvB3no9FJWQvjaMfvxXzPU+UYXWuphePDtvRIwqU3odPu6Sg0d1f1YMFlPh
89zTm5PPgGczx2NB6rW7F6Ch2qi55WvUYorcU2StFfEO8bZZ6AcrUrG49ck38UBm
5Ri5mREdFez9aKpGJkMmmwqh9q1IVfGc9YhMCjjyhnkF3HesJOgnY9U1TvC/QIEe
lrUpzJX7IlWY8B3zyNKIaUSGAKbdJ51VlwKBwQDNxeKgAemksAO0qojz9wwg/jSD
jlpY2MW1A0+7CvjEi/FU+3NQn3BibVpZXFWLp9giRqqNoJbFkeUFP43NixH9UbjH
O1jajnzkHtFyifRrzUkpVxirlQRWjybxfuFhkPNh/RFlE7xWjjOcTD5TBPzKdcnT
nxYPGxYasYl2WwWyNymLpdxumSuwUVRxL6JAS3/0BpL9TBMeQExurgBb7hj3NT8A
OQO5MosMkK/lFqwYs//8JAEKhRWtt6MhGWIyqN8CgcBDlC+IzRceqBCuRBe553RD
4jc3Alww136c8oOAvrcdT0Gl/ob67e6YZOoHMk2BIovYquvBpJ9HBM2Mz4BAHHS8
LIW74i02wg+orVxIvUJ1sh5Xud2ZTe4p324ftit+Ghpbw3KxNz7epyaKI3kJIe0r
ubbLpPvrEbAApOHO32vlKHzOffEXYaNu/fQWVlWt/mDF70h9D3myL8k5bi+EpohB
bIeLmaQSd1UJo2Qz1nZZ4zsrqoSWYywkz3/Ul+qooT8CgcAbfsaSZM8Ib3B/Uo4s
MyWOvwjbT0kpOAXq8v9TWdr1wFFF7rdw3W0R6dS5vgm8OHg71D9z3zJmQeFdOynC
fkOwGpxbGsX7pjBv9uI7sM1VHkAZYwbr3mSPAF/OwwpwlALBZCRP4jw3QJcK2nE1
fKQelXCbc1VqPU9Nz80zYGQ5dP97JnV23calr6hd22SNvMjjHYHEkMa5MxRDccKg
hgcV+Spq6DNkKqnDyw2FyerC/kJFObZd5nyIEtKcWJzzcfcCgcATtIm/hHNB1E6p
DdgtiJyZVJvId/9n/LM6Q5y77CDO4Gvh/n8Hm3uGRVTGrC9gzJH3DFAvSI2sk+0B
c9ZiUFyHQOPe3/DWnyCGPWS5W6OSKABOJGjifjm6yWx3Pgu6GdO91qaFNV1STM/D
0uiBWrocbWrMgSicAC3s2DmaYJ27U64NY9gFA6iPi84bZpDFzYQ2+zlLKplpUPwp
WaOjbzuAU4dEePC41lvxACZx8wJ/fHQtcOBck91Fm4GFdeuaMAY=
-----END RSA PRIVATE KEY-----