Setting up a Cloudflare Worker for CORS
addEventListener("fetch", (event) => {
	event.respondWith(handleRequest(event.request));
});

async function handleRequest(request) {
	// Re-create the response so its headers become mutable
	let response = await fetch(request);
	response = new Response(response.body, response);
	// Access-Control-Allow-Origin must be a full origin, including the scheme
	response.headers.set(
		"Access-Control-Allow-Origin",
		"https://frontend-h5.shyc883.com"
	);
	response.headers.set("Access-Control-Allow-Methods", "GET, OPTIONS, POST");
	response.headers.set(
		"Access-Control-Allow-Headers",
		"Content-Type, Authorization"
	);
	response.headers.set("Access-Control-Allow-Credentials", true);
	return response;
}
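A quick preflight check of the headers the Worker injects (a sketch: api.example.com stands in for whatever host the Worker route is attached to):

curl -si -X OPTIONS "https://api.example.com/" \
  -H "Origin: https://frontend-h5.shyc883.com" \
  -H "Access-Control-Request-Method: POST" \
  -H "Access-Control-Request-Headers: Content-Type" | grep -i "access-control"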
Terraform_create_record
terraform {
  required_providers {
    cloudflare = {
      source = "cloudflare/cloudflare"
      version = "~> 2.0"
    }
  }
}

provider "cloudflare" {
  email   = "cloudflare@gmail.com"
  api_key = "1488ed0d2082ed36c010b773431fd9dcacde1"
  account_id = "06ae012a1ba907df24a220cd14a4fa8b"
}

resource "cloudflare_record" "gitlab" {
  zone_id = "92c6d5010fbacab27d464f4d79c11fce"
  name    = "gitlab"
  value   = "192.123.168.234"
  type    = "A"
  ttl     = 1 # TTL must be 1 (automatic) when proxied = true
  proxied = true
}
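The usual cycle to apply the record above (a sketch; assumes the config is saved as main.tf with valid credentials, and gitlab.example.com stands in for the zone's real name):

terraform init    # download the cloudflare provider
terraform plan    # preview: one cloudflare_record to add
terraform apply   # create the DNS record
dig +short gitlab.example.com   # verify it resolves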
Terraform_create_page_rule
# Add a page rule to the domain
resource "cloudflare_page_rule" "page_rule_png" {
  zone_id = "92c6d5010fbacab27d464f4d79c11fce"
  target = "www.example.com/*.png*"
  status   = "active"

  actions {
    always_use_https = true
    browser_cache_ttl = 86400
    cache_level = "cache_everything"
    # edge_cache_ttl = 86400
    cache_key_fields {
      cookie {}
      header {}
      host {}
      query_string {
        ignore = true
      }
      user {}
    }
    # cache_ttl_by_status {
    #   codes = "200-299"
    #   ttl   = 300
    # }
    # cache_ttl_by_status {
    #   codes = "300-399"
    #   ttl   = 60
    # }
    # cache_ttl_by_status {
    #   codes = "400-403"
    #   ttl   = -1
    # }
    # cache_ttl_by_status {
    #   codes = "404"
    #   ttl   = 30
    # }
    # cache_ttl_by_status {
    #   codes = "405-499"
    #   ttl   = -1
    # }
    # cache_ttl_by_status {
    #   codes = "500-599"
    #   ttl   = 0
    # }
  }
}

# resource "cloudflare_page_rule" "rules" {
#   count = length(keys(var.targets))
#   lifecycle {
#     create_before_destroy = true
#   }
#
#   zone_id = "92c6d5010fbacab27d464f4d79c11fce"
#   target  = var.targets[element(keys(var.targets), count.index)]
#   actions {
#     always_use_https = true
#     cache_level      = "cache_everything"
#   }
#   priority = count.index + 1
# }
Terraform_create_rate_limit_rule
# Create rate limit rule
resource "cloudflare_rate_limit" "wss_rate_limit" {
  zone_id = "92c6d5010fbacab27d464f4d79c11fce"
  threshold = 50
  period = 60
  match {
    request {
      url_pattern = "*wss*/*"
    }
  }
  action {
    mode = "ban"
    timeout = 3600
  }
  correlate {
    by = "nat"
  }
}

resource "cloudflare_rate_limit" "frontend_rate_limit" {
  zone_id = "92c6d5010fbacab27d464f4d79c11fce"
  threshold = 50
  period = 10
  match {
    request {
      url_pattern = "*h5*/*"
    }
  }
  action {
    mode = "ban"
    timeout = 3600
  }
  correlate {
    by = "nat"
  }
}
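A smoke test for the second rule (a sketch; example.com stands in for the zone, and the path just needs to match *h5*/*):

# Fire 60 requests quickly; once the threshold trips, Cloudflare
# should answer 429 for the rest of the ban window.
for i in $(seq 1 60); do
  curl -s -o /dev/null -w "%{http_code}\n" "https://example.com/h5/index.html"
done | sort | uniq -c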
docker-compose
cAdvisor
Elasticsearch
local dev
NodeJS
rstudio
rsyncd
Dockerfile
awscli
buildx
  • Dockerfile
  • docker buildx build --push --platform linux/arm64,linux/amd64 -t zeyanlin/app .
dind
golang
  • Dockerfile
  • docker build --secret id=mysecret,src=id_rsa -t app . (see the BuildKit secret sketch after this list)
goproxy
  • Dockerfile
  • docker buildx build -f goproxy/Dockerfile --platform linux/amd64,linux/arm64 -t zeyanlin/goproxy:latest --push .
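The golang --secret build above relies on BuildKit secret mounts; a minimal sketch of both sides (the secret id and file names mirror the command, the Dockerfile body is an assumption):

cat > Dockerfile <<'EOF'
# syntax=docker/dockerfile:1
FROM alpine
# The secret is mounted only for this RUN step and never stored in a layer
RUN --mount=type=secret,id=mysecret cat /run/secrets/mysecret
EOF
DOCKER_BUILDKIT=1 docker build --secret id=mysecret,src=id_rsa -t app .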
Gitlab-ci
nginx
rstudio
supervisord
add member by project
Admin Area -> Settings -> General -> LDAP settings -> uncheck "Lock memberships to LDAP synchronization"
backup cronjob
# Backup Gitlab configs (% must be escaped in crontab, and cron's sh has no &>)
1 0 * * * /usr/bin/tar -zcf /var/opt/gitlab/backups/`date +\%Y_\%m_\%d`_gitlab_config.tar.gz /etc/gitlab > /tmp/backup.log 2>&1
# Backup Gitlab data
1 1 * * * /usr/bin/gitlab-backup create STRATEGY=copy BACKUP=`date +\%Y_\%m_\%d` >> /tmp/backup.log 2>&1
# Rotate: drop archives older than 15 days
0 2 * * * /usr/bin/find /data/backups/ -name "*.tar*" -mtime +15 -delete
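Restoring from one of these archives (a sketch; 2024_01_01 is an example of the date prefix the cron produces):

# Stop the services that touch the database before restoring
sudo gitlab-ctl stop puma
sudo gitlab-ctl stop sidekiq
sudo gitlab-backup restore BACKUP=2024_01_01
sudo gitlab-ctl reconfigure
sudo gitlab-ctl restart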
gitlab-ci.yml template
config
gitlab-runner
issue
console output during install
[execute] psql: could not connect to server: Connection refused
            Is the server running locally and accepting
            connections on Unix domain socket "/var/opt/gitlab/postgresql/.s.PGSQL.5432"?
solve
# stop service
sudo gitlab-ctl stop
sudo systemctl stop gitlab-runsvdir.service

# check if there are any postgres processes; shouldn't be
ps aux | grep postgre

# remove process pid
sudo rm /var/opt/gitlab/postgresql/data/postmaster.pid

# start service
sudo systemctl start gitlab-runsvdir.service
sudo gitlab-ctl reconfigure
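A quick check that PostgreSQL came back cleanly (a sketch):

sudo gitlab-ctl status postgresql
sudo gitlab-rake gitlab:check SANITIZE=true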
issue1
Fix the invalid redirect URL after restricting access permissions on GitLab Pages.
  1. Remove the "gitlab_pages" block from /etc/gitlab/gitlab-secrets.json
  2. gitlab-ctl reconfigure
issue2
console output
# Gitlab Container Registry
Error response from daemon: Get https://registry.knowhow.fun/v2/: x509: certificate has expired or is not yet valid
/etc/gitlab/gitlab.rb
solve
yum install ca-certificates
cd /etc/gitlab
# Create a self-signed CA valid for 10 years
openssl genrsa -out ca.key 4096
openssl req -new -x509 -days 3650 -key ca.key -out ca.crt
# Create the server key and a signing request for the registry host
openssl genrsa -out server.key 4096
openssl req -new -key server.key -out server.csr
# Sign the server certificate with the CA
openssl x509 -req -days 3650 -in server.csr -CA ca.crt -CAkey ca.key -set_serial 01 -out server.crt
# Trust both certificates system-wide
cp server.crt /etc/pki/ca-trust/source/anchors/
cp ca.crt /etc/pki/ca-trust/source/anchors/
update-ca-trust
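To confirm the new certificate before retrying docker (a sketch):

# Check the validity window of the freshly signed certificate
openssl x509 -in /etc/gitlab/server.crt -noout -dates
# The registry should now answer without x509 errors
docker login registry.knowhow.fun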
issue3
console output
# Gitlab Container Registry
received unexpected HTTP status: 500 Internal Server Error
solve

/etc/gitlab/gitlab.rb

gitlab_rails['ldap_servers'] = {
    'main' => {
        'encryption' => 'plain',
    }
}
docker.service
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --bip 10.255.0.1/16 --containerd=/run/containerd/containerd.sock --insecure-registry hub.srjob.co:8888 --insecure-registry registry.knowhow.fun
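After editing the unit, reload and confirm the daemon answers on both sockets (a sketch; the TCP port mirrors the ExecStart line above):

sudo systemctl daemon-reload
sudo systemctl restart docker
docker info --format '{{.ServerVersion}}'
curl -s http://127.0.0.1:2375/version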
gd.service
[Unit]
Description=Fetch DNS
After=network.target
After=mysql.service

[Service]
WorkingDirectory=/data/dns
ExecStart=/data/dns/gd -o hourly
ExecReload=/bin/kill -s HUP $MAINPID
Restart=always

[Install]
WantedBy=multi-user.target
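Installing and starting the unit (a sketch):

sudo cp gd.service /etc/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable --now gd.service
systemctl status gd.service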
openresty.service
[Unit]
Description=The OpenResty Application Platform
After=syslog.target network-online.target remote-fs.target nss-lookup.target
Wants=network-online.target

[Service]
Type=forking
WorkingDirectory=/data/nginx
PIDFile=/data/nginx/logs/nginx.pid
ExecStartPre=/usr/bin/chown -R root:root /data/nginx
ExecStartPre=/usr/bin/rm -f /data/nginx/logs/nginx.pid
ExecStartPre=/usr/local/openresty/nginx/sbin/nginx -p /data/nginx -t
ExecStart=/usr/local/openresty/nginx/sbin/nginx -p /data/nginx
ExecReload=/bin/kill -s HUP $MAINPID
ExecStop=-/sbin/start-stop-daemon --quiet --stop --retry QUIT/5 --pidfile /data/nginx/logs/nginx.pid
#ExecStop=/bin/kill -s QUIT $MAINPID
KillSignal=SIGQUIT
TimeoutStopSec=5
KillMode=process
PrivateTmp=true
LimitNOFILE=1048576

[Install]
WantedBy=multi-user.target
pm2.service
[Unit]
Description=PM2 process manager
Documentation=https://pm2.keymetrics.io/
After=network.target

[Service]
Type=forking
User=root
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
Environment=PM2_HOME=/root/.pm2
PIDFile=/root/.pm2/pm2.pid
WorkingDirectory=/game/publish
ExecStart=/lib/node_modules/pm2/bin/pm2 start game_api.json manage.json
ExecReload=/lib/node_modules/pm2/bin/pm2 reload all
ExecStop=/lib/node_modules/pm2/bin/pm2 kill

[Install]
WantedBy=multi-user.target
logrotate
/data/gameapi/logs/*.log {
    create 0644 nobody root
    daily
    rotate 30
    dateext
    missingok
    notifempty
    compress
    sharedscripts
    postrotate
        /bin/kill -USR1 `cat /data/gameapi/logs/nginx.pid 2>/dev/null` 2>/dev/null || true
    endscript
}
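To dry-run the rotation before trusting it to cron (a sketch; assumes the snippet is saved as /etc/logrotate.d/gameapi):

# -d prints what would happen without touching any file
logrotate -d /etc/logrotate.d/gameapi
# force one rotation and confirm nginx reopens its logs on USR1
logrotate -f /etc/logrotate.d/gameapi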
details
Look at me: you can't see me, you can't see me.
class
classDiagram
Class01 <|-- AveryLongClass : Cool
Class03 *-- Class04
Class05 o-- Class06
Class07 .. Class08
Class09 --> C2 : Where am i?
Class09 --* C3
Class09 --|> Class07
Class07 : equals()
Class07 : Object[] elementData
Class01 : size()
Class01 : int chimp
Class01 : int gorilla
Class08 <--> C2: Cool label
flow-link
flowchart LR
A --o B
B --x C

D o--o E
E <--> F
F x--x G
flow-link1
flow-shapes
graph LR
id1[Rectangle]
id2(Rounded rectangle)
id3([Stadium])
id4[[Subroutine]]
id5[(Cylinder)]
id6((Circle))
id7>Asymmetric shape]
id8{Rhombus}
id9{{Hexagon}}
id10[/Parallelogram 1/]
id11[\Parallelogram 2\]
id12[/Trapezoid 1\]
id13[\Trapezoid 2/]
id14(((Double circle)))
flow-subgraphs
flowchart TD
c1-->a2

    subgraph one
    a1-->a2
    end

    subgraph "`**two**`"
    b1-->b2
    end

    subgraph three
    c1-->c2
    end
gantt
gantt
dateFormat YYYY-MM-DD
title Adding GANTT diagram functionality to mermaid
section A section
Completed task :done, des1, 2014-01-06,2014-01-08
Active task :active, des2, 2014-01-09, 3d
Future task : des3, after des2, 5d
Future task2 : des4, after des3, 5d
section Critical tasks
Completed task in the critical line :crit, done, 2014-01-06,24h
Implement parser and jison :crit, done, after des1, 2d
Create tests for parser :crit, active, 3d
Future task in critical line :crit, 5d
Create tests for renderer :2d
Add to mermaid :1d
git
gitGraph
commit
commit
branch develop
checkout develop
commit
commit
checkout main
merge develop
commit
commit

er
erDiagram
CUSTOMER }|..|{ DELIVERY-ADDRESS : has
CUSTOMER ||--o{ ORDER : places
CUSTOMER ||--o{ INVOICE : "liable for"
DELIVERY-ADDRESS ||--o{ ORDER : receives
INVOICE ||--|{ ORDER : covers
ORDER ||--|{ ORDER-ITEM : includes
PRODUCT-CATEGORY ||--|{ PRODUCT : contains
PRODUCT ||--o{ ORDER-ITEM : "ordered in"
journey
journey
title My working day
section Go to work
Make tea: 5: Me
Go upstairs: 3: Me
Do work: 1: Me, Cat
section Go home
Go downstairs: 5: Me
Sit down: 3: Me

pie
pie title Pets adopted by volunteers
"Dogs" : 386
"Cats" : 85
"Rats" : 15
sequence
sequenceDiagram
participant Alice
participant Bob
Alice->>John: Hello John, how are you?
loop Healthcheck
John->John: Fight against hypochondria
end
Note right of John: Rational thoughts <br/>prevail...
John-->Alice: Great!
John->Bob: How about you?
Bob-->John: Jolly good!
state
stateDiagram-v2
open: Open Door
closed: Closed Door
locked: Locked Door
open --> closed: Close
closed --> locked: Lock
locked --> closed: Unlock
closed --> open: Open
Synology Active Backup for Business backup task failed
Due to an IP change last week:
  1. Create a NAS_to_ESXi firewall policy.
  2. Virtual Machine -> Task List -> delete the task.
  3. Virtual Machine -> VMware vSphere -> Manage Hypervisor -> delete the old IP, add the new IP.
Set LACP for Synology NAS and NETGEAR switch
NETGEAR
  1. Switching -> LAG -> LAG Configuration -> ch1 -> ports 41 and 42 -> Apply.
  2. ch1 -> Description: NAS, LAG Type: LACP -> Apply.
  3. Switching -> VLAN -> Port PVID Configuration -> g41, g42 PVID: 99, VLAN Member: 10-14,17-23,99,101, VLAN Tag: 10-14,17-23,99,101 -> Apply.
Synology
  • Control Panel -> Network -> Network Interface -> Create Bond.
Set NAT in FortiGate
1. Policy & Objects -> Virtual IPs -> Create New
  • Name: IT-VPN
  • Interface: wan2
  • External IP: 0.0.0.0
Port forwarding
  • Protocol: TCP
  • External service port: 19979
  • Map to port: 19979
2. Policy & Objects -> IPv4 Policy
  1. From zone wan2 to zone Knowhow_Vlan
  2. From any to IT-VPN
Juniper SRX 320
# Show the current software version
show system software

# Show system uptime
show system uptime

# Show hardware boards and serial numbers
show chassis hardware

# Show current hardware status
show chassis environment

# Show Routing Engine (RE) resource usage and status
show chassis routing-engine


# Show the current number of concurrent firewall sessions
show security flow session summary

# Show the current concurrent firewall sessions in detail
show security flow session

# Clear current sessions
clear security flow session all

# Check global ALG status
show security alg status

# Walk an OID
show snmp mib walk decimal 1.3.6.1.2.1.2.2.1.2

# Configure a security policy
set security policies from-zone <zone> to-zone <zone> policy <name>

# Show the routing table
show route

# Show the ARP table
show arp

# Show system logs
show log messages

# Show the status of all interfaces
show interfaces terse

# Show detailed information for one interface
show interfaces ge-x/y/z detail

# Compare changes against a rollback
show | compare rollback ?
show | compare rollback 1

# Show system information
show system

# Show the configuration
show configuration

# Monitor interface packet forwarding statistics in real time
monitor interface ge-x/y/z

# Live packet capture (tcpdump-like, similar to the ScreenOS snoop command)
monitor traffic interface ge-x/y/z
map
# map
# Exempt 35.229.201.209 from rate limiting: an empty key is never counted
map $remote_addr $limit_key {
    35.229.201.209 "";
    default $binary_remote_addr;
}
# wss.conf
limit_req_zone $limit_key zone=websocket:10m rate=20r/s;
limit_req_status 499;

server {
    location = / {
        # nodelay only takes effect together with burst=N
        limit_req zone=websocket nodelay;
        limit_req_log_level warn;
    }
}
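A smoke test of the limit (a sketch; run from a non-exempt address against the local server):

# Exceed 20 r/s; excess requests should return the configured
# limit_req_status (499).
for i in $(seq 1 40); do
  curl -sk -o /dev/null -w "%{http_code}\n" "https://localhost/"
done | sort | uniq -c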
rewrite
1
# https://localhost/img/nginx.svg can access /data/nginxconfig.io/src/static/nginx.svg
location /img {
    rewrite '^/img/(.*)$' /static/$1;
}

location /static {
    root /data/nginxconfig.io/src;
    index nginx.svg;
}
2
# https://localhost/photo/nginx.svg can access /data/nginxconfig.io/src/static/nginx.svg

location /photo {
    root /data/nginxconfig.io/src;
    try_files $uri /$uri @pic;
}

location @pic {
    rewrite '^/photo/(.*)$' /static/$1;
}
3
# remove prefix path and allow proxy_pass POST
location /upload/ {
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    root /data/nginx/html;
    # Remove path
    rewrite ^/upload/(.*) /$1  break;
    proxy_pass https://logo$uri$is_args$args;
    # Proxy_pass POST
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection 'upgrade';
    proxy_cache_bypass $http_upgrade;
    #proxy_redirect  https://logo/ /;
}

location / {
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    root /data/nginx/html;
    index  index.html index.htm;
}
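Verifying the three rewrites (a sketch; paths follow the comments above):

# 1: /img/* is served from /static/*
curl -skI https://localhost/img/nginx.svg
# 2: /photo/* falls through to the @pic rewrite
curl -skI https://localhost/photo/nginx.svg
# 3: the /upload/ prefix is stripped before proxying (POST preserved)
curl -sk -X POST https://localhost/upload/file -o /dev/null -w "%{http_code}\n"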
grafana behind nginx
server/ssl.conf
ssl_certificate     /etc/ssl/go2cloudten.com.crt;
ssl_certificate_key /etc/ssl/go2cloudten.com.key;
ssl_ciphers "EECDH+ECDSA+AESGCM:EECDH+aRSA+AESGCM:EECDH+ECDSA+SHA384:EECDH+ECDSA+SHA256:EECDH+aRSA+SHA384:EECDH+aRSA+SHA256:EECDH+aRSA+RC4:EECDH:EDH+aRSA:HIGH:!RC2:!RC4:!aNULL:!eNULL:!LOW:!IDEA:!DES:!TDES:!3DES:!MD5:!EXP:!PSK:!SRP:!DSS:!EXPORT:!ANON";
ssl_prefer_server_ciphers on;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_session_timeout 50m;
server/proxy.conf
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
grafana.conf
server {
    listen       443 ssl;
    server_name  grafana-test.go2cloudten.com;
    server_name  grafana.go2cloudten.com;
    include server/ssl.conf;
    include server/proxy.conf;
    access_log  logs/grafana.log json;
    error_log   logs/grafana.error.log warn;
    location / {
        # "grafana" must resolve via an upstream block, e.g. upstream grafana { server 127.0.0.1:3000; }
        proxy_pass   http://grafana;
        proxy_connect_timeout 300;
        proxy_read_timeout 700;
        proxy_send_timeout 700;
        proxy_set_header Host $host;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
    }
}
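A quick check through the proxy (a sketch; /api/health is Grafana's unauthenticated health endpoint):

curl -skI https://grafana.go2cloudten.com/ | head -n 5
curl -sk https://grafana.go2cloudten.com/api/health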
Vagrantfile template
others
common
Metasploitable3
send alert
  1. Use a webhook: create the channel and webhook in Mattermost, and put the script in $(grep AlertScriptsPath /etc/zabbix/zabbix_server.conf) (a send-script sketch follows this list).
  2. Create a media type in Zabbix (Administration -> Media types).
  3. Add the media to the user (Administration -> Users -> Media).
  4. Create an action (Configuration -> Actions -> Trigger actions).
  5. Debug (write a log in the script).
    1. Media types:
      1. PROBLEM:\nProblem started at {EVENT.TIME} on {EVENT.DATE}\nProblem: {EVENT.NAME}\nHost: {HOST.NAME}\nSeverity: {EVENT.SEVERITY}\nCurrent value: {EVENT.OPDATA}\nProblem ID: {EVENT.ID}\n{TRIGGER.URL}
      2. RECOVERY:\nProblem has been resolved at {EVENT.RECOVERY.TIME} on {EVENT.RECOVERY.DATE}\nProblem: {EVENT.NAME}\nDuration: {EVENT.DURATION}\nHost: {HOST.NAME}\nSeverity: {EVENT.SEVERITY}\nProblem ID: {EVENT.ID}\n{TRIGGER.URL}
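A minimal send script for step 1 (a sketch; it assumes the media type passes {ALERT.SENDTO}, {ALERT.SUBJECT} and {ALERT.MESSAGE} as the three script parameters, and the webhook URL is a placeholder):

#!/bin/bash
# /usr/lib/zabbix/alertscripts/mattermost.sh
WEBHOOK="$1"    # Mattermost incoming-webhook URL ({ALERT.SENDTO})
SUBJECT="$2"    # {ALERT.SUBJECT}
MESSAGE="$3"    # {ALERT.MESSAGE}
# Log for debugging (step 5)
echo "$(date) ${SUBJECT}" >> /var/log/zabbix/alert.log
# Post to the Mattermost incoming webhook
curl -s -X POST -H 'Content-Type: application/json' \
  -d "{\"text\": \"${SUBJECT}\n${MESSAGE}\"}" "$WEBHOOK"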
zabbix server
/etc/zabbix/zabbix_server.conf

The Zabbix server showed high load and slow queries; increasing ValueCacheSize solved the problem.

LogFile=/var/log/zabbix/zabbix_server.log
LogFileSize=5
PidFile=/var/run/zabbix/zabbix_server.pid
SocketDir=/var/run/zabbix
DBHost=localhost
DBName=zabbix_db
DBUser=zabbix_user
DBPassword=zabbix
DBSocket=/data/mysql/mysql.sock
StartPollers=200
StartPreprocessors=30
StartPollersUnreachable=30
StartTrappers=100
StartDiscoverers=30
SNMPTrapperFile=/var/log/snmptrap/snmptrap.log
CacheSize=4G
HistoryCacheSize=2G
HistoryIndexCacheSize=2G
TrendCacheSize=2G
ValueCacheSize=24G
Timeout=30
UnavailableDelay=120
AlertScriptsPath=/usr/lib/zabbix/alertscripts
ExternalScripts=/usr/lib/zabbix/externalscripts
LogSlowQueries=3000
StatsAllowedIP=127.0.0.1
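After changing the config, restart and confirm the cache is actually being used (a sketch):

sudo systemctl restart zabbix-server
# Runtime diagnostics for the value cache (Zabbix 5.0+)
zabbix_server -R diaginfo=valuecache
tail -f /var/log/zabbix/zabbix_server.log | grep -i "slow query"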
/etc/my.cnf
[client-server]
socket=/data/mysql/mysql.sock

[mysqld]
socket=/data/mysql/mysql.sock
datadir=/data/mysql

character_set_server=utf8mb4
character_set_filesystem=utf8
max_allowed_packet=32M
event_scheduler=1
default_storage_engine=innodb
open_files_limit=65535
local_infile=1
sysdate_is_now=1
back_log=256
##error log format
# connection
interactive_timeout=28800
wait_timeout=28800
lock_wait_timeout=28800
skip_name_resolve=1
max_connections=2000
max_user_connections=1000
max_connect_errors=1000000

# table cache performance settings #
table_open_cache=8192
table_definition_cache=8192
table_open_cache_instances=16

# session memory settings #
read_buffer_size=131072
read_rnd_buffer_size=262144
sort_buffer_size=262144
tmp_table_size=67108864
join_buffer_size=8M
thread_cache_size=256

# log settings #
###slow log  ###
slow_query_log=1
log_queries_not_using_indexes=0
log_slow_admin_statements=1
#log_slow_slave_statements = 1
log_throttle_queries_not_using_indexes=1
long_query_time=0.5
log_bin_trust_function_creators=1

###binlog ###
binlog_cache_size=32K
max_binlog_cache_size=1G
max_binlog_size=2G
expire_logs_days=31
log_slave_updates=1
#binlog_format=STATEMENT
binlog_format=ROW
slave_compressed_protocol = 1
# innodb settings #
#innodb_data_file_path=ibdata1:4G;ibdata2:4G:autoextend
innodb_page_size=16384
innodb_buffer_pool_size=4G
innodb_buffer_pool_instances=1
innodb_buffer_pool_load_at_startup=1
innodb_buffer_pool_dump_at_shutdown=1
innodb_lock_wait_timeout=50
innodb_io_capacity=100
innodb_io_capacity_max=200
innodb_flush_neighbors=1
innodb_file_per_table=1
innodb_log_files_in_group=3
innodb_log_file_size=2G
innodb_log_buffer_size=33554432
innodb_purge_threads=2
innodb_large_prefix=1
innodb_thread_concurrency=64
innodb_print_all_deadlocks=1
innodb_strict_mode=1
innodb_sort_buffer_size=67108864
innodb_write_io_threads=4
innodb_read_io_threads=4
innodb_online_alter_log_max_size=1G
innodb_open_files=60000
innodb_max_dirty_pages_pct=75
innodb_adaptive_flushing=on
innodb_flush_log_at_trx_commit=1

sync_binlog=1

[mysqld_safe]
log-error=/var/log/mariadb/mariadb.log
#
# include *.cnf from the config directory
#
!includedir /etc/my.cnf.d
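To confirm the server picked up the key settings (a sketch; the socket path matches the [client-server] section above):

mysql --socket=/data/mysql/mysql.sock -e "SHOW VARIABLES WHERE Variable_name IN ('innodb_buffer_pool_size','max_connections','slow_query_log','long_query_time');"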