Lookup
# List all lookup plugins
ansible-doc -t lookup -l

# Use `ansible-doc -t lookup <plugin>` to see details for one plugin
ansible-doc -t lookup file
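
# Try a lookup from an ad-hoc debug task, e.g. the env lookup (a minimal sketch)
ansible localhost -m debug -a "msg={{ lookup('env', 'HOME') }}"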
winrm
After enabling WinRM on the Hyper-V host, the error below kept appearing; adding one line to the inventory group fixes it:

ansible_winrm_transport=ntlm
- console output
hyper-v01 | UNREACHABLE! => {
    "changed": false,
    "msg": "ssl: the specified credentials were rejected by the server",
    "unreachable": true
}
- /etc/ansible/hosts
ansible_user=administrator
ansible_password=password
ansible_port=5986
ansible_connection=winrm
ansible_winrm_server_cert_validation=ignore
ansible_winrm_transport=ntlm
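
# verify connectivity; assumes the Windows hosts are in a group named `windows`
ansible windows -m win_ping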
CloudFront
# list distributions
aws cloudfront list-distributions --query '*.Items[*].[Comment,Id,Aliases.Items[0],DefaultCacheBehavior.TargetOriginId]' --output table

# create invalidation
aws cloudfront create-invalidation --distribution-id  EATDVGD171BHDS1  --paths "/*"

## check whether CloudFront access logging is enabled on each distribution
for i in $(aws cloudfront list-distributions --output text --query 'DistributionList.Items[*].Id' --profile route53)
do
  result=$(aws cloudfront get-distribution --id ${i} --query 'Distribution.DistributionConfig.Logging' --profile route53 | jq .Enabled)
  if [[ "${result}" != "true" ]];then
    echo ${i}
  fi
done
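
# poll an invalidation until Status is Completed; the id comes from the create-invalidation output
aws cloudfront get-invalidation --distribution-id EATDVGD171BHDS1 --id INVALIDATION_ID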
EC2
# list
aws ec2 describe-instances --query 'Reservations[*].Instances[*].[Tags[0].Value,InstanceId]' --output table --page-size 100
ECR
# Get password and login to 12345.dkr.ecr.ap-northeast-1.amazonaws.com
aws ecr get-login-password | docker login --username AWS --password-stdin 12345.dkr.ecr.ap-northeast-1.amazonaws.com
S3
# Copy local file to S3
aws s3 cp ./pic.png s3://bucket_name/dir/

# Sync local local_dir to S3
aws s3 sync local_dir s3://bucket_name --exclude 'gameConfig.json' --acl public-read --delete
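
# Grant temporary read access to a private object (expiry in seconds)
aws s3 presign s3://bucket_name/dir/pic.png --expires-in 3600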
snapshot
# list
aws ec2 describe-snapshots \
        --owner-ids self \
        --query "Snapshots[?(Tags[0].Value=='backend')].[SnapshotId,VolumeId]" \
        --region ap-northeast-1

# create
aws ec2 create-snapshot --volume-id vol-02468851c2bc3bc4b --description "gitlab-$(date +%F)" --region ap-northeast-1

# delete
aws ec2 delete-snapshot --snapshot-id snap-1234567890abcdef0 --region ap-northeast-1
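
# the Tags[0] filter above only matches tagged snapshots, so tag new ones too (key/value are examples)
aws ec2 create-tags --resources snap-1234567890abcdef0 --tags Key=Name,Value=backend --region ap-northeast-1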
sns
region='ap-east-1'
account_id='888886666321'
topic='sa'

# create topic
aws sns create-topic --name ${topic}

# subscribe
aws sns subscribe --topic-arn arn:aws:sns:${region}:${account_id}:${topic} --protocol email --notification-endpoint ricky@gmail.com

# list
aws sns list-subscriptions-by-topic --topic-arn arn:aws:sns:${region}:${account_id}:${topic}

# create alarm
## metric-name units:
##   CPUUtilization -> percent
##   NetworkIn      -> bytes
##   NetworkOut     -> bytes
# NB: text output is tab-separated; read assumes the Name tag has no spaces
aws ec2 describe-instances --query 'Reservations[*].Instances[*].[Tags[0].Value,InstanceId]' --output text --page-size 100 |
while read -r ID VALUE
do
    aws cloudwatch put-metric-alarm \
        --alarm-name ${ID}_netout \
        --metric-name NetworkOut \
        --namespace AWS/EC2 \
        --statistic Average \
        --period 300 \
        --threshold 2560000 \
        --comparison-operator GreaterThanOrEqualToThreshold \
        --dimensions  "Name=InstanceId,Value=${VALUE}" \
        --evaluation-periods 3 \
        --alarm-actions arn:aws:sns:${region}:${account_id}:${topic}
        ##--unit Bytes
    echo "$ID done"
done
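
# confirm the alarms were created
aws cloudwatch describe-alarms --query 'MetricAlarms[*].[AlarmName,StateValue]' --output table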
WAF
aws wafv2 create-web-acl \
  --name acl_name \
  --scope CLOUDFRONT \
  --default-action Allow={} \
  --visibility-config SampledRequestsEnabled=true,CloudWatchMetricsEnabled=true,MetricName=metric_acl_name \
  --rules file://rules.json
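
# a minimal sketch of rules.json with a single rate-based rule; name, limit, and metric name are placeholders
[
	{
		"Name": "rate-limit-per-ip",
		"Priority": 0,
		"Statement": {
			"RateBasedStatement": { "Limit": 2000, "AggregateKeyType": "IP" }
		},
		"Action": { "Block": {} },
		"VisibilityConfig": {
			"SampledRequestsEnabled": true,
			"CloudWatchMetricsEnabled": true,
			"MetricName": "rate_limit_per_ip"
		}
	}
]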
Mount S3 Bucket on EC2

Reference: How to Mount S3 Bucket on Ubuntu 22.04 with S3FS Fuse

# Installing s3fs-fuse
sudo apt-get update && sudo apt-get install s3fs

# Configuring AWS Credentials
echo ACCESS_KEY_ID:SECRET_ACCESS_KEY > ${HOME}/.passwd-s3fs
chmod 600 ${HOME}/.passwd-s3fs

# Mounting the S3 Bucket
s3fs mybucketname:/path/to/dir /path/to/local/mountpoint -o passwd_file=${HOME}/.passwd-s3fs

# Ensuring Persistent Mounting
echo 's3fs#mybucketname:/path/to/dir /path/to/local/mountpoint fuse _netdev,allow_other 0 0' | sudo tee -a /etc/fstab
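
# exercise the fstab entry without rebooting
sudo umount /path/to/local/mountpoint
sudo mount -a
df -h /path/to/local/mountpoint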
S3 Bucket Policy
{
	"Version": "2012-10-17",
	"Statement": [
		{
			"Sid": "AllowPublicRead",
			"Effect": "Allow",
			"Principal": "*",
			"Action": "s3:GetObject",
			"Resource": "arn:aws:s3:::bucketName/*"
		}
	]
}
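
# attach the policy from the CLI (bucket name and file path are placeholders)
aws s3api put-bucket-policy --bucket bucketName --policy file://policy.json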
S3 CORS
[
	{
		"AllowedHeaders": ["*"],
		"AllowedMethods": ["GET", "PUT", "POST", "DELETE"],
		"AllowedOrigins": ["*"],
		"ExposeHeaders": [
			"x-amz-server-side-encryption",
			"x-amz-request-id",
			"x-amz-id-2"
		],
		"MaxAgeSeconds": 3000
	}
]
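
# apply from the CLI; s3api expects the array above wrapped as {"CORSRules": [...]} in cors.json
aws s3api put-bucket-cors --bucket bucketName --cors-configuration file://cors.json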
ECR Lifecycle Policy (keep the last 100 images)
{
	"rules": [
		{
			"rulePriority": 1,
			"description": "Keep only the last 100 images",
			"selection": {
				"tagStatus": "any",
				"countType": "imageCountMoreThan",
				"countNumber": 100
			},
			"action": {
				"type": "expire"
			}
		}
	]
}
ECR Lifecycle Policy (remove images with certain tags)
{
	"rules": [
		{
			"rulePriority": 1,
			"description": "Remove images with certain tag",
			"selection": {
				"tagStatus": "tagged",
				"tagPrefixList": ["tag1", "tag2"],
				"countType": "imageCountMoreThan",
				"countNumber": 0
			},
			"action": {
				"type": "expire"
			}
		}
	]
}
ECR Lifecycle Policy (expire untagged images after 14 days)
{
	"rules": [
		{
			"rulePriority": 1,
			"description": "Remove untagged images older than 14 days",
			"selection": {
				"tagStatus": "untagged",
				"countType": "sinceImagePushed",
				"countUnit": "days",
				"countNumber": 14
			},
			"action": {
				"type": "expire"
			}
		}
	]
}
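
# attach any of these documents to a repository (repository name and path are placeholders)
aws ecr put-lifecycle-policy --repository-name my-repo --lifecycle-policy-text file://policy.json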
tf
Terraform snippets, kept on separate pages: Provider, EC2, Elastic IP, MQ, RDS, Security Group, VPC, WAF
Setting CloudFlare Worker for CORS
addEventListener("fetch", (event) => {
	event.respondWith(handleRequest(event.request));
});

async function handleRequest(request) {
	let response = await fetch(request);
	response = new Response(response.body, response);
	response.headers.set(
		"Access-Control-Allow-Origin",
		// must be a full origin, scheme included, or "*"
		"https://frontend-h5.shyc883.com"
	);
	response.headers.set("Access-Control-Allow-Methods", "GET, OPTIONS, POST");
	response.headers.set(
		"Access-Control-Allow-Headers",
		"Content-Type, Authorization"
	);
	response.headers.set("Access-Control-Allow-Credentials", true);
	return response;
}
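
# check the injected headers from the shell; the worker URL below is a placeholder
curl -sI -H "Origin: https://frontend-h5.shyc883.com" https://worker.example.com/ | grep -i '^access-control'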
Terraform_create_record
terraform {
  required_providers {
    cloudflare = {
      source = "cloudflare/cloudflare"
      version = "~> 2.0"
    }
  }
}

provider "cloudflare" {
  email   = "cloudflare@gmail.com"
  api_key = "1488ed0d2082ed36c010b773431fd9dcacde1"
  account_id = "06ae012a1ba907df24a220cd14a4fa8b"
}

resource "cloudflare_record" "gitlab" {
  zone_id = "92c6d5010fbacab27d464f4d79c11fce"
  name    = "gitlab"
  value   = "192.123.168.234"
  type    = "A"
  ttl     = 1 # Cloudflare requires ttl = 1 (automatic) when proxied = true
  proxied = true
}
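
# instead of hard-coding credentials, the provider can also read them from the environment
export CLOUDFLARE_EMAIL='cloudflare@gmail.com'
export CLOUDFLARE_API_KEY='...'
terraform init && terraform apply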
Terraform_create_page_rule
# Add a page rule to the domain
resource "cloudflare_page_rule" "page_rule_png" {
  zone_id = "92c6d5010fbacab27d464f4d79c11fce"
  target = "www.example.com/*.png*"
  status   = "active"

  actions {
    # always_use_https cannot be combined with other settings; give it its own page rule
    # always_use_https = true
    browser_cache_ttl = 86400
    cache_level = "cache_everything"
    # edge_cache_ttl = 86400
    cache_key_fields {
      cookie {}
      header {}
      host {}
      query_string {
        ignore = true
      }
      user {}
    }
  #   cache_ttl_by_status {
  #           codes = "200-299"
  #           ttl = 300
  #       }
  #       cache_ttl_by_status {
  #           codes = "300-399"
  #           ttl = 60
  #       }
  #       cache_ttl_by_status {
  #           codes = "400-403"
  #           ttl = -1
  #       }
  #       cache_ttl_by_status {
  #           codes = "404"
  #           ttl = 30
  #       }
  #       cache_ttl_by_status {
  #           codes = "405-499"
  #           ttl = -1
  #       }
  #       cache_ttl_by_status {
  #           codes = "500-599"
  #           ttl = 0
  #       }
  # }
  }
}

# resource "cloudflare_page_rule" "rules" {
#   count = "${length(keys("${var.targets}"))}"
#   lifecycle {
#     create_before_destroy = true
#   }

#   zone_id = "92c6d5010fbacab27d464f4d79c11fce"
#   target = "${var.targets[element(keys(var.targets),count.index)]}"
#   actions {
#     always_use_https = "true"
#     cache_level = "cache_everything"
#   }
#   priority = "${count.index + 1}"
# }
Terraform_create_rate_limit_rule
# Create rate limit rule
resource "cloudflare_rate_limit" "wss_rate_limit" {
  zone_id = "92c6d5010fbacab27d464f4d79c11fce"
  threshold = 50
  period = 60
  match {
    request {
      url_pattern = "*wss*/*"
    }
  }
  action {
    mode = "ban"
    timeout = 3600
  }
  correlate {
    by = "nat"
  }
}

resource "cloudflare_rate_limit" "frontend_rate_limit" {
  zone_id = "92c6d5010fbacab27d464f4d79c11fce"
  threshold = 50
  period = 10
  match {
    request {
      url_pattern = "*h5*/*"
    }
  }
  action {
    mode = "ban"
    timeout = 3600
  }
  correlate {
    by = "nat"
  }
}
ab
ab -n 20 -c 20 -k https://default.hddv1.com/error
age
# generate public and private keys
age-keygen -o key.txt

# encrypt file with public key
age -r public_key -o file.txt.enc file.txt

# encrypt file with ssh key
age -R ~/.ssh/id_ed25519.pub file.txt > file.txt.enc

# decrypt file
age --decrypt -i key.txt file.txt.enc > file.txt
awk
# To lowercase
uuidgen|awk '{print tolower($0)}' # output: 649612b0-0fa4-4b50-9b13-17279f602a43

# To uppercase
echo 'hello world'|awk '{print toupper($0)}' # output: HELLO WORLD

# Extract a substring: `substr(string, start, length)`
echo "hello world" | awk '{print substr($0, 1, 5)}' # output: hello

# Replace every regex match in the string: `gsub(regex, replacement, string)`
# Replace only the first match: `sub(regex, replacement, string)`
echo "hello world" | awk '{gsub(/world/, "everyone"); print $0}' # output: hello everyone

# Truncate a number to an integer
echo "3.14" | awk '{print int($0)}' # output: 3

# Square root
echo "99" | awk '{print sqrt($0)}' # output: 9.94987

# Exponential and natural logarithm
echo "2" | awk '{print exp($0), log($0)}' # output: 7.38906 0.693147
certbot
# Install
sudo apt install certbot python3-certbot-nginx python3-certbot-dns-route53

# 1. Generating Wildcard Certificates
sudo certbot certonly --manual --preferred-challenges=dns --server https://acme-v02.api.letsencrypt.org/directory --agree-tos -d '*.example.com'
### add txt record then press enter to continue

# 2. Generating Wildcard Certificates
sudo certbot certonly -d example.com -d '*.example.com' --dns-route53 --agree-tos --server https://acme-v02.api.letsencrypt.org/directory

# Automating Renewal
0 0 * * 1 /usr/bin/certbot certonly --dns-route53 -d '*.example.com' --quiet --post-hook "systemctl reload nginx"
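
# dry run to confirm DNS credentials and the renewal path before relying on the cron job
sudo certbot renew --dry-run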
cutycapt
# Capture website page as picture
xvfb-run --server-args="-screen 0, 1024x768x24" cutycapt --url=https://www.google.com --out="/tmp/google.png"
dnscontrol
dnscontrol get-zones --format=js --out=example.com.js r53 ROUTE53 example.com
dnscontrol get-zones --format=js --out=example.com.js cloudflare CLOUDFLAREAPI example.com
k6
k6 run k6.js
hey
hey -n 200000 -c 500 -h2 -z 30s https://a8-wss.hddv1.com/test
openfortivpn
# https://github.com/adrienverge/openfortivpn
sudo openfortivpn ip:port --username=ricky --pppd-use-peerdns=1
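
# the same options can live in /etc/openfortivpn/config, keeping the password off the command line; values below are placeholders
#   host = vpn.example.com
#   port = 10443
#   username = ricky
#   password = secret
sudo openfortivpn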
openssl
# Self-signed certificate: import ca.p7b into the Trusted Root Certification Authorities store in certmgr.msc, otherwise Chrome will not trust it.
openssl crl2pkcs7 -nocrl -certfile ca.crt -out ca.p7b
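
# inspect the bundle before importing
openssl pkcs7 -in ca.p7b -print_certs -noout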
prlimit
# Changing Max open files raised an invalid-argument error: CentOS 6 and CentOS 7 use different commands
# CentOS6
for i in $(ps -ef | grep 'publish/server/game_server' | egrep -v 'grep|startall' | awk '{print $2}'); do echo -n "Max open files=1024000:1024000" > /proc/$i/limits; done

# CentOS7
for i in $(ps -ef | grep gateway | grep -v grep | awk '{print $2}'); do prlimit --pid $i --nofile=1024000:1024000 ; done
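
# verify the new limit (PID is a placeholder)
grep 'Max open files' /proc/<pid>/limits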
siege
siege --time=3s --concurrent=30000 https://a8-h5.hddv1.com/index.html
tr
# cat krypton2
YRIRY GJB CNFFJBEQ EBGGRA
# cat krypton2 | tr a-zA-Z n-za-mN-ZA-M
LEVEL TWO PASSWORD ROTTEN
vegeta
#!/usr/bin/env bash

attack() {
    echo "GET ${1}" |
        vegeta attack -duration=100s -header="User-Agent: baidu" -header="X-Forwarded-For: 47.0.0.1" -rate=500 -timeout=1s |
        vegeta encode |
        jaggr @count=rps \
            hist\[100,200,300,400,500\]:code \
            p25,p50,p95:latency \
            sum:bytes_in \
            sum:bytes_out |
        jplot rps+code.hist.100+code.hist.200+code.hist.300+code.hist.400+code.hist.500 \
            latency.p95+latency.p50+latency.p25 \
            bytes_in.sum+bytes_out.sum
}

if [[ -n ${1} ]]; then
    attack ${1}
fi

## -header="Connection: Upgrade" -header="Upgrade: websocket"
wrk
wrk -t10 -c1000 -d30s -H "User-Agent: baidu" "https://default.hddv1.com/error"
Vagrant with hyper-v provider
Enable-WindowsOptionalFeature -Online -FeatureName Microsoft-Hyper-V -All
Common
# Find out processes swap usage command
for file in /proc/*/status ; do awk '/VmSwap|Name/{printf $2 " " $3}END{ print ""}' $file; done | sort -k 2 -n -r | less
Gitbook

Hide the GitBook sidebar by default.

raw_path=$(pwd)
npm install -g gitbook-cli
gitbook install
cd ~/.gitbook/versions/3.2.3/node_modules/gitbook-plugin-theme-default
sed -i "25i\ \ \ \ gitbook.storage.set('sidebar', false);" src/js/theme/sidebar.js
npm install -g browserify uglify-js less less-plugin-clean-css
npm install
src/build.sh
cd ${raw_path}
Install
- autocorrect

A linter and formatter that helps improve copywriting by correcting spaces, words, and punctuation between CJK (Chinese, Japanese, Korean) and English. GitHub

wget https://github.com/huacnlee/autocorrect/releases/download/v1.7.4/autocorrect-darwin-amd64.tar.gz
- bpf

BCC - tools for BPF-based Linux I/O analysis, networking, monitoring, and more. The kernel must be 4.1 or higher; installing from source is preferred.

# `/usr/share/bcc/`
# https://github.com/iovisor/bcc
# https://github.com/iovisor/bcc/blob/master/docs/reference_guide.md#1-kernel-source-directory
# https://github.com/iovisor/bpftrace
yum install bcc-tools
- flamegraph

Stack trace visualizer

# https://github.com/brendangregg/FlameGraph
brew install flamegraph
- git-split-diffs

GitHub style split diffs in your terminal

npm install -g git-split-diffs
- glci

Test your GitLab CI pipeline changes locally using Docker. blog

yarn global add glci
- openresty
wget https://openresty.org/package/centos/openresty.repo -O /etc/yum.repos.d/openresty.repo
yum install -y openresty openresty-resty
- perf

Performance monitoring for the Linux kernel

# https://github.com/brendangregg/Misc/blob/master/perf_events/perf.md
# http://www.brendangregg.com/perf.html
yum install perf
- pptx2md

A pptx to markdown converter

pip3 install pptx2md
- sockperf

Network Benchmarking Utility

# https://github.com/Mellanox/sockperf
yum install sockperf
- upx

UPX - the Ultimate Packer for eXecutables

brew install upx
- wrk

Modern HTTP benchmarking tool

brew install wrk
LVM
# Find out which disk was resized
lsblk

# Confirm `/dev/sde` is the path of the newly added disk
pvresize /dev/sde

# vgdisplay [vg name]
# note the Free PE / Size count
vgdisplay vg3

# find the path of the LV to extend
lvdisplay

# lvresize -l +[free PE count] [LV path]
lvresize -l +38400 /dev/vg3/disklvm4

# grow the filesystem
xfs_growfs /dev/vg3/disklvm4

# verify the expansion succeeded
df -h
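
# xfs_growfs is XFS-only; for ext4 the equivalent step would be resize2fs
# resize2fs /dev/vg3/disklvm4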
Migrate Zabbix
mysqldump -uroot --opt zabbix > zabbix.sql
rsync -az zabbix.sql newserver:/root
mysql -uroot zabbix < zabbix.sql
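
# the import assumes the empty database already exists on the new server (utf8_bin collation per the Zabbix docs)
mysql -uroot -e "CREATE DATABASE zabbix CHARACTER SET utf8 COLLATE utf8_bin;"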
Re-create /dev/null
rm -f /dev/null
mknod /dev/null c 1 3
Script
Script Optimization

Advanced Shell Scripting Techniques: Automating Complex Tasks with Bash

  1. Use Built-in Commands: Built-in commands execute faster because they don’t require loading an external process.
  2. Minimize Subshells: Subshells can be expensive in terms of performance.
# Inefficient
output=$(cat file.txt)

# Efficient
output=$(<file.txt)
  3. Use Arrays for Bulk Data: When handling a large amount of data, arrays can be more efficient and easier to manage than multiple variables.
# Inefficient
item1="apple"
item2="banana"
item3="cherry"

# Efficient
items=("apple" "banana" "cherry")
for item in "${items[@]}"; do
    echo "$item"
done
  4. Enable Noclobber: To prevent accidental overwriting of files.
set -o noclobber
  5. Use Functions: Functions allow you to encapsulate and reuse code, making scripts cleaner and reducing redundancy.
  6. Efficient File Operations: When performing file operations, use efficient techniques to minimize resource usage.
# Inefficient
while read -r line; do
    echo "$line"
done < file.txt

# Efficient
while IFS= read -r line; do
    echo "$line"
done < file.txt
  7. Parallel Processing: Tools like xargs and GNU parallel can be incredibly useful; see the sketch below.
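# e.g. fan independent jobs out across all cores with GNU xargs; gzip is an arbitrary stand-in workload
find . -name '*.log' -print0 | xargs -0 -P "$(nproc)" -I {} gzip {}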
  8. Error Handling: Robust error handling is critical for creating reliable and maintainable scripts.
# Exit on Error: Using set -e ensures that your script exits immediately if any command fails, preventing cascading errors.
set -e

# Custom Error Messages: Implement custom error messages to provide more context when something goes wrong.
command1 || { echo "command1 failed"; exit 1; }

# Trap Signals: Use the `trap` command to catch and handle signals and errors gracefully.
trap 'echo "Error occurred"; cleanup; exit 1' ERR

function cleanup() {
    # cleanup code goes here; ':' keeps the otherwise-empty body syntactically valid
    :
}

# Validate Inputs: Always validate user inputs and script arguments to prevent unexpected behavior.
if [[ -z "$1" ]]; then
    echo "Usage: $0 <argument>"
    exit 1
fi

# Logging: Implement logging to keep track of script execution and diagnose issues.
logfile="script.log"
exec > >(tee -i "$logfile")
exec 2>&1

echo "Script started"
  9. Automating Complex System Administration Tasks (a cron sketch for the first item follows this list):
    1. Automated Backups
    2. System Monitoring
    3. User Management
    4. Automated Updates
    5. Network Configuration
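As a sketch of the first item, a nightly configuration backup fits in a single cron entry (paths are placeholders; % must be escaped in crontab):
0 2 * * * tar czf /backup/etc-$(date +\%F).tar.gz /etc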
Build with secret
  • Dockerfile
# syntax = docker/dockerfile:1.6
FROM golang:1.21.1-alpine3.18
RUN --mount=type=secret,id=mysecret,target=/root/.ssh/id_rsa git clone git@gitlab.com:ricky/repo.git
  • Command
export DOCKER_BUILDKIT=1
docker build --secret id=mysecret,src=id_rsa -t image .
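
# alternative: BuildKit SSH forwarding never copies the key into any layer
# (Dockerfile side: RUN --mount=type=ssh git clone git@gitlab.com:ricky/repo.git)
docker build --ssh default -t image .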
Compose
# Force pull image (requires Compose v2)
docker compose up -d --pull always
Create buildx instance
# create buildx instance
docker buildx create --name builder --bootstrap --driver docker-container
# install emulators
docker run --privileged --rm tonistiigi/binfmt --install all
Create Network
docker network create -d bridge --subnet 172.100.0.0/24 --gateway 172.100.0.1 backend_dev
Multiple build-arg
docker build . -f ./scripts/Dockerfile \
  --build-arg Date="$(date)" \
  --build-arg Tag="$(git describe --tags --abbrev=0)" \
  --build-arg Commit="$(git rev-list -n 1 --tags)" \
  -t ops-cli
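
# the Dockerfile must declare matching ARGs for the values to be usable; the LABEL is just one way to consume them
ARG Date
ARG Tag
ARG Commit
LABEL build.date="${Date}" build.tag="${Tag}" build.commit="${Commit}"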
Multiple platform
# create and use buildx instance
docker buildx create --use --name builder
# build multiple platform
docker buildx build --push --platform linux/arm64,linux/amd64 -t zeyanlin/ops-cli .
Run container in different platform
finch run -it --rm --platform=linux/arm64 zeyanlin/ops-cli /bin/sh
docker-compose
cAdvisor
Elasticsearch
local dev
NodeJS
rstudio
rsyncd
Dockerfile
awscli
buildx
  • Dockerfile
  • docker buildx build --push --platform linux/arm64,linux/amd64 -t zeyanlin/app .
dind
golang
  • Dockerfile
  • docker build --secret id=mysecret,src=id_rsa -t app .
goproxy
  • Dockerfile
  • docker buildx build -f goproxy/Dockerfile --platform linux/amd64,linux/arm64 -t zeyanlin/goproxy:latest --push .
Gitlab-ci
nginx
rstudio
supervisord
File create time

1. Find Inode

$ stat dns.yaml
  File: dns.yaml
  Size: 1003        Blocks: 8          IO Block: 4096   regular file
Device: ca01h/51713d    Inode: 3595636     Links: 1
Access: (0644/-rw-r--r--)  Uid: ( 1000/  ubuntu)   Gid: ( 1000/  ubuntu)
Access: 2022-05-03 12:59:59.996755279 +0800
Modify: 2021-12-10 18:27:54.157585209 +0800
Change: 2022-01-07 14:57:58.619727878 +0800
 Birth: -

or

$ ls -i dns.yaml
3585173 dns.yaml

2. Find Filesystem

$ df dns.yaml
Filesystem     1K-blocks     Used Available Use% Mounted on
/dev/root      101583780 25703988  75863408  26% /

3. Get Create Time

$ sudo debugfs -R 'stat <3595636>' /dev/root
Inode: 3595636   Type: regular    Mode:  0644   Flags: 0x80000
Generation: 449657737    Version: 0x00000000:00000001
User:  1000   Group:  1000   Project:     0   Size: 1003
File ACL: 0
Links: 1   Blockcount: 8
Fragment:  Address: 0    Number: 0    Size: 0
 ctime: 0x61d7e476:93c13018 -- Fri Jan  7 14:57:58 2022
 atime: 0x6270b6cf:eda51d3c -- Tue May  3 12:59:59 2022
 mtime: 0x61b32baa:25923ce4 -- Fri Dec 10 18:27:54 2021
crtime: 0x61b32baa:25923ce4 -- Fri Dec 10 18:27:54 2021
Size of extra inode fields: 32
Inode checksum: 0x5b176bb2
EXTENTS:
(0):2665902
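
The three steps collapse into one line on GNU systems (coreutils stat and df assumed):

$ sudo debugfs -R "stat <$(stat -c %i dns.yaml)>" "$(df --output=source dns.yaml | tail -1)" | grep crtime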
Display Ubuntu's Message of the Day
sudo chmod +x /etc/update-motd.d/*
List domains
sed 's/ //g' domains-info.md | awk -F '|' '{if($3 ~ /.*\.com/)print $3}' | sort | uniq