16 Commits

Author SHA1 Message Date
CN-JS-HuiBai
faeeb9bc3c 修改README 2026-04-16 21:57:35 +08:00
CN-JS-HuiBai
d6ab7ddfad 添加解压功能 2026-04-16 21:55:49 +08:00
CN-JS-HuiBai
5a06cf400b 添加存储桶复制脚本 2026-04-16 21:52:54 +08:00
CN-JS-HuiBai
95b664a772 修复DNS逻辑错误的问题 2026-04-16 21:40:39 +08:00
CN-JS-HuiBai
830944682f 完善和修改README 2026-04-16 21:13:07 +08:00
CN-JS-HuiBai
dc9b2320ad 修复安装脚本编码错误的问题 2026-04-16 21:04:14 +08:00
CN-JS-HuiBai
6db9f6941c 索引和工作流修正2 2026-04-16 20:37:13 +08:00
CN-JS-HuiBai
182cd7d0d6 索引和工作流修正 2026-04-16 20:37:05 +08:00
CN-JS-HuiBai
6bc7900b27 修改生成文件为多个可执行文件 2026-04-16 20:25:52 +08:00
CN-JS-HuiBai
e94db9dfd0 修复依赖错误 2026-04-16 19:34:56 +08:00
CN-JS-HuiBai
f279e70ea5 添加自动化构建配置 2026-04-16 19:31:05 +08:00
CN-JS-HuiBai
948c0a713f 移除Github Workflows 2026-04-16 19:26:55 +08:00
CN-JS-HuiBai
0f03da97c6 优化install.sh
Some checks failed
Build Linux Packages / Calculate version (release) Has been cancelled
Build Linux Packages / Build binary (386, i386, true, linux, i386) (release) Has been cancelled
Build Linux Packages / Build binary (amd64, amd64, true, linux, x86_64, x86_64) (release) Has been cancelled
Build Linux Packages / Build binary (arm, armel, 6, linux, armv6hl) (release) Has been cancelled
Build Linux Packages / Build binary (arm, armhf, 7, true, linux, armv7hl, armv7hl) (release) Has been cancelled
Build Linux Packages / Build binary (arm64, arm64, true, linux, aarch64, aarch64) (release) Has been cancelled
Build Linux Packages / Build binary (loong64, loongarch64, true, linux, loongarch64) (release) Has been cancelled
Build Linux Packages / Build binary (mips64le, mips64el, linux, mips64el) (release) Has been cancelled
Build Linux Packages / Build binary (mipsle, mipsel, softfloat, true, linux, mipsel) (release) Has been cancelled
Build Linux Packages / Build binary (ppc64le, ppc64el, linux, ppc64le) (release) Has been cancelled
Build Linux Packages / Build binary (riscv64, riscv64, true, linux, riscv64) (release) Has been cancelled
Build Linux Packages / Build binary (s390x, s390x, linux, s390x) (release) Has been cancelled
Build Linux Packages / Upload builds (release) Has been cancelled
Publish Docker Images / Build binary (386, linux/386, true) (release) Has been cancelled
Publish Docker Images / Build binary (amd64, linux/amd64, true) (release) Has been cancelled
Publish Docker Images / Build binary (arm, linux/arm/v6, 6) (release) Has been cancelled
Publish Docker Images / Build binary (arm, linux/arm/v7, 7, true) (release) Has been cancelled
Publish Docker Images / Build binary (arm64, linux/arm64, true) (release) Has been cancelled
Publish Docker Images / Build binary (loong64, linux/loong64, true) (release) Has been cancelled
Publish Docker Images / Build binary (mipsle, linux/mipsle, softfloat, true) (release) Has been cancelled
Publish Docker Images / Build binary (ppc64le, linux/ppc64le) (release) Has been cancelled
Publish Docker Images / Build binary (riscv64, linux/riscv64, true) (release) Has been cancelled
Publish Docker Images / Build binary (s390x, linux/s390x) (release) Has been cancelled
Publish Docker Images / Build Docker image (ghcr.io/loong64/alpine:edge, linux/loong64) (release) Has been cancelled
Publish Docker Images / Build Docker image (linux/386) (release) Has been cancelled
Publish Docker Images / Build Docker image (linux/amd64) (release) Has been cancelled
Publish Docker Images / Build Docker image (linux/arm/v6) (release) Has been cancelled
Publish Docker Images / Build Docker image (linux/arm/v7) (release) Has been cancelled
Publish Docker Images / Build Docker image (linux/arm64) (release) Has been cancelled
Publish Docker Images / Build Docker image (linux/ppc64le) (release) Has been cancelled
Publish Docker Images / Build Docker image (linux/riscv64) (release) Has been cancelled
Publish Docker Images / Build Docker image (linux/s390x) (release) Has been cancelled
Publish Docker Images / merge (release) Has been cancelled
2026-04-16 12:30:47 +08:00
CN-JS-HuiBai
a69d1cd4ad 添加更新脚本,修改README 2026-04-16 12:13:23 +08:00
CN-JS-HuiBai
551935c819 合并SingBox Stable源码 2026-04-16 11:50:46 +08:00
CN-JS-HuiBai
0416696599 修复无法编译的错误 2026-04-16 10:53:38 +08:00
42 changed files with 2556 additions and 2291 deletions

4
.gitattributes vendored Normal file
View File

@@ -0,0 +1,4 @@
# Force LF line endings for files that are executed or parsed on Linux,
# regardless of the committer's platform or core.autocrlf setting.
*.sh text eol=lf
*.bash text eol=lf
*.service text eol=lf
*.conf text eol=lf

View File

@@ -0,0 +1,118 @@
# Gitea Actions workflow: build sing-box Linux binaries on branch push or
# manual dispatch, then upload them as a single artifact.
# NOTE(review): indentation was lost in the rendered diff; the nesting below is
# reconstructed to standard GitHub/Gitea Actions structure — verify against the
# committed file.
name: Auto Build
on:
  push:
    branches:
      - stable
      - testing
      - unstable
  workflow_dispatch:
    inputs:
      version:
        description: Override embedded version
        required: false
        type: string
      targets:
        description: Space separated build targets, for example "linux-amd64 windows-amd64"
        required: false
        default: all
        type: string
jobs:
  build:
    name: Build binaries
    runs-on: ubuntu-22.04
    steps:
      # Manual checkout via git instead of actions/checkout, authenticating
      # with the runner token when one is available.
      - name: Checkout source
        shell: bash
        env:
          CI_SERVER_URL: ${{ gitea.server_url }}
          CI_REPOSITORY: ${{ gitea.repository }}
          CI_REF: ${{ gitea.ref }}
          CI_SHA: ${{ gitea.sha }}
          CI_TOKEN: ${{ github.token }}
        run: |
          set -euo pipefail
          workspace="${GITHUB_WORKSPACE:-$PWD}"
          repo_url="${CI_SERVER_URL}/${CI_REPOSITORY}.git"
          rm -rf "$workspace/.git"
          git init -b ci "$workspace"
          git -C "$workspace" remote add origin "$repo_url"
          if [[ -n "${CI_TOKEN}" ]]; then
            auth_header="Authorization: token ${CI_TOKEN}"
            git -C "$workspace" -c http.extraHeader="$auth_header" fetch --depth=1 origin "${CI_REF}"
          else
            git -C "$workspace" fetch --depth=1 origin "${CI_REF}"
          fi
          git -C "$workspace" checkout --detach FETCH_HEAD
      - name: Setup Go
        uses: actions/setup-go@v5
        with:
          go-version: ~1.25.9
      # Keep Go build/module caches inside the workspace so the runner can
      # persist them between jobs.
      - name: Prepare Go cache
        shell: bash
        run: |
          set -euo pipefail
          mkdir -p "$PWD/.gitea-cache/go-build" "$PWD/.gitea-cache/gomod"
          echo "GOCACHE=$PWD/.gitea-cache/go-build" >> "$GITHUB_ENV"
          echo "GOMODCACHE=$PWD/.gitea-cache/gomod" >> "$GITHUB_ENV"
      - name: Download Go modules
        shell: bash
        run: |
          set -euo pipefail
          go mod download
      - name: Verify Go modules
        shell: bash
        run: |
          set -euo pipefail
          go mod verify
      # Derive VERSION (input override > read_tag > git describe) and the
      # target list (input override > "all"), exported via GITHUB_ENV.
      # NOTE(review): ${{ inputs.* }} is interpolated directly into shell —
      # acceptable only because dispatch inputs come from trusted maintainers.
      - name: Resolve build metadata
        shell: bash
        run: |
          set -euo pipefail
          version_input="${{ inputs.version }}"
          targets_input="${{ inputs.targets }}"
          if [[ -n "$version_input" ]]; then
            version="$version_input"
          else
            version="$(go run ./cmd/internal/read_tag)"
            if [[ -z "$version" || "$version" == "unknown" ]]; then
              version="$(git describe --tags --always 2>/dev/null || git rev-parse --short HEAD)"
            fi
          fi
          if [[ -z "$targets_input" ]]; then
            targets_input="all"
          fi
          echo "VERSION=$version" >> "$GITHUB_ENV"
          echo "BUILD_TARGETS=$targets_input" >> "$GITHUB_ENV"
      - name: Build
        shell: bash
        run: |
          set -euo pipefail
          chmod +x ./building-linux.sh
          rm -rf dist
          mkdir -p dist
          # BUILD_TARGETS is intentionally unquoted: word-splitting turns the
          # space-separated list into individual script arguments.
          VERSION="$VERSION" DIST_DIR="$PWD/dist" ./building-linux.sh ${BUILD_TARGETS}
      - name: Upload artifacts
        uses: actions/upload-artifact@v3
        with:
          name: sing-box-${{ env.VERSION }}
          path: dist
          if-no-files-found: error

View File

@@ -1 +0,0 @@
e4926ba205fae5351e3d3eeafff7e7029654424a

1
.github/FUNDING.yml vendored
View File

@@ -1 +0,0 @@
github: nekohasekai

View File

@@ -1,88 +0,0 @@
name: Bug report
description: "Report sing-box bug"
body:
- type: dropdown
attributes:
label: Operating system
description: Operating system type
options:
- iOS
- macOS
- Apple tvOS
- Android
- Windows
- Linux
- Others
validations:
required: true
- type: input
attributes:
label: System version
description: Please provide the operating system version
validations:
required: true
- type: dropdown
attributes:
label: Installation type
description: Please provide the sing-box installation type
options:
- Original sing-box Command Line
- sing-box for iOS Graphical Client
- sing-box for macOS Graphical Client
- sing-box for Apple tvOS Graphical Client
- sing-box for Android Graphical Client
- Third-party graphical clients that advertise themselves as using sing-box (Windows)
- Third-party graphical clients that advertise themselves as using sing-box (Android)
- Others
validations:
required: true
- type: input
attributes:
description: Graphical client version
label: If you are using a graphical client, please provide the version of the client.
- type: textarea
attributes:
label: Version
description: If you are using the original command line program, please provide the output of the `sing-box version` command.
render: shell
- type: textarea
attributes:
label: Description
description: Please provide a detailed description of the error.
validations:
required: true
- type: textarea
attributes:
label: Reproduction
description: Please provide the steps to reproduce the error, including the configuration files and procedures that can locally (not dependent on the remote server) reproduce the error using the original command line program of sing-box.
validations:
required: true
- type: textarea
attributes:
label: Logs
description: |-
In addition, if you encounter a crash with the graphical client, please also provide crash logs.
For Apple platform clients, please check `Settings - View Service Log` for crash logs.
For the Android client, please check the `/sdcard/Android/data/io.nekohasekai.sfa/files/stderr.log` file for crash logs.
render: shell
- type: checkboxes
id: supporter
attributes:
label: Supporter
options:
- label: I am a [sponsor](https://github.com/sponsors/nekohasekai/)
- type: checkboxes
attributes:
label: Integrity requirements
description: |-
Please check all of the following options to prove that you have read and understood the requirements, otherwise this issue will be closed.
Sing-box is not a project aimed to please users who can't make any meaningful contributions and gain unethical influence. If you deceive here to deliberately waste the time of the developers, you will be permanently blocked.
options:
- label: I confirm that I have read the documentation, understand the meaning of all the configuration items I wrote, and did not pile up seemingly useful options or default values.
required: true
- label: I confirm that I have provided the server and client configuration files and process that can be reproduced locally, instead of a complicated client configuration file that has been stripped of sensitive data.
required: true
- label: I confirm that I have provided the simplest configuration that can be used to reproduce the error I reported, instead of depending on remote servers, TUN, graphical interface clients, or other closed-source software.
required: true
- label: I confirm that I have provided the complete configuration files and logs, rather than just providing parts I think are useful out of confidence in my own intelligence.
required: true

View File

@@ -1,88 +0,0 @@
name: 错误反馈
description: "提交 sing-box 漏洞"
body:
- type: dropdown
attributes:
label: 操作系统
description: 请提供操作系统类型
options:
- iOS
- macOS
- Apple tvOS
- Android
- Windows
- Linux
- 其他
validations:
required: true
- type: input
attributes:
label: 系统版本
description: 请提供操作系统版本
validations:
required: true
- type: dropdown
attributes:
label: 安装类型
description: 请提供该 sing-box 安装类型
options:
- sing-box 原始命令行程序
- sing-box for iOS 图形客户端程序
- sing-box for macOS 图形客户端程序
- sing-box for Apple tvOS 图形客户端程序
- sing-box for Android 图形客户端程序
- 宣传使用 sing-box 的第三方图形客户端程序 (Windows)
- 宣传使用 sing-box 的第三方图形客户端程序 (Android)
- 其他
validations:
required: true
- type: input
attributes:
description: 图形客户端版本
label: 如果您使用图形客户端程序,请提供该程序版本。
- type: textarea
attributes:
label: 版本
description: 如果您使用原始命令行程序,请提供 `sing-box version` 命令的输出。
render: shell
- type: textarea
attributes:
label: 描述
description: 请提供错误的详细描述。
validations:
required: true
- type: textarea
attributes:
label: 重现方式
description: 请提供重现错误的步骤,必须包括可以在本地(不依赖与远程服务器)使用 sing-box 原始命令行程序重现错误的配置文件与流程。
validations:
required: true
- type: textarea
attributes:
label: 日志
description: |-
此外,如果您遭遇图形界面应用程序崩溃,请附加提供崩溃日志。
对于 Apple 平台图形客户端程序,请检查 `Settings - View Service Log` 以导出崩溃日志。
对于 Android 图形客户端程序,请检查 `/sdcard/Android/data/io.nekohasekai.sfa/files/stderr.log` 文件以导出崩溃日志。
render: shell
- type: checkboxes
id: supporter
attributes:
label: 支持我们
options:
- label: 我已经 [赞助](https://github.com/sponsors/nekohasekai/)
- type: checkboxes
attributes:
label: 完整性要求
description: |-
请勾选以下所有选项以证明您已经阅读并理解了以下要求,否则该 issue 将被关闭。
sing-box 不是讨好无法作出任何意义上的贡献的最终用户并获取非道德影响力的项目,如果您在此处欺骗以故意浪费开发者的时间,您将被永久封锁。
options:
- label: 我保证阅读了文档,了解所有我编写的配置文件项的含义,而不是大量堆砌看似有用的选项或默认值。
required: true
- label: 我保证提供了可以在本地重现该问题的服务器、客户端配置文件与流程,而不是一个脱敏的复杂客户端配置文件。
required: true
- label: 我保证提供了可用于重现我报告的错误的最简配置而不是依赖远程服务器、TUN、图形界面客户端或者其他闭源软件。
required: true
- label: 我保证提供了完整的配置文件与日志,而不是出于对自身智力的自信而仅提供了部分认为有用的部分。
required: true

View File

@@ -1,81 +0,0 @@
#!/usr/bin/env bash
# Package a prebuilt sing-box binary into an Alpine .apk (OpenRC + systemd
# layout), generating the apk conffile/list metadata by hand.
# Usage: $0 <architecture> <version> <binary_path> <output_path>
set -e -o pipefail
ARCHITECTURE="$1"
VERSION="$2"
BINARY_PATH="$3"
OUTPUT_PATH="$4"
if [ -z "$ARCHITECTURE" ] || [ -z "$VERSION" ] || [ -z "$BINARY_PATH" ] || [ -z "$OUTPUT_PATH" ]; then
echo "Usage: $0 <architecture> <version> <binary_path> <output_path>"
exit 1
fi
# Repository root (this script lives one directory below it).
PROJECT=$(cd "$(dirname "$0")/.."; pwd)
# Convert version to APK format:
# 1.13.0-beta.8 -> 1.13.0_beta8-r0
# 1.13.0-rc.3 -> 1.13.0_rc3-r0
# 1.13.0 -> 1.13.0-r0
APK_VERSION=$(echo "$VERSION" | sed -E 's/-([a-z]+)\.([0-9]+)/_\1\2/')
APK_VERSION="${APK_VERSION}-r0"
# Staging root for the package filesystem; cleaned up on any exit path.
ROOT_DIR=$(mktemp -d)
trap 'rm -rf "$ROOT_DIR"' EXIT
# Binary
install -Dm755 "$BINARY_PATH" "$ROOT_DIR/usr/bin/sing-box"
# Config files
install -Dm644 "$PROJECT/release/config/config.json" "$ROOT_DIR/etc/sing-box/config.json"
install -Dm755 "$PROJECT/release/config/sing-box.initd" "$ROOT_DIR/etc/init.d/sing-box"
install -Dm644 "$PROJECT/release/config/sing-box.confd" "$ROOT_DIR/etc/conf.d/sing-box"
# Service files
install -Dm644 "$PROJECT/release/config/sing-box.service" "$ROOT_DIR/usr/lib/systemd/system/sing-box.service"
install -Dm644 "$PROJECT/release/config/sing-box@.service" "$ROOT_DIR/usr/lib/systemd/system/sing-box@.service"
# Completions
install -Dm644 "$PROJECT/release/completions/sing-box.bash" "$ROOT_DIR/usr/share/bash-completion/completions/sing-box.bash"
install -Dm644 "$PROJECT/release/completions/sing-box.fish" "$ROOT_DIR/usr/share/fish/vendor_completions.d/sing-box.fish"
install -Dm644 "$PROJECT/release/completions/sing-box.zsh" "$ROOT_DIR/usr/share/zsh/site-functions/_sing-box"
# License
install -Dm644 "$PROJECT/LICENSE" "$ROOT_DIR/usr/share/licenses/sing-box/LICENSE"
# APK metadata
PACKAGES_DIR="$ROOT_DIR/lib/apk/packages"
mkdir -p "$PACKAGES_DIR"
# .conffiles — files apk treats as user-editable configuration.
cat > "$PACKAGES_DIR/.conffiles" <<'EOF'
/etc/conf.d/sing-box
/etc/init.d/sing-box
/etc/sing-box/config.json
EOF
# .conffiles_static (sha256 checksums)
# Format per line: "<path> <sha256>", one entry per conffile above.
while IFS= read -r conffile; do
sha256=$(sha256sum "$ROOT_DIR$conffile" | cut -d' ' -f1)
echo "$conffile $sha256"
done < "$PACKAGES_DIR/.conffiles" > "$PACKAGES_DIR/.conffiles_static"
# .list (all files, excluding lib/apk/packages/ metadata)
# find with no explicit action implicitly -prints the whole -o expression.
(cd "$ROOT_DIR" && find . -type f -o -type l) \
| sed 's|^\./|/|' \
| grep -v '^/lib/apk/packages/' \
| sort > "$PACKAGES_DIR/.list"
# Build APK
apk mkpkg \
--info "name:sing-box" \
--info "version:${APK_VERSION}" \
--info "description:The universal proxy platform." \
--info "arch:${ARCHITECTURE}" \
--info "license:GPL-3.0-or-later with name use or association addition" \
--info "origin:sing-box" \
--info "url:https://sing-box.sagernet.org/" \
--info "maintainer:nekohasekai <contact-git@sekai.icu>" \
--files "$ROOT_DIR" \
--output "$OUTPUT_PATH"

View File

@@ -1,80 +0,0 @@
#!/usr/bin/env bash
# Package a prebuilt sing-box binary into an OpenWrt-flavoured .apk
# (procd init + UCI config layout) with runtime dependency metadata.
# Usage: $0 <architecture> <version> <binary_path> <output_path>
set -e -o pipefail
ARCHITECTURE="$1"
VERSION="$2"
BINARY_PATH="$3"
OUTPUT_PATH="$4"
if [ -z "$ARCHITECTURE" ] || [ -z "$VERSION" ] || [ -z "$BINARY_PATH" ] || [ -z "$OUTPUT_PATH" ]; then
echo "Usage: $0 <architecture> <version> <binary_path> <output_path>"
exit 1
fi
# Repository root (this script lives one directory below it).
PROJECT=$(cd "$(dirname "$0")/.."; pwd)
# Convert version to APK format:
# 1.13.0-beta.8 -> 1.13.0_beta8-r0
# 1.13.0-rc.3 -> 1.13.0_rc3-r0
# 1.13.0 -> 1.13.0-r0
APK_VERSION=$(echo "$VERSION" | sed -E 's/-([a-z]+)\.([0-9]+)/_\1\2/')
APK_VERSION="${APK_VERSION}-r0"
# Staging root for the package filesystem; cleaned up on any exit path.
ROOT_DIR=$(mktemp -d)
trap 'rm -rf "$ROOT_DIR"' EXIT
# Binary
install -Dm755 "$BINARY_PATH" "$ROOT_DIR/usr/bin/sing-box"
# Config files (OpenWrt layout: UCI config, procd init, sysupgrade keep list)
install -Dm644 "$PROJECT/release/config/config.json" "$ROOT_DIR/etc/sing-box/config.json"
install -Dm644 "$PROJECT/release/config/openwrt.conf" "$ROOT_DIR/etc/config/sing-box"
install -Dm755 "$PROJECT/release/config/openwrt.init" "$ROOT_DIR/etc/init.d/sing-box"
install -Dm644 "$PROJECT/release/config/openwrt.keep" "$ROOT_DIR/lib/upgrade/keep.d/sing-box"
# Completions
install -Dm644 "$PROJECT/release/completions/sing-box.bash" "$ROOT_DIR/usr/share/bash-completion/completions/sing-box.bash"
install -Dm644 "$PROJECT/release/completions/sing-box.fish" "$ROOT_DIR/usr/share/fish/vendor_completions.d/sing-box.fish"
install -Dm644 "$PROJECT/release/completions/sing-box.zsh" "$ROOT_DIR/usr/share/zsh/site-functions/_sing-box"
# License
install -Dm644 "$PROJECT/LICENSE" "$ROOT_DIR/usr/share/licenses/sing-box/LICENSE"
# APK metadata
PACKAGES_DIR="$ROOT_DIR/lib/apk/packages"
mkdir -p "$PACKAGES_DIR"
# .conffiles — files apk treats as user-editable configuration.
cat > "$PACKAGES_DIR/.conffiles" <<'EOF'
/etc/config/sing-box
/etc/sing-box/config.json
EOF
# .conffiles_static (sha256 checksums)
# Format per line: "<path> <sha256>", one entry per conffile above.
while IFS= read -r conffile; do
sha256=$(sha256sum "$ROOT_DIR$conffile" | cut -d' ' -f1)
echo "$conffile $sha256"
done < "$PACKAGES_DIR/.conffiles" > "$PACKAGES_DIR/.conffiles_static"
# .list (all files, excluding lib/apk/packages/ metadata)
# find with no explicit action implicitly -prints the whole -o expression.
(cd "$ROOT_DIR" && find . -type f -o -type l) \
| sed 's|^\./|/|' \
| grep -v '^/lib/apk/packages/' \
| sort > "$PACKAGES_DIR/.list"
# Build APK
apk mkpkg \
--info "name:sing-box" \
--info "version:${APK_VERSION}" \
--info "description:The universal proxy platform." \
--info "arch:${ARCHITECTURE}" \
--info "license:GPL-3.0-or-later" \
--info "origin:sing-box" \
--info "url:https://sing-box.sagernet.org/" \
--info "maintainer:nekohasekai <contact-git@sekai.icu>" \
--info "depends:ca-bundle kmod-inet-diag kmod-tun firewall4 kmod-nft-queue" \
--info "provider-priority:100" \
--script "pre-deinstall:${PROJECT}/release/config/openwrt.prerm" \
--files "$ROOT_DIR" \
--output "$OUTPUT_PATH"

28
.github/deb2ipk.sh vendored
View File

@@ -1,28 +0,0 @@
#!/usr/bin/env bash
# Convert a Debian .deb into an OpenWrt .ipk by rewriting the Architecture
# field in the control file and re-packing the three members.
# mod from https://gist.github.com/pldubouilh/c5703052986bfdd404005951dee54683
# Usage: $0 <architecture> <deb_path> <output_path>
set -e -o pipefail
ARCHITECTURE="$1"
DEB_PATH="$2"
OUTPUT_PATH="$3"
if [ -z "$ARCHITECTURE" ] || [ -z "$DEB_PATH" ] || [ -z "$OUTPUT_PATH" ]; then
  echo "Usage: $0 <architecture> <deb_path> <output_path>" >&2
  exit 1
fi
# Work in a throwaway directory; the trap guarantees cleanup on any exit.
TMP_PATH=$(mktemp -d)
trap 'rm -rf "$TMP_PATH"' EXIT
cp -- "$DEB_PATH" "$TMP_PATH"
pushd "$TMP_PATH" >/dev/null
DEB_NAME=$(basename -- "$DEB_PATH")
# A .deb is an ar archive: debian-binary, control.tar.gz, data.tar.gz.
ar x "$DEB_NAME"
mkdir control
pushd control >/dev/null
tar xf ../control.tar.gz
rm md5sums
# Rewrite the Architecture field to the ipk target architecture.
# (\w is a GNU sed extension, as in the original.)
sed -i "s/Architecture: \w*/Architecture: $ARCHITECTURE/g" ./control
cat control
tar czf ../control.tar.gz ./*
popd >/dev/null
PKG_NAME=${DEB_NAME%.deb}
tar czf "$PKG_NAME.ipk" control.tar.gz data.tar.gz debian-binary
popd >/dev/null
cp -- "$TMP_PATH/$PKG_NAME.ipk" "$OUTPUT_PATH"

View File

@@ -1,33 +0,0 @@
#!/usr/bin/env bash
# Detect which release track HEAD belongs to (stable/beta/testing/oldstable)
# and export TRACK, NAME and DOCKER_TAG via $GITHUB_ENV for later steps.
set -euo pipefail

remote_branches=$(git branch -r --contains HEAD)

# True when HEAD is reachable from the given remote branch.
has_branch() {
  grep -q "$1" <<<"$remote_branches"
}

if has_branch 'origin/stable'; then
  track=stable
elif has_branch 'origin/testing'; then
  track=testing
elif has_branch 'origin/oldstable'; then
  track=oldstable
else
  echo "ERROR: HEAD is not on any known release branch (stable/testing/oldstable)" >&2
  exit 1
fi

# An exact pre-release tag (contains '-') on the stable branch means beta.
if [[ "$track" == "stable" ]]; then
  head_tag=$(git describe --tags --exact-match HEAD 2>/dev/null || true)
  if [[ -n "$head_tag" && "$head_tag" == *"-"* ]]; then
    track=beta
  fi
fi

case "$track" in
  stable)    name=sing-box;           docker_tag=latest ;;
  beta)      name=sing-box-beta;      docker_tag=latest-beta ;;
  testing)   name=sing-box-testing;   docker_tag=latest-testing ;;
  oldstable) name=sing-box-oldstable; docker_tag=latest-oldstable ;;
esac

echo "track=${track} name=${name} docker_tag=${docker_tag}" >&2
{
  echo "TRACK=${track}"
  echo "NAME=${name}"
  echo "DOCKER_TAG=${docker_tag}"
} >> "$GITHUB_ENV"

28
.github/renovate.json vendored
View File

@@ -1,28 +0,0 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"commitMessagePrefix": "[dependencies]",
"extends": [
"config:base",
":disableRateLimiting"
],
"baseBranches": [
"unstable"
],
"golang": {
"enabled": false
},
"packageRules": [
{
"matchManagers": [
"github-actions"
],
"groupName": "github-actions"
},
{
"matchManagers": [
"dockerfile"
],
"groupName": "Dockerfile"
}
]
}

View File

@@ -1,45 +0,0 @@
#!/usr/bin/env bash
# Download the darwin-arm64 Go toolchain into ~/go/go_osx and apply the
# SagerNet compatibility patches listed in PATCH_COMMITS.
set -euo pipefail
VERSION="1.25.9"
# Commits from SagerNet/go release-branch.go1.25 applied as .diff patches.
PATCH_COMMITS=(
"afe69d3cec1c6dcf0f1797b20546795730850070"
"1ed289b0cf87dc5aae9c6fe1aa5f200a83412938"
)
CURL_ARGS=(
-fL
--silent
--show-error
)
# Authenticate patch downloads when a token is available (avoids rate limits).
if [[ -n "${GITHUB_TOKEN:-}" ]]; then
CURL_ARGS+=(-H "Authorization: Bearer ${GITHUB_TOKEN}")
fi
mkdir -p "$HOME/go"
cd "$HOME/go"
wget "https://dl.google.com/go/go${VERSION}.darwin-arm64.tar.gz"
tar -xzf "go${VERSION}.darwin-arm64.tar.gz"
#cp -a go go_bootstrap
mv go go_osx
cd go_osx
# these patch URLs only work on golang1.25.x
# that means after golang1.26 release it must be changed
# see: https://github.com/SagerNet/go/commits/release-branch.go1.25/
# revert:
# 33d3f603c1: "cmd/link/internal/ld: use 12.0.0 OS/SDK versions for macOS linking"
# 937368f84e: "crypto/x509: change how we retrieve chains on darwin"
for patch_commit in "${PATCH_COMMITS[@]}"; do
curl "${CURL_ARGS[@]}" "https://github.com/SagerNet/go/commit/${patch_commit}.diff" | patch --verbose -p 1
done
# Rebuild is not needed: we build with CGO_ENABLED=1, so Apple's external
# linker handles LC_BUILD_VERSION via MACOSX_DEPLOYMENT_TARGET, and the
# stdlib (crypto/x509) is compiled from patched src automatically.
#cd src
#GOROOT_BOOTSTRAP="$HOME/go/go_bootstrap" ./make.bash
#cd ../..
#rm -rf go_bootstrap "go${VERSION}.darwin-arm64.tar.gz"

View File

@@ -1,46 +0,0 @@
#!/usr/bin/env bash
# Download the linux-amd64 Go toolchain into ~/go/go_win7 and apply the
# MetaCubeX patches restoring Windows 7 support (listed in PATCH_COMMITS).
set -euo pipefail
VERSION="1.25.9"
# Commits from MetaCubeX/go release-branch.go1.25 applied as .diff patches;
# see the revert/fix list in the comments below.
PATCH_COMMITS=(
"466f6c7a29bc098b0d4c987b803c779222894a11"
"1bdabae205052afe1dadb2ad6f1ba612cdbc532a"
"a90777dcf692dd2168577853ba743b4338721b06"
"f6bddda4e8ff58a957462a1a09562924d5f3d05c"
"bed309eff415bcb3c77dd4bc3277b682b89a388d"
"34b899c2fb39b092db4fa67c4417e41dc046be4b"
)
CURL_ARGS=(
-fL
--silent
--show-error
)
# Authenticate patch downloads when a token is available (avoids rate limits).
if [[ -n "${GITHUB_TOKEN:-}" ]]; then
CURL_ARGS+=(-H "Authorization: Bearer ${GITHUB_TOKEN}")
fi
mkdir -p "$HOME/go"
cd "$HOME/go"
wget "https://dl.google.com/go/go${VERSION}.linux-amd64.tar.gz"
tar -xzf "go${VERSION}.linux-amd64.tar.gz"
mv go go_win7
cd go_win7
# modify from https://github.com/restic/restic/issues/4636#issuecomment-1896455557
# these patch URLs only work on golang1.25.x
# that means after golang1.26 release it must be changed
# see: https://github.com/MetaCubeX/go/commits/release-branch.go1.25/
# revert:
# 693def151adff1af707d82d28f55dba81ceb08e1: "crypto/rand,runtime: switch RtlGenRandom for ProcessPrng"
# 7c1157f9544922e96945196b47b95664b1e39108: "net: remove sysSocket fallback for Windows 7"
# 48042aa09c2f878c4faa576948b07fe625c4707a: "syscall: remove Windows 7 console handle workaround"
# a17d959debdb04cd550016a3501dd09d50cd62e7: "runtime: always use LoadLibraryEx to load system libraries"
# fixes:
# bed309eff415bcb3c77dd4bc3277b682b89a388d: "Fix os.RemoveAll not working on Windows7"
# 34b899c2fb39b092db4fa67c4417e41dc046be4b: "Revert \"os: remove 5ms sleep on Windows in (*Process).Wait\""
for patch_commit in "${PATCH_COMMITS[@]}"; do
curl "${CURL_ARGS[@]}" "https://github.com/MetaCubeX/go/commit/${patch_commit}.diff" | patch --verbose -p 1
done

View File

@@ -1,14 +0,0 @@
#!/usr/bin/env bash
# Sync the clients/apple and clients/android checkouts to their upstream
# FETCH_HEAD and stage the updated trees with git add.
# Fixes vs original: strict mode added (a failed fetch previously still reset
# and staged), expansions quoted, unused PROJECTS variable removed.
set -euo pipefail

# Fetch the latest upstream commit for clients/<name>, hard-reset to it,
# then stage the directory. $1 - client name (e.g. "apple").
updateClient() {
  pushd "clients/$1" >/dev/null
  git fetch
  git reset FETCH_HEAD --hard
  popd >/dev/null
  git add "clients/$1"
}

updateClient "apple"
updateClient "android"

View File

@@ -1,13 +0,0 @@
#!/usr/bin/env bash
# Bump the cronet-go dependency to the tip of its 'go' branch in the sibling
# checkout, tidy go.mod, and record the pinned revision for CI.
# Fixes vs original: expansions quoted, repeated rev-parse hoisted so both
# modules are guaranteed to pin the same revision, -u added to strict mode.
set -euo pipefail
SCRIPT_DIR=$(dirname "$0")
# Sibling checkout of sagernet/cronet-go, two levels above this script.
CRONET_REPO="$SCRIPT_DIR/../../cronet-go"
git -C "$CRONET_REPO" fetch origin main
git -C "$CRONET_REPO" fetch origin go
# Resolve the target commit once and reuse it everywhere.
GO_REV=$(git -C "$CRONET_REPO" rev-parse origin/go)
go get -x "github.com/sagernet/cronet-go/all@${GO_REV}"
go get -x "github.com/sagernet/cronet-go@${GO_REV}"
go mod tidy
printf '%s\n' "$GO_REV" > "$SCRIPT_DIR/CRONET_GO_VERSION"

View File

@@ -1,13 +0,0 @@
#!/usr/bin/env bash
set -e -o pipefail
SCRIPT_DIR=$(dirname "$0")
PROJECTS=$SCRIPT_DIR/../..
git -C $PROJECTS/cronet-go fetch origin dev
git -C $PROJECTS/cronet-go fetch origin go_dev
go get -x github.com/sagernet/cronet-go/all@$(git -C $PROJECTS/cronet-go rev-parse origin/go_dev)
go get -x github.com/sagernet/cronet-go@$(git -C $PROJECTS/cronet-go rev-parse origin/go_dev)
go mod tidy
git -C $PROJECTS/cronet-go rev-parse origin/dev > "$SCRIPT_DIR/CRONET_GO_VERSION"

View File

@@ -1,5 +0,0 @@
#!/usr/bin/env bash
PROJECTS=$(dirname "$0")/../..
go get -x github.com/sagernet/$1@$(git -C $PROJECTS/$1 rev-parse HEAD)
go mod tidy

File diff suppressed because it is too large Load Diff

View File

@@ -1,295 +0,0 @@
name: Publish Docker Images
on:
#push:
# branches:
# - stable
# - testing
release:
types:
- published
workflow_dispatch:
inputs:
tag:
description: "The tag version you want to build"
env:
REGISTRY_IMAGE: ghcr.io/sagernet/sing-box
jobs:
build_binary:
name: Build binary
runs-on: ubuntu-latest
strategy:
fail-fast: true
matrix:
include:
# Naive-enabled builds (musl)
- { arch: amd64, naive: true, docker_platform: "linux/amd64" }
- { arch: arm64, naive: true, docker_platform: "linux/arm64" }
- { arch: "386", naive: true, docker_platform: "linux/386" }
- { arch: arm, goarm: "7", naive: true, docker_platform: "linux/arm/v7" }
- { arch: mipsle, gomips: softfloat, naive: true, docker_platform: "linux/mipsle" }
- { arch: riscv64, naive: true, docker_platform: "linux/riscv64" }
- { arch: loong64, naive: true, docker_platform: "linux/loong64" }
# Non-naive builds
- { arch: arm, goarm: "6", docker_platform: "linux/arm/v6" }
- { arch: ppc64le, docker_platform: "linux/ppc64le" }
- { arch: s390x, docker_platform: "linux/s390x" }
steps:
- name: Get commit to build
id: ref
run: |-
if [[ -z "${{ github.event.inputs.tag }}" ]]; then
ref="${{ github.ref_name }}"
else
ref="${{ github.event.inputs.tag }}"
fi
echo "ref=$ref"
echo "ref=$ref" >> $GITHUB_OUTPUT
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
with:
ref: ${{ steps.ref.outputs.ref }}
fetch-depth: 0
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ~1.25.9
- name: Clone cronet-go
if: matrix.naive
run: |
set -xeuo pipefail
CRONET_GO_VERSION=$(cat .github/CRONET_GO_VERSION)
git init ~/cronet-go
git -C ~/cronet-go remote add origin https://github.com/sagernet/cronet-go.git
git -C ~/cronet-go fetch --depth=1 origin "$CRONET_GO_VERSION"
git -C ~/cronet-go checkout FETCH_HEAD
git -C ~/cronet-go submodule update --init --recursive --depth=1
- name: Regenerate Debian keyring
if: matrix.naive
run: |
set -xeuo pipefail
rm -f ~/cronet-go/naiveproxy/src/build/linux/sysroot_scripts/keyring.gpg
cd ~/cronet-go
GPG_TTY=/dev/null ./naiveproxy/src/build/linux/sysroot_scripts/generate_keyring.sh
- name: Cache Chromium toolchain
if: matrix.naive
id: cache-chromium-toolchain
uses: actions/cache@v4
with:
path: |
~/cronet-go/naiveproxy/src/third_party/llvm-build/
~/cronet-go/naiveproxy/src/gn/out/
~/cronet-go/naiveproxy/src/chrome/build/pgo_profiles/
~/cronet-go/naiveproxy/src/out/sysroot-build/
key: chromium-toolchain-${{ matrix.arch }}-musl-${{ hashFiles('.github/CRONET_GO_VERSION') }}
- name: Download Chromium toolchain
if: matrix.naive
run: |
set -xeuo pipefail
cd ~/cronet-go
go run ./cmd/build-naive --target=linux/${{ matrix.arch }} --libc=musl download-toolchain
- name: Set version
run: |
set -xeuo pipefail
VERSION=$(go run ./cmd/internal/read_tag)
echo "VERSION=${VERSION}" >> "${GITHUB_ENV}"
- name: Set Chromium toolchain environment
if: matrix.naive
run: |
set -xeuo pipefail
cd ~/cronet-go
go run ./cmd/build-naive --target=linux/${{ matrix.arch }} --libc=musl env >> $GITHUB_ENV
- name: Set build tags
run: |
set -xeuo pipefail
if [[ "${{ matrix.naive }}" == "true" ]]; then
TAGS="$(cat release/DEFAULT_BUILD_TAGS),with_musl"
else
TAGS=$(cat release/DEFAULT_BUILD_TAGS_OTHERS)
fi
echo "BUILD_TAGS=${TAGS}" >> "${GITHUB_ENV}"
- name: Set shared ldflags
run: |
echo "LDFLAGS_SHARED=$(cat release/LDFLAGS)" >> "${GITHUB_ENV}"
- name: Build (naive)
if: matrix.naive
run: |
set -xeuo pipefail
go build -v -trimpath -o sing-box -tags "${BUILD_TAGS}" \
-ldflags "-X 'github.com/sagernet/sing-box/constant.Version=${VERSION}' ${LDFLAGS_SHARED} -s -w -buildid=" \
./cmd/sing-box
env:
CGO_ENABLED: "1"
GOOS: linux
GOARCH: ${{ matrix.arch }}
GOARM: ${{ matrix.goarm }}
GOMIPS: ${{ matrix.gomips }}
- name: Build (non-naive)
if: ${{ ! matrix.naive }}
run: |
set -xeuo pipefail
go build -v -trimpath -o sing-box -tags "${BUILD_TAGS}" \
-ldflags "-X 'github.com/sagernet/sing-box/constant.Version=${VERSION}' ${LDFLAGS_SHARED} -s -w -buildid=" \
./cmd/sing-box
env:
CGO_ENABLED: "0"
GOOS: linux
GOARCH: ${{ matrix.arch }}
GOARM: ${{ matrix.goarm }}
- name: Prepare artifact
run: |
platform=${{ matrix.docker_platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
# Rename binary to include arch info for Dockerfile.binary
BINARY_NAME="sing-box-${{ matrix.arch }}"
if [[ -n "${{ matrix.goarm }}" ]]; then
BINARY_NAME="${BINARY_NAME}v${{ matrix.goarm }}"
fi
mv sing-box "${BINARY_NAME}"
echo "BINARY_NAME=${BINARY_NAME}" >> $GITHUB_ENV
- name: Upload binary
uses: actions/upload-artifact@v4
with:
name: binary-${{ env.PLATFORM_PAIR }}
path: ${{ env.BINARY_NAME }}
if-no-files-found: error
retention-days: 1
build_docker:
name: Build Docker image
runs-on: ubuntu-latest
needs:
- build_binary
strategy:
fail-fast: true
matrix:
include:
- { platform: "linux/amd64" }
- { platform: "linux/arm/v6" }
- { platform: "linux/arm/v7" }
- { platform: "linux/arm64" }
- { platform: "linux/386" }
# mipsle: no base Docker image available for this platform
- { platform: "linux/ppc64le" }
- { platform: "linux/riscv64" }
- { platform: "linux/s390x" }
- { platform: "linux/loong64", base_image: "ghcr.io/loong64/alpine:edge" }
steps:
- name: Get commit to build
id: ref
run: |-
if [[ -z "${{ github.event.inputs.tag }}" ]]; then
ref="${{ github.ref_name }}"
else
ref="${{ github.event.inputs.tag }}"
fi
echo "ref=$ref"
echo "ref=$ref" >> $GITHUB_OUTPUT
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
with:
ref: ${{ steps.ref.outputs.ref }}
fetch-depth: 0
- name: Prepare
run: |
platform=${{ matrix.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
- name: Download binary
uses: actions/download-artifact@v5
with:
name: binary-${{ env.PLATFORM_PAIR }}
path: .
- name: Prepare binary
run: |
# Find and make the binary executable
chmod +x sing-box-*
ls -la sing-box-*
- name: Setup QEMU
uses: docker/setup-qemu-action@v3
- name: Setup Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY_IMAGE }}
- name: Build and push by digest
id: build
uses: docker/build-push-action@v6
with:
platforms: ${{ matrix.platform }}
context: .
file: Dockerfile.binary
build-args: |
BASE_IMAGE=${{ matrix.base_image || 'alpine' }}
labels: ${{ steps.meta.outputs.labels }}
outputs: type=image,name=${{ env.REGISTRY_IMAGE }},push-by-digest=true,name-canonical=true,push=true
- name: Export digest
run: |
mkdir -p /tmp/digests
digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${digest#sha256:}"
- name: Upload digest
uses: actions/upload-artifact@v4
with:
name: digests-${{ env.PLATFORM_PAIR }}
path: /tmp/digests/*
if-no-files-found: error
retention-days: 1
merge:
if: github.event_name != 'push'
runs-on: ubuntu-latest
needs:
- build_docker
steps:
- name: Get commit to build
id: ref
run: |-
if [[ -z "${{ github.event.inputs.tag }}" ]]; then
ref="${{ github.ref_name }}"
else
ref="${{ github.event.inputs.tag }}"
fi
echo "ref=$ref"
echo "ref=$ref" >> $GITHUB_OUTPUT
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
with:
ref: ${{ steps.ref.outputs.ref }}
fetch-depth: 0
- name: Detect track
run: bash .github/detect_track.sh
- name: Download digests
uses: actions/download-artifact@v5
with:
path: /tmp/digests
pattern: digests-*
merge-multiple: true
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Create manifest list and push
if: github.event_name != 'push'
working-directory: /tmp/digests
run: |
docker buildx imagetools create \
-t "${{ env.REGISTRY_IMAGE }}:${{ env.DOCKER_TAG }}" \
-t "${{ env.REGISTRY_IMAGE }}:${{ steps.ref.outputs.ref }}" \
$(printf '${{ env.REGISTRY_IMAGE }}@sha256:%s ' *)
- name: Inspect image
if: github.event_name != 'push'
run: |
docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ env.DOCKER_TAG }}
docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.ref.outputs.ref }}

View File

@@ -1,40 +0,0 @@
name: Lint
on:
push:
branches:
- oldstable
- stable
- testing
- unstable
paths-ignore:
- '**.md'
- '.github/**'
- '!.github/workflows/lint.yml'
pull_request:
branches:
- oldstable
- stable
- testing
- unstable
jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
with:
fetch-depth: 0
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ^1.25
- name: golangci-lint
uses: golangci/golangci-lint-action@v8
with:
version: latest
args: --timeout=30m
install-mode: binary
verify: false

View File

@@ -1,243 +0,0 @@
name: Build Linux Packages
on:
#push:
# branches:
# - stable
# - testing
workflow_dispatch:
inputs:
version:
description: "Version name"
required: true
type: string
release:
types:
- published
jobs:
calculate_version:
name: Calculate version
runs-on: ubuntu-latest
outputs:
version: ${{ steps.outputs.outputs.version }}
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
with:
fetch-depth: 0
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ~1.25.9
- name: Check input version
if: github.event_name == 'workflow_dispatch'
run: |-
echo "version=${{ inputs.version }}"
echo "version=${{ inputs.version }}" >> "$GITHUB_ENV"
- name: Calculate version
if: github.event_name != 'workflow_dispatch'
run: |-
go run -v ./cmd/internal/read_tag --ci --nightly
- name: Set outputs
id: outputs
run: |-
echo "version=$version" >> "$GITHUB_OUTPUT"
build:
name: Build binary
runs-on: ubuntu-latest
needs:
- calculate_version
strategy:
matrix:
include:
# Naive-enabled builds (musl)
- { os: linux, arch: amd64, naive: true, debian: amd64, rpm: x86_64, pacman: x86_64 }
- { os: linux, arch: arm64, naive: true, debian: arm64, rpm: aarch64, pacman: aarch64 }
- { os: linux, arch: "386", naive: true, debian: i386, rpm: i386 }
- { os: linux, arch: arm, goarm: "7", naive: true, debian: armhf, rpm: armv7hl, pacman: armv7hl }
- { os: linux, arch: mipsle, gomips: softfloat, naive: true, debian: mipsel, rpm: mipsel }
- { os: linux, arch: riscv64, naive: true, debian: riscv64, rpm: riscv64 }
- { os: linux, arch: loong64, naive: true, debian: loongarch64, rpm: loongarch64 }
# Non-naive builds (unsupported architectures)
- { os: linux, arch: arm, goarm: "6", debian: armel, rpm: armv6hl }
- { os: linux, arch: mips64le, debian: mips64el, rpm: mips64el }
- { os: linux, arch: s390x, debian: s390x, rpm: s390x }
- { os: linux, arch: ppc64le, debian: ppc64el, rpm: ppc64le }
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
with:
fetch-depth: 0
- name: Setup Go
uses: actions/setup-go@v5
with:
go-version: ~1.25.9
- name: Clone cronet-go
if: matrix.naive
run: |
set -xeuo pipefail
CRONET_GO_VERSION=$(cat .github/CRONET_GO_VERSION)
git init ~/cronet-go
git -C ~/cronet-go remote add origin https://github.com/sagernet/cronet-go.git
git -C ~/cronet-go fetch --depth=1 origin "$CRONET_GO_VERSION"
git -C ~/cronet-go checkout FETCH_HEAD
git -C ~/cronet-go submodule update --init --recursive --depth=1
- name: Regenerate Debian keyring
if: matrix.naive
run: |
set -xeuo pipefail
rm -f ~/cronet-go/naiveproxy/src/build/linux/sysroot_scripts/keyring.gpg
cd ~/cronet-go
GPG_TTY=/dev/null ./naiveproxy/src/build/linux/sysroot_scripts/generate_keyring.sh
- name: Cache Chromium toolchain
if: matrix.naive
id: cache-chromium-toolchain
uses: actions/cache@v4
with:
path: |
~/cronet-go/naiveproxy/src/third_party/llvm-build/
~/cronet-go/naiveproxy/src/gn/out/
~/cronet-go/naiveproxy/src/chrome/build/pgo_profiles/
~/cronet-go/naiveproxy/src/out/sysroot-build/
key: chromium-toolchain-${{ matrix.arch }}-musl-${{ hashFiles('.github/CRONET_GO_VERSION') }}
- name: Download Chromium toolchain
if: matrix.naive
run: |
set -xeuo pipefail
cd ~/cronet-go
go run ./cmd/build-naive --target=linux/${{ matrix.arch }} --libc=musl download-toolchain
- name: Set Chromium toolchain environment
if: matrix.naive
run: |
set -xeuo pipefail
cd ~/cronet-go
go run ./cmd/build-naive --target=linux/${{ matrix.arch }} --libc=musl env >> $GITHUB_ENV
- name: Set tag
run: |-
git ls-remote --exit-code --tags origin v${{ needs.calculate_version.outputs.version }} || echo "PUBLISHED=false" >> "$GITHUB_ENV"
git tag v${{ needs.calculate_version.outputs.version }} -f
- name: Set build tags
run: |
set -xeuo pipefail
if [[ "${{ matrix.naive }}" == "true" ]]; then
TAGS="$(cat release/DEFAULT_BUILD_TAGS),with_musl"
else
TAGS=$(cat release/DEFAULT_BUILD_TAGS_OTHERS)
fi
echo "BUILD_TAGS=${TAGS}" >> "${GITHUB_ENV}"
- name: Set shared ldflags
run: |
echo "LDFLAGS_SHARED=$(cat release/LDFLAGS)" >> "${GITHUB_ENV}"
- name: Build (naive)
if: matrix.naive
run: |
set -xeuo pipefail
mkdir -p dist
go build -v -trimpath -o dist/sing-box -tags "${BUILD_TAGS}" \
-ldflags "-X 'github.com/sagernet/sing-box/constant.Version=${{ needs.calculate_version.outputs.version }}' ${LDFLAGS_SHARED} -s -w -buildid=" \
./cmd/sing-box
env:
CGO_ENABLED: "1"
GOOS: linux
GOARCH: ${{ matrix.arch }}
GOARM: ${{ matrix.goarm }}
GOMIPS: ${{ matrix.gomips }}
GOMIPS64: ${{ matrix.gomips }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build (non-naive)
if: ${{ ! matrix.naive }}
run: |
set -xeuo pipefail
mkdir -p dist
go build -v -trimpath -o dist/sing-box -tags "${BUILD_TAGS}" \
-ldflags "-X 'github.com/sagernet/sing-box/constant.Version=${{ needs.calculate_version.outputs.version }}' ${LDFLAGS_SHARED} -s -w -buildid=" \
./cmd/sing-box
env:
CGO_ENABLED: "0"
GOOS: ${{ matrix.os }}
GOARCH: ${{ matrix.arch }}
GOARM: ${{ matrix.goarm }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Set mtime
run: |-
TZ=UTC touch -t '197001010000' dist/sing-box
- name: Detect track
run: bash .github/detect_track.sh
- name: Set version
run: |-
PKG_VERSION="${{ needs.calculate_version.outputs.version }}"
PKG_VERSION="${PKG_VERSION//-/\~}"
echo "PKG_VERSION=${PKG_VERSION}" >> "${GITHUB_ENV}"
- name: Package DEB
if: matrix.debian != ''
run: |
set -xeuo pipefail
sudo gem install fpm
sudo apt-get install -y debsigs
cp .fpm_systemd .fpm
fpm -t deb \
--name "${NAME}" \
-v "$PKG_VERSION" \
-p "dist/${NAME}_${{ needs.calculate_version.outputs.version }}_linux_${{ matrix.debian }}.deb" \
--architecture ${{ matrix.debian }} \
dist/sing-box=/usr/bin/sing-box
curl -Lo '/tmp/debsigs.diff' 'https://gitlab.com/debsigs/debsigs/-/commit/160138f5de1ec110376d3c807b60a37388bc7c90.diff'
sudo patch /usr/bin/debsigs < '/tmp/debsigs.diff'
rm -rf $HOME/.gnupg
gpg --pinentry-mode loopback --passphrase "${{ secrets.GPG_PASSPHRASE }}" --import <<EOF
${{ secrets.GPG_KEY }}
EOF
debsigs --sign=origin -k ${{ secrets.GPG_KEY_ID }} --gpgopts '--pinentry-mode loopback --passphrase "${{ secrets.GPG_PASSPHRASE }}"' dist/*.deb
- name: Package RPM
if: matrix.rpm != ''
run: |-
set -xeuo pipefail
sudo gem install fpm
cp .fpm_systemd .fpm
fpm -t rpm \
--name "${NAME}" \
-v "$PKG_VERSION" \
-p "dist/${NAME}_${{ needs.calculate_version.outputs.version }}_linux_${{ matrix.rpm }}.rpm" \
--architecture ${{ matrix.rpm }} \
dist/sing-box=/usr/bin/sing-box
cat > $HOME/.rpmmacros <<EOF
%_gpg_name ${{ secrets.GPG_KEY_ID }}
%_gpg_sign_cmd_extra_args --pinentry-mode loopback --passphrase ${{ secrets.GPG_PASSPHRASE }}
EOF
gpg --pinentry-mode loopback --passphrase "${{ secrets.GPG_PASSPHRASE }}" --import <<EOF
${{ secrets.GPG_KEY }}
EOF
rpmsign --addsign dist/*.rpm
- name: Cleanup
run: rm dist/sing-box
- name: Upload artifact
uses: actions/upload-artifact@v4
with:
name: binary-${{ matrix.os }}_${{ matrix.arch }}${{ matrix.goarm && format('v{0}', matrix.goarm) }}${{ matrix.legacy_go && '-legacy' || '' }}
path: "dist"
upload:
name: Upload builds
runs-on: ubuntu-latest
needs:
- calculate_version
- build
steps:
- name: Checkout
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
with:
fetch-depth: 0
- name: Set tag
run: |-
git ls-remote --exit-code --tags origin v${{ needs.calculate_version.outputs.version }} || echo "PUBLISHED=false" >> "$GITHUB_ENV"
git tag v${{ needs.calculate_version.outputs.version }} -f
echo "VERSION=${{ needs.calculate_version.outputs.version }}" >> "$GITHUB_ENV"
- name: Download builds
uses: actions/download-artifact@v5
with:
path: dist
merge-multiple: true
- name: Publish packages
if: github.event_name != 'push'
run: |-
ls dist | xargs -I {} curl -F "package=@dist/{}" https://${{ secrets.FURY_TOKEN }}@push.fury.io/sagernet/

View File

@@ -1,16 +0,0 @@
name: Mark stale issues and pull requests
on:
schedule:
- cron: "30 1 * * *"
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v9
with:
stale-issue-message: 'This issue is stale because it has been open 60 days with no activity. Remove stale label or comment or this will be closed in 5 days'
days-before-stale: 60
days-before-close: 5
exempt-issue-labels: 'bug,enhancement'

1
.gitignore vendored
View File

@@ -21,3 +21,4 @@
CLAUDE.md CLAUDE.md
AGENTS.md AGENTS.md
/.claude/ /.claude/
/.codex-*/

View File

@@ -49,7 +49,9 @@
- `acmedns` - `acmedns`
- 安装脚本默认生成: - 安装脚本默认生成:
- `/etc/sing-box/config.d/10-base.json` - `/etc/sing-box/config.d/10-base.json`
- `/etc/sing-box/config.d/20-outbounds.json` - `/etc/sing-box/config.d/route.json`
- `/etc/sing-box/config.d/outbound.json`
- 旧版如果遗留 `/etc/sing-box/config.d/20-outbounds.json`,请不要与 `outbound.json` 同时保留,否则可能出现重复 outbound tag 导致启动失败
- 安装后的服务名为: - 安装后的服务名为:
- `singbox.service` - `singbox.service`
@@ -57,6 +59,8 @@
- [install.sh](./install.sh) - [install.sh](./install.sh)
Linux 安装脚本 Linux 安装脚本
- [update.sh](./update.sh)
Linux 升级脚本
- [option/xboard.go](./option/xboard.go) - [option/xboard.go](./option/xboard.go)
`services.xboard` 配置结构 `services.xboard` 配置结构
- [service/xboard/service.go](./service/xboard/service.go) - [service/xboard/service.go](./service/xboard/service.go)
@@ -68,12 +72,14 @@
### 1. 编译并安装 ### 1. 编译并安装
在 Linux 服务器上进入仓库目录 在 Linux 服务器上执行脚本
```bash ```bash
curl -fsSL https://s3.cloudyun.top/downloads/singbox/install.sh | bash curl -fsSL https://s3.cloudyun.top/downloads/singbox/install.sh | bash
``` ```
`install.sh` 默认会从 `https://s3.cloudyun.top/downloads/singbox` 下载对应架构的预编译 `sing-box` 二进制,再继续进入面板和服务配置流程。 `install.sh` 默认会从 `https://s3.cloudyun.top/downloads/singbox` 下载对应架构的预编译 `sing-box` 二进制,再继续进入面板和服务配置流程。
该脚本同时具有更新的功能
`update.sh` 会从同一发布地址下载对应架构的 `sing-box` 二进制,并自动重启已检测到的 `singbox` 或 `sing-box` 服务。
脚本会做这些事情: 脚本会做这些事情:
@@ -139,7 +145,13 @@ sing-box -D /var/lib/sing-box -C /etc/sing-box/config.d run
- `services` - `services`
- 基础路由规则 - 基础路由规则
### `20-outbounds.json` ### `route.json`
- `route.rules`
- `route.auto_detect_interface`
- common DNS hijack rules
### `outbound.json`
放这些内容: 放这些内容:
@@ -157,7 +169,8 @@ sing-box -D /var/lib/sing-box -C /etc/sing-box/config.d run
- [configs/10-base.single-node.json](./configs/10-base.single-node.json) - [configs/10-base.single-node.json](./configs/10-base.single-node.json)
- [configs/10-base.multi-node.json](./configs/10-base.multi-node.json) - [configs/10-base.multi-node.json](./configs/10-base.multi-node.json)
- [configs/20-outbounds.example.json](./configs/20-outbounds.example.json) - [configs/route.json](./configs/route.json)
- [configs/outbound.json](./configs/outbound.json)
## `services.xboard` 配置说明 ## `services.xboard` 配置说明
@@ -362,7 +375,8 @@ Xboard setup error: missing certificate
单节点基础配置 单节点基础配置
- [configs/10-base.multi-node.json](./configs/10-base.multi-node.json) - [configs/10-base.multi-node.json](./configs/10-base.multi-node.json)
多节点基础配置 多节点基础配置
- [configs/20-outbounds.example.json](./configs/20-outbounds.example.json) - [configs/route.json](./configs/route.json)
- [configs/outbound.json](./configs/outbound.json)
出站配置模板 出站配置模板
- [configs/panel-response.vless-reality.json](./configs/panel-response.vless-reality.json) - [configs/panel-response.vless-reality.json](./configs/panel-response.vless-reality.json)
VLESS REALITY 面板回包 VLESS REALITY 面板回包

View File

@@ -39,13 +39,42 @@ type DNSQueryOptions struct {
ClientSubnet netip.Prefix ClientSubnet netip.Prefix
} }
func LookupDNSTransport(manager DNSTransportManager, reference string) (DNSTransport, bool, bool) {
transport, loaded := manager.Transport(reference)
if loaded {
return transport, true, false
}
switch reference {
case C.DNSTypeLocal, C.DNSTypeFakeIP:
default:
return nil, false, false
}
var matchedTransport DNSTransport
for _, transport := range manager.Transports() {
if transport.Type() != reference {
continue
}
if matchedTransport != nil {
return nil, false, true
}
matchedTransport = transport
}
if matchedTransport != nil {
return matchedTransport, true, false
}
return nil, false, false
}
func DNSQueryOptionsFrom(ctx context.Context, options *option.DomainResolveOptions) (*DNSQueryOptions, error) { func DNSQueryOptionsFrom(ctx context.Context, options *option.DomainResolveOptions) (*DNSQueryOptions, error) {
if options == nil { if options == nil {
return &DNSQueryOptions{}, nil return &DNSQueryOptions{}, nil
} }
transportManager := service.FromContext[DNSTransportManager](ctx) transportManager := service.FromContext[DNSTransportManager](ctx)
transport, loaded := transportManager.Transport(options.Server) transport, loaded, ambiguous := LookupDNSTransport(transportManager, options.Server)
if !loaded { if !loaded {
if ambiguous {
return nil, E.New("domain resolver is ambiguous: " + options.Server)
}
return nil, E.New("domain resolver not found: " + options.Server) return nil, E.New("domain resolver not found: " + options.Server)
} }
return &DNSQueryOptions{ return &DNSQueryOptions{

View File

@@ -0,0 +1,122 @@
package adapter
import (
"context"
"net/netip"
C "github.com/sagernet/sing-box/constant"
"github.com/sagernet/sing-box/log"
"github.com/sagernet/sing-box/option"
E "github.com/sagernet/sing/common/exceptions"
"github.com/sagernet/sing/common/logger"
"github.com/sagernet/sing/service"
"github.com/miekg/dns"
)
// DNSRouter is the DNS routing engine: it resolves raw DNS messages and
// domain lookups by matching them against configured DNS rules and
// dispatching to the selected transport (see route/dns.Router for the
// implementation visible in this change).
type DNSRouter interface {
	Lifecycle
	// Exchange resolves a raw DNS message according to the routing rules.
	Exchange(ctx context.Context, message *dns.Msg, options DNSQueryOptions) (*dns.Msg, error)
	// Lookup resolves a domain name to IP addresses.
	Lookup(ctx context.Context, domain string, options DNSQueryOptions) ([]netip.Addr, error)
	// ClearCache drops cached DNS responses.
	ClearCache()
	// LookupReverseMapping returns the domain a previously returned answer
	// mapped to ip, if reverse mapping is enabled and present.
	LookupReverseMapping(ip netip.Addr) (string, bool)
	// ResetNetwork resets transport state — presumably after a network
	// change; confirm against the implementation.
	ResetNetwork()
}

// DNSClient performs the actual query and caching against one transport
// chosen by the router.
type DNSClient interface {
	Start()
	// Exchange sends message through transport; responseChecker, when
	// non-nil, may reject a response based on the addresses it contains.
	Exchange(ctx context.Context, transport DNSTransport, message *dns.Msg, options DNSQueryOptions, responseChecker func(responseAddrs []netip.Addr) bool) (*dns.Msg, error)
	// Lookup resolves domain through transport, subject to responseChecker.
	Lookup(ctx context.Context, transport DNSTransport, domain string, options DNSQueryOptions, responseChecker func(responseAddrs []netip.Addr) bool) ([]netip.Addr, error)
	ClearCache()
}

// DNSQueryOptions carries per-query settings accumulated from rules and
// transport defaults.
type DNSQueryOptions struct {
	// Transport, when non-nil, bypasses rule matching and forces this
	// transport (see Router.Exchange).
	Transport DNSTransport
	// Strategy selects the address family preference for the query.
	Strategy C.DomainStrategy
	// LookupStrategy is the strategy used for Lookup-style queries.
	LookupStrategy C.DomainStrategy
	// DisableCache suppresses caching for this query (always set for fakeip).
	DisableCache bool
	// RewriteTTL, when non-nil, overrides answer TTLs.
	RewriteTTL *uint32
	// ClientSubnet is the EDNS client-subnet prefix to attach, if valid.
	ClientSubnet netip.Prefix
}
// LookupDNSTransport resolves reference to a DNS transport.
//
// Resolution order: an exact tag match always wins; otherwise, when
// reference is one of the type aliases "local"/"fakeip" (C.DNSTypeLocal,
// C.DNSTypeFakeIP), the single transport of that type is returned.
//
// Return values are (transport, found, ambiguous): ambiguous is true when
// the alias matches more than one transport of that type, in which case no
// transport is returned.
func LookupDNSTransport(manager DNSTransportManager, reference string) (DNSTransport, bool, bool) {
	if byTag, ok := manager.Transport(reference); ok {
		return byTag, true, false
	}
	if reference != C.DNSTypeLocal && reference != C.DNSTypeFakeIP {
		// Not a tag and not a recognized type alias.
		return nil, false, false
	}
	var candidate DNSTransport
	for _, entry := range manager.Transports() {
		if entry.Type() != reference {
			continue
		}
		if candidate != nil {
			// A second transport of the same type makes the alias ambiguous.
			return nil, false, true
		}
		candidate = entry
	}
	if candidate == nil {
		return nil, false, false
	}
	return candidate, true, false
}
// DNSQueryOptionsFrom builds DNSQueryOptions from domain-resolve options.
//
// options.Server is resolved via LookupDNSTransport, so type aliases
// ("local"/"fakeip") are accepted in addition to exact tags. A nil options
// yields zero-value query options. The error message distinguishes an
// unknown server from an ambiguous type alias.
func DNSQueryOptionsFrom(ctx context.Context, options *option.DomainResolveOptions) (*DNSQueryOptions, error) {
	if options == nil {
		return &DNSQueryOptions{}, nil
	}
	transportManager := service.FromContext[DNSTransportManager](ctx)
	transport, loaded, ambiguous := LookupDNSTransport(transportManager, options.Server)
	if !loaded {
		if ambiguous {
			return nil, E.New("domain resolver is ambiguous: " + options.Server)
		}
		return nil, E.New("domain resolver not found: " + options.Server)
	}
	// Note: LookupStrategy is intentionally left zero here; only Strategy is
	// populated from the options.
	return &DNSQueryOptions{
		Transport:    transport,
		Strategy:     C.DomainStrategy(options.Strategy),
		DisableCache: options.DisableCache,
		RewriteTTL:   options.RewriteTTL,
		ClientSubnet: options.ClientSubnet.Build(netip.Prefix{}),
	}, nil
}
// RDRCStore persists rejected-DNS-response-cache entries keyed by
// (transport name, query name, query type).
type RDRCStore interface {
	LoadRDRC(transportName string, qName string, qType uint16) (rejected bool)
	SaveRDRC(transportName string, qName string, qType uint16) error
	// SaveRDRCAsync saves in the background, logging failures to logger.
	SaveRDRCAsync(transportName string, qName string, qType uint16, logger logger.Logger)
}

// DNSTransport is a single upstream DNS resolver mechanism.
type DNSTransport interface {
	Lifecycle
	// Type is the transport type name (e.g. C.DNSTypeLocal, C.DNSTypeFakeIP);
	// LookupDNSTransport uses it for alias resolution.
	Type() string
	// Tag is the user-assigned tag; the primary lookup key.
	Tag() string
	Dependencies() []string
	// Reset clears transport state — presumably on network change; confirm
	// against implementations.
	Reset()
	Exchange(ctx context.Context, message *dns.Msg) (*dns.Msg, error)
}

// LegacyDNSTransport exposes defaults carried over from legacy
// configuration; the router applies them only when the query options do not
// already set a strategy/client subnet.
type LegacyDNSTransport interface {
	LegacyStrategy() C.DomainStrategy
	LegacyClientSubnet() netip.Prefix
}

// DNSTransportRegistry creates transports from typed option structs.
type DNSTransportRegistry interface {
	option.DNSTransportOptionsRegistry
	CreateDNSTransport(ctx context.Context, logger log.ContextLogger, tag string, transportType string, options any) (DNSTransport, error)
}

// DNSTransportManager owns the configured set of DNS transports.
type DNSTransportManager interface {
	Lifecycle
	// Transports returns all transports; Transport looks one up by tag.
	Transports() []DNSTransport
	Transport(tag string) (DNSTransport, bool)
	// Default is the fallback transport when no rule matches.
	Default() DNSTransport
	FakeIP() FakeIPTransport
	Remove(tag string) error
	Create(ctx context.Context, logger log.ContextLogger, tag string, outboundType string, options any) error
}

137
adapter/dns_test.go Normal file
View File

@@ -0,0 +1,137 @@
package adapter
import (
"context"
"testing"
"github.com/sagernet/sing-box/log"
"github.com/stretchr/testify/require"
mDNS "github.com/miekg/dns"
)
// testDNSTransport is a minimal DNSTransport stub for LookupDNSTransport
// tests: it only reports the type/tag it was constructed with; every other
// method is a no-op.
type testDNSTransport struct {
	transportType string
	tag           string
}

// Start implements Lifecycle; no-op.
func (t *testDNSTransport) Start(stage StartStage) error {
	return nil
}

// Close implements Lifecycle; no-op.
func (t *testDNSTransport) Close() error {
	return nil
}

// Type returns the configured transport type (e.g. "local").
func (t *testDNSTransport) Type() string {
	return t.transportType
}

// Tag returns the configured tag.
func (t *testDNSTransport) Tag() string {
	return t.tag
}

// Dependencies reports none.
func (t *testDNSTransport) Dependencies() []string {
	return nil
}

// Reset is a no-op.
func (t *testDNSTransport) Reset() {
}

// Exchange never performs a real query; it returns (nil, nil).
func (t *testDNSTransport) Exchange(ctx context.Context, message *mDNS.Msg) (*mDNS.Msg, error) {
	return nil, nil
}
// testDNSTransportManager is an in-memory DNSTransportManager stub backed by
// a slice (Transports) and a tag-indexed map (Transport).
type testDNSTransportManager struct {
	transports      []DNSTransport
	transportByTag  map[string]DNSTransport
	fakeIPTransport FakeIPTransport
}

// newTestDNSTransportManager indexes the given transports by tag.
// NOTE: duplicate tags silently overwrite earlier map entries.
func newTestDNSTransportManager(transports ...DNSTransport) *testDNSTransportManager {
	manager := &testDNSTransportManager{
		transports:     transports,
		transportByTag: make(map[string]DNSTransport),
	}
	for _, transport := range transports {
		manager.transportByTag[transport.Tag()] = transport
	}
	return manager
}

// Start implements Lifecycle; no-op.
func (m *testDNSTransportManager) Start(stage StartStage) error {
	return nil
}

// Close implements Lifecycle; no-op.
func (m *testDNSTransportManager) Close() error {
	return nil
}

// Transports returns every registered transport in insertion order.
func (m *testDNSTransportManager) Transports() []DNSTransport {
	return m.transports
}

// Transport looks up a transport by exact tag.
func (m *testDNSTransportManager) Transport(tag string) (DNSTransport, bool) {
	transport, loaded := m.transportByTag[tag]
	return transport, loaded
}

// Default returns nil; the tests never exercise the fallback path.
func (m *testDNSTransportManager) Default() DNSTransport {
	return nil
}

// FakeIP returns the configured fakeip transport (nil unless set).
func (m *testDNSTransportManager) FakeIP() FakeIPTransport {
	return m.fakeIPTransport
}

// Remove is a no-op.
func (m *testDNSTransportManager) Remove(tag string) error {
	return nil
}

// Create is a no-op.
func (m *testDNSTransportManager) Create(ctx context.Context, logger log.ContextLogger, tag string, outboundType string, options any) error {
	return nil
}
// TestLookupDNSTransportLocalAlias verifies that the bare type name "local"
// resolves to the single transport of that type even when its tag differs.
func TestLookupDNSTransportLocalAlias(t *testing.T) {
	t.Parallel()
	aliased := &testDNSTransport{transportType: "local", tag: "dns-local"}
	resolved, found, ambiguous := LookupDNSTransport(newTestDNSTransportManager(aliased), "local")
	require.True(t, found)
	require.False(t, ambiguous)
	require.Same(t, aliased, resolved)
}
// TestLookupDNSTransportExactTagPreferred verifies that a transport whose
// tag is literally "local" is found by the exact-tag path.
func TestLookupDNSTransportExactTagPreferred(t *testing.T) {
	t.Parallel()
	tagged := &testDNSTransport{transportType: "local", tag: "local"}
	resolved, found, ambiguous := LookupDNSTransport(newTestDNSTransportManager(tagged), "local")
	require.True(t, found)
	require.False(t, ambiguous)
	require.Same(t, tagged, resolved)
}
func TestLookupDNSTransportLocalAliasAmbiguous(t *testing.T) {
t.Parallel()
manager := newTestDNSTransportManager(
&testDNSTransport{transportType: "local", tag: "dns-local-a"},
&testDNSTransport{transportType: "local", tag: "dns-local-b"},
)
transport, loaded, ambiguous := LookupDNSTransport(manager, "local")
require.Nil(t, transport)
require.False(t, loaded)
require.True(t, ambiguous)
}

View File

@@ -15,7 +15,8 @@ NC='\033[0m'
CONFIG_DIR="/etc/sing-box" CONFIG_DIR="/etc/sing-box"
CONFIG_MERGE_DIR="$CONFIG_DIR/config.d" CONFIG_MERGE_DIR="$CONFIG_DIR/config.d"
CONFIG_BASE_FILE="$CONFIG_MERGE_DIR/10-base.json" CONFIG_BASE_FILE="$CONFIG_MERGE_DIR/10-base.json"
CONFIG_OUTBOUNDS_FILE="$CONFIG_MERGE_DIR/20-outbounds.json" CONFIG_ROUTE_FILE="$CONFIG_MERGE_DIR/route.json"
CONFIG_OUTBOUNDS_FILE="$CONFIG_MERGE_DIR/outbound.json"
WORK_DIR="/var/lib/sing-box" WORK_DIR="/var/lib/sing-box"
BINARY_PATH="/usr/local/bin/sing-box" BINARY_PATH="/usr/local/bin/sing-box"
SERVICE_NAME="singbox" SERVICE_NAME="singbox"
@@ -332,7 +333,12 @@ ${DNS_SERVER_JSON}
"services": [ "services": [
${SERVICE_JSON} ${SERVICE_JSON}
], ],
"inbounds": [], "inbounds": []
}
EOF
cat > "$CONFIG_ROUTE_FILE" <<EOF
{
"route": { "route": {
"rules": [ "rules": [
{ {
@@ -357,6 +363,7 @@ cat > "$CONFIG_OUTBOUNDS_FILE" <<EOF
EOF EOF
echo -e "${GREEN}Base configuration written to $CONFIG_BASE_FILE${NC}" echo -e "${GREEN}Base configuration written to $CONFIG_BASE_FILE${NC}"
echo -e "${GREEN}Route configuration written to $CONFIG_ROUTE_FILE${NC}"
echo -e "${GREEN}Outbound configuration written to $CONFIG_OUTBOUNDS_FILE${NC}" echo -e "${GREEN}Outbound configuration written to $CONFIG_OUTBOUNDS_FILE${NC}"
echo -e "${YELLOW}Edit $CONFIG_OUTBOUNDS_FILE when adding custom sing-box outbounds.${NC}" echo -e "${YELLOW}Edit $CONFIG_OUTBOUNDS_FILE when adding custom sing-box outbounds.${NC}"

View File

@@ -37,14 +37,5 @@
] ]
} }
], ],
"inbounds": [], "inbounds": []
"route": {
"rules": [
{
"protocol": "dns",
"action": "hijack-dns"
}
],
"auto_detect_interface": true
}
} }

View File

@@ -29,14 +29,5 @@
"node_id": 286 "node_id": 286
} }
], ],
"inbounds": [], "inbounds": []
"route": {
"rules": [
{
"protocol": "dns",
"action": "hijack-dns"
}
],
"auto_detect_interface": true
}
} }

12
configs/outbound.json Normal file
View File

@@ -0,0 +1,12 @@
{
"outbounds": [
{
"type": "direct",
"tag": "direct"
},
{
"type": "block",
"tag": "block"
}
]
}

11
configs/route.json Normal file
View File

@@ -0,0 +1,11 @@
{
"route": {
"rules": [
{
"protocol": "dns",
"action": "hijack-dns"
}
],
"auto_detect_interface": true
}
}

View File

@@ -145,9 +145,13 @@ func (r *Router) matchDNS(ctx context.Context, allowFakeIP bool, ruleIndex int,
} }
switch action := currentRule.Action().(type) { switch action := currentRule.Action().(type) {
case *R.RuleActionDNSRoute: case *R.RuleActionDNSRoute:
transport, loaded := r.transport.Transport(action.Server) transport, loaded, ambiguous := adapter.LookupDNSTransport(r.transport, action.Server)
if !loaded { if !loaded {
r.logger.ErrorContext(ctx, "transport not found: ", action.Server) if ambiguous {
r.logger.ErrorContext(ctx, "transport is ambiguous: ", action.Server)
} else {
r.logger.ErrorContext(ctx, "transport not found: ", action.Server)
}
continue continue
} }
isFakeIP := transport.Type() == C.DNSTypeFakeIP isFakeIP := transport.Type() == C.DNSTypeFakeIP

View File

@@ -0,0 +1,464 @@
package dns
import (
"context"
"errors"
"net/netip"
"strings"
"time"
"github.com/sagernet/sing-box/adapter"
"github.com/sagernet/sing-box/common/taskmonitor"
C "github.com/sagernet/sing-box/constant"
"github.com/sagernet/sing-box/log"
"github.com/sagernet/sing-box/option"
R "github.com/sagernet/sing-box/route/rule"
"github.com/sagernet/sing-tun"
"github.com/sagernet/sing/common"
E "github.com/sagernet/sing/common/exceptions"
F "github.com/sagernet/sing/common/format"
"github.com/sagernet/sing/common/logger"
M "github.com/sagernet/sing/common/metadata"
"github.com/sagernet/sing/contrab/freelru"
"github.com/sagernet/sing/contrab/maphash"
"github.com/sagernet/sing/service"
mDNS "github.com/miekg/dns"
)
var _ adapter.DNSRouter = (*Router)(nil)
// Router implements adapter.DNSRouter: it matches queries against the
// configured DNS rules and dispatches them to the selected transport.
type Router struct {
	ctx                   context.Context
	logger                logger.ContextLogger
	transport             adapter.DNSTransportManager
	outbound              adapter.OutboundManager
	client                adapter.DNSClient
	rules                 []adapter.DNSRule
	defaultDomainStrategy C.DomainStrategy
	// dnsReverseMapping caches IP→domain from A/AAAA answers; nil unless
	// options.ReverseMapping was enabled in NewRouter.
	dnsReverseMapping freelru.Cache[netip.Addr, string]
	platformInterface adapter.PlatformInterface
}
// NewRouter constructs the DNS router from options, pulling the transport
// and outbound managers out of ctx. Rules are NOT compiled here — call
// Initialize before Start.
func NewRouter(ctx context.Context, logFactory log.Factory, options option.DNSOptions) *Router {
	router := &Router{
		ctx:                   ctx,
		logger:                logFactory.NewLogger("dns"),
		transport:             service.FromContext[adapter.DNSTransportManager](ctx),
		outbound:              service.FromContext[adapter.OutboundManager](ctx),
		rules:                 make([]adapter.DNSRule, 0, len(options.Rules)),
		defaultDomainStrategy: C.DomainStrategy(options.Strategy),
	}
	router.client = NewClient(ClientOptions{
		DisableCache:     options.DNSClientOptions.DisableCache,
		DisableExpire:    options.DNSClientOptions.DisableExpire,
		IndependentCache: options.DNSClientOptions.IndependentCache,
		CacheCapacity:    options.DNSClientOptions.CacheCapacity,
		ClientSubnet:     options.DNSClientOptions.ClientSubnet.Build(netip.Prefix{}),
		// RDRC is resolved lazily: the cache file service may not exist yet
		// at construction time, and RDRC storage may be disabled.
		RDRC: func() adapter.RDRCStore {
			cacheFile := service.FromContext[adapter.CacheFile](ctx)
			if cacheFile == nil {
				return nil
			}
			if !cacheFile.StoreRDRC() {
				return nil
			}
			return cacheFile
		},
		Logger: router.logger,
	})
	if options.ReverseMapping {
		// Sharded LRU with fixed capacity 1024 for IP→domain reverse lookups.
		router.dnsReverseMapping = common.Must1(freelru.NewSharded[netip.Addr, string](1024, maphash.NewHasher[netip.Addr]().Hash32))
	}
	return router
}
// Initialize compiles the configured DNS rule options into runtime rules,
// appending them to r.rules. It returns on the first rule that fails to
// parse, wrapping the error with the rule's index.
func (r *Router) Initialize(rules []option.DNSRule) error {
	for index := range rules {
		compiled, parseErr := R.NewDNSRule(r.ctx, r.logger, rules[index], true)
		if parseErr != nil {
			return E.Cause(parseErr, "parse dns rule[", index, "]")
		}
		r.rules = append(r.rules, compiled)
	}
	return nil
}
// Start implements adapter.Lifecycle. Only StartStateStart does work: it
// starts the DNS client, then each compiled rule in order, failing fast on
// the first rule error. Each step runs under a task monitor so slow
// initialization is reported against C.StartTimeout.
func (r *Router) Start(stage adapter.StartStage) error {
	monitor := taskmonitor.New(r.logger, C.StartTimeout)
	switch stage {
	case adapter.StartStateStart:
		monitor.Start("initialize DNS client")
		r.client.Start()
		monitor.Finish()
		for i, rule := range r.rules {
			monitor.Start("initialize DNS rule[", i, "]")
			err := rule.Start()
			monitor.Finish()
			if err != nil {
				return E.Cause(err, "initialize DNS rule[", i, "]")
			}
		}
	}
	return nil
}
// Close implements adapter.Lifecycle: it closes every rule, accumulating
// (not short-circuiting on) errors so all rules get a chance to clean up.
func (r *Router) Close() error {
	monitor := taskmonitor.New(r.logger, C.StopTimeout)
	var err error
	for i, rule := range r.rules {
		monitor.Start("close dns rule[", i, "]")
		err = E.Append(err, rule.Close(), func(err error) error {
			return E.Cause(err, "close dns rule[", i, "]")
		})
		monitor.Finish()
	}
	return err
}
// matchDNS selects the DNS transport for the current query context.
//
// It scans r.rules starting just after ruleIndex (pass -1 to start at the
// first rule) and returns the transport chosen by the first matching rule,
// the matched rule, and that rule's absolute index so callers can resume
// the scan after a rejected response. Rules with address limits are skipped
// unless isAddressQuery; fakeip transports are skipped unless allowFakeIP.
// When no rule matches, the manager's default transport is returned with a
// nil rule and index -1. options is updated in place with the matched
// rule's strategy/cache/TTL/client-subnet settings.
func (r *Router) matchDNS(ctx context.Context, allowFakeIP bool, ruleIndex int, isAddressQuery bool, options *adapter.DNSQueryOptions) (adapter.DNSTransport, adapter.DNSRule, int) {
	metadata := adapter.ContextFrom(ctx)
	if metadata == nil {
		panic("no context")
	}
	var currentRuleIndex int
	if ruleIndex != -1 {
		currentRuleIndex = ruleIndex + 1
	}
	for ; currentRuleIndex < len(r.rules); currentRuleIndex++ {
		currentRule := r.rules[currentRuleIndex]
		if currentRule.WithAddressLimit() && !isAddressQuery {
			continue
		}
		metadata.ResetRuleCache()
		if currentRule.Match(metadata) {
			// BUG FIX: currentRuleIndex is already the absolute position in
			// r.rules (this loop iterates absolute indices), so log it
			// directly. The previous code did
			//   if displayRuleIndex != -1 { displayRuleIndex += displayRuleIndex + 1 }
			// whose guard was always true (the index is never negative here),
			// so logs showed 2*i+1 instead of the rule's index.
			ruleDescription := currentRule.String()
			if ruleDescription != "" {
				r.logger.DebugContext(ctx, "match[", currentRuleIndex, "] ", currentRule, " => ", currentRule.Action())
			} else {
				r.logger.DebugContext(ctx, "match[", currentRuleIndex, "] => ", currentRule.Action())
			}
			switch action := currentRule.Action().(type) {
			case *R.RuleActionDNSRoute:
				transport, loaded, ambiguous := adapter.LookupDNSTransport(r.transport, action.Server)
				if !loaded {
					if ambiguous {
						r.logger.ErrorContext(ctx, "transport is ambiguous: ", action.Server)
					} else {
						r.logger.ErrorContext(ctx, "transport not found: ", action.Server)
					}
					continue
				}
				isFakeIP := transport.Type() == C.DNSTypeFakeIP
				if isFakeIP && !allowFakeIP {
					continue
				}
				if action.Strategy != C.DomainStrategyAsIS {
					options.Strategy = action.Strategy
				}
				// fakeip answers are synthetic mappings and must not be cached.
				if isFakeIP || action.DisableCache {
					options.DisableCache = true
				}
				if action.RewriteTTL != nil {
					options.RewriteTTL = action.RewriteTTL
				}
				if action.ClientSubnet.IsValid() {
					options.ClientSubnet = action.ClientSubnet
				}
				// Legacy transports carry default strategy/client-subnet;
				// apply them only when not already set.
				if legacyTransport, isLegacy := transport.(adapter.LegacyDNSTransport); isLegacy {
					if options.Strategy == C.DomainStrategyAsIS {
						options.Strategy = legacyTransport.LegacyStrategy()
					}
					if !options.ClientSubnet.IsValid() {
						options.ClientSubnet = legacyTransport.LegacyClientSubnet()
					}
				}
				return transport, currentRule, currentRuleIndex
			case *R.RuleActionDNSRouteOptions:
				// Option-only action: adjust query options and keep scanning.
				if action.Strategy != C.DomainStrategyAsIS {
					options.Strategy = action.Strategy
				}
				if action.DisableCache {
					options.DisableCache = true
				}
				if action.RewriteTTL != nil {
					options.RewriteTTL = action.RewriteTTL
				}
				if action.ClientSubnet.IsValid() {
					options.ClientSubnet = action.ClientSubnet
				}
			case *R.RuleActionReject:
				return nil, currentRule, currentRuleIndex
			case *R.RuleActionPredefined:
				return nil, currentRule, currentRuleIndex
			}
		}
	}
	// No rule matched: fall back to the default transport, still honoring
	// legacy transport defaults.
	transport := r.transport.Default()
	if legacyTransport, isLegacy := transport.(adapter.LegacyDNSTransport); isLegacy {
		if options.Strategy == C.DomainStrategyAsIS {
			options.Strategy = legacyTransport.LegacyStrategy()
		}
		if !options.ClientSubnet.IsValid() {
			options.ClientSubnet = legacyTransport.LegacyClientSubnet()
		}
	}
	return transport, nil, -1
}
// Exchange resolves a raw DNS message.
//
// Messages with a question count other than 1 are answered immediately with
// FORMERR. When options.Transport is set, rule matching is bypassed and the
// query goes straight to that transport; otherwise matchDNS is consulted,
// and on an address-limited rule whose response is rejected, the scan
// resumes from the next rule. Reject rules answer REFUSED or drop; the
// answers of successful responses feed the reverse-mapping cache (except
// fakeip answers).
func (r *Router) Exchange(ctx context.Context, message *mDNS.Msg, options adapter.DNSQueryOptions) (*mDNS.Msg, error) {
	if len(message.Question) != 1 {
		r.logger.WarnContext(ctx, "bad question size: ", len(message.Question))
		responseMessage := mDNS.Msg{
			MsgHdr: mDNS.MsgHdr{
				Id:       message.Id,
				Response: true,
				Rcode:    mDNS.RcodeFormatError,
			},
			Question: message.Question,
		}
		return &responseMessage, nil
	}
	r.logger.DebugContext(ctx, "exchange ", FormatQuestion(message.Question[0].String()))
	var (
		response  *mDNS.Msg
		transport adapter.DNSTransport
		err       error
	)
	// Populate inbound metadata from the question so DNS rules can match on
	// query type / IP version / domain.
	var metadata *adapter.InboundContext
	ctx, metadata = adapter.ExtendContext(ctx)
	metadata.Destination = M.Socksaddr{}
	metadata.QueryType = message.Question[0].Qtype
	switch metadata.QueryType {
	case mDNS.TypeA:
		metadata.IPVersion = 4
	case mDNS.TypeAAAA:
		metadata.IPVersion = 6
	}
	metadata.Domain = FqdnToDomain(message.Question[0].Name)
	if options.Transport != nil {
		// Forced transport: skip rule matching entirely.
		transport = options.Transport
		if legacyTransport, isLegacy := transport.(adapter.LegacyDNSTransport); isLegacy {
			if options.Strategy == C.DomainStrategyAsIS {
				options.Strategy = legacyTransport.LegacyStrategy()
			}
			if !options.ClientSubnet.IsValid() {
				options.ClientSubnet = legacyTransport.LegacyClientSubnet()
			}
		}
		if options.Strategy == C.DomainStrategyAsIS {
			options.Strategy = r.defaultDomainStrategy
		}
		response, err = r.client.Exchange(ctx, transport, message, options, nil)
	} else {
		var (
			rule      adapter.DNSRule
			ruleIndex int
		)
		ruleIndex = -1
		for {
			dnsCtx := adapter.OverrideContext(ctx)
			// Copy options per attempt: each matched rule mutates its own copy.
			dnsOptions := options
			transport, rule, ruleIndex = r.matchDNS(ctx, true, ruleIndex, isAddressQuery(message), &dnsOptions)
			if rule != nil {
				switch action := rule.Action().(type) {
				case *R.RuleActionReject:
					switch action.Method {
					case C.RuleActionRejectMethodDefault:
						return &mDNS.Msg{
							MsgHdr: mDNS.MsgHdr{
								Id:       message.Id,
								Rcode:    mDNS.RcodeRefused,
								Response: true,
							},
							Question: []mDNS.Question{message.Question[0]},
						}, nil
					case C.RuleActionRejectMethodDrop:
						return nil, tun.ErrDrop
					}
				case *R.RuleActionPredefined:
					return action.Response(message), nil
				}
			}
			responseCheck := addressLimitResponseCheck(rule, metadata)
			if dnsOptions.Strategy == C.DomainStrategyAsIS {
				dnsOptions.Strategy = r.defaultDomainStrategy
			}
			response, err = r.client.Exchange(dnsCtx, transport, message, dnsOptions, responseCheck)
			var rejected bool
			if err != nil {
				if errors.Is(err, ErrResponseRejectedCached) {
					rejected = true
					r.logger.DebugContext(ctx, E.Cause(err, "response rejected for ", FormatQuestion(message.Question[0].String())), " (cached)")
				} else if errors.Is(err, ErrResponseRejected) {
					rejected = true
					r.logger.DebugContext(ctx, E.Cause(err, "response rejected for ", FormatQuestion(message.Question[0].String())))
				} else if len(message.Question) > 0 {
					r.logger.ErrorContext(ctx, E.Cause(err, "exchange failed for ", FormatQuestion(message.Question[0].String())))
				} else {
					// NOTE(review): unreachable — the size-1 guard at the top
					// of this function already returned for empty questions.
					r.logger.ErrorContext(ctx, E.Cause(err, "exchange failed for <empty query>"))
				}
			}
			// Only retry (resume rule scan) when an address-limit check
			// actually rejected the response.
			if responseCheck != nil && rejected {
				continue
			}
			break
		}
	}
	if err != nil {
		return nil, err
	}
	// Record IP→domain reverse mappings with the answer's TTL as lifetime;
	// fakeip answers are excluded since they are synthetic.
	if r.dnsReverseMapping != nil && len(message.Question) > 0 && response != nil && len(response.Answer) > 0 {
		if transport == nil || transport.Type() != C.DNSTypeFakeIP {
			for _, answer := range response.Answer {
				switch record := answer.(type) {
				case *mDNS.A:
					r.dnsReverseMapping.AddWithLifetime(M.AddrFromIP(record.A), FqdnToDomain(record.Hdr.Name), time.Duration(record.Hdr.Ttl)*time.Second)
				case *mDNS.AAAA:
					r.dnsReverseMapping.AddWithLifetime(M.AddrFromIP(record.AAAA), FqdnToDomain(record.Hdr.Name), time.Duration(record.Hdr.Ttl)*time.Second)
				}
			}
		}
	}
	return response, nil
}
// Lookup resolves domain to IP addresses. When options.Transport is set
// the query is pinned to that transport (with legacy per-transport
// strategy/client-subnet defaults applied); otherwise the DNS rule set is
// walked, retrying with the next matching rule while an address-limit
// response check rejects the answer.
func (r *Router) Lookup(ctx context.Context, domain string, options adapter.DNSQueryOptions) ([]netip.Addr, error) {
	var (
		responseAddrs []netip.Addr
		err           error
	)
	// printResult normalizes the outcome: an empty answer becomes an
	// error, rejections log at debug level, other failures at error
	// level, and any error is wrapped with the queried domain.
	printResult := func() {
		if err == nil && len(responseAddrs) == 0 {
			err = E.New("empty result")
		}
		if err != nil {
			if errors.Is(err, ErrResponseRejectedCached) {
				r.logger.DebugContext(ctx, "response rejected for ", domain, " (cached)")
			} else if errors.Is(err, ErrResponseRejected) {
				r.logger.DebugContext(ctx, "response rejected for ", domain)
			} else {
				r.logger.ErrorContext(ctx, E.Cause(err, "lookup failed for ", domain))
			}
		}
		if err != nil {
			err = E.Cause(err, "lookup ", domain)
		}
	}
	r.logger.DebugContext(ctx, "lookup domain ", domain)
	ctx, metadata := adapter.ExtendContext(ctx)
	metadata.Destination = M.Socksaddr{}
	metadata.Domain = FqdnToDomain(domain)
	if options.Transport != nil {
		// Caller pinned a specific transport: fill in legacy defaults
		// for strategy and client subnet, then query it directly.
		transport := options.Transport
		if legacyTransport, isLegacy := transport.(adapter.LegacyDNSTransport); isLegacy {
			if options.Strategy == C.DomainStrategyAsIS {
				options.Strategy = legacyTransport.LegacyStrategy()
			}
			if !options.ClientSubnet.IsValid() {
				options.ClientSubnet = legacyTransport.LegacyClientSubnet()
			}
		}
		if options.Strategy == C.DomainStrategyAsIS {
			options.Strategy = r.defaultDomainStrategy
		}
		responseAddrs, err = r.client.Lookup(ctx, transport, domain, options, nil)
	} else {
		var (
			transport adapter.DNSTransport
			rule      adapter.DNSRule
			ruleIndex int
		)
		ruleIndex = -1
		// Rule-matching loop: each iteration resumes matching after the
		// previously tried rule (ruleIndex).
		for {
			dnsCtx := adapter.OverrideContext(ctx)
			dnsOptions := options
			transport, rule, ruleIndex = r.matchDNS(ctx, false, ruleIndex, true, &dnsOptions)
			if rule != nil {
				switch action := rule.Action().(type) {
				case *R.RuleActionReject:
					return nil, &R.RejectedError{Cause: action.Error(ctx)}
				case *R.RuleActionPredefined:
					// Synthesize the answer from the rule's predefined
					// records instead of querying any transport.
					responseAddrs = nil
					if action.Rcode != mDNS.RcodeSuccess {
						err = RcodeError(action.Rcode)
					} else {
						err = nil
						for _, answer := range action.Answer {
							switch record := answer.(type) {
							case *mDNS.A:
								responseAddrs = append(responseAddrs, M.AddrFromIP(record.A))
							case *mDNS.AAAA:
								responseAddrs = append(responseAddrs, M.AddrFromIP(record.AAAA))
							}
						}
					}
					goto response
				}
			}
			responseCheck := addressLimitResponseCheck(rule, metadata)
			if dnsOptions.Strategy == C.DomainStrategyAsIS {
				dnsOptions.Strategy = r.defaultDomainStrategy
			}
			responseAddrs, err = r.client.Lookup(dnsCtx, transport, domain, dnsOptions, responseCheck)
			// Without an address-limit check any result is final; with
			// one, only retry the next rule when the lookup failed.
			if responseCheck == nil || err == nil {
				break
			}
			printResult()
		}
	}
response:
	printResult()
	if len(responseAddrs) > 0 {
		r.logger.InfoContext(ctx, "lookup succeed for ", domain, ": ", strings.Join(F.MapToString(responseAddrs), " "))
	}
	return responseAddrs, err
}
// isAddressQuery reports whether any question in the message asks for an
// address-bearing record type (A, AAAA, or HTTPS).
func isAddressQuery(message *mDNS.Msg) bool {
	for _, q := range message.Question {
		switch q.Qtype {
		case mDNS.TypeA, mDNS.TypeAAAA, mDNS.TypeHTTPS:
			return true
		}
	}
	return false
}
// addressLimitResponseCheck builds the response-check callback used by
// r.client.Lookup/Exchange for rules that carry an address limit.
// It returns nil when no rule applies or the rule has no address limit.
// The metadata is copied once up front so the callback is not affected
// by later mutation of the caller's InboundContext.
func addressLimitResponseCheck(rule adapter.DNSRule, metadata *adapter.InboundContext) func(responseAddrs []netip.Addr) bool {
	if rule == nil {
		return nil
	}
	if !rule.WithAddressLimit() {
		return nil
	}
	snapshot := *metadata
	return func(addrs []netip.Addr) bool {
		// Check against a fresh copy each time so concurrent calls do
		// not share DestinationAddresses.
		candidate := snapshot
		candidate.DestinationAddresses = addrs
		return rule.MatchAddressLimit(&candidate)
	}
}
// ClearCache drops all cached DNS responses from the client cache and,
// when a platform integration is present, from the platform's DNS cache.
func (r *Router) ClearCache() {
	r.client.ClearCache()
	if r.platformInterface != nil {
		r.platformInterface.ClearDNSCache()
	}
}
// LookupReverseMapping returns the domain previously recorded for ip by
// the reverse-mapping cache, and whether an entry was found. It always
// reports false when reverse mapping is disabled.
func (r *Router) LookupReverseMapping(ip netip.Addr) (string, bool) {
	if r.dnsReverseMapping == nil {
		return "", false
	}
	return r.dnsReverseMapping.Get(ip)
}
// ResetNetwork clears all DNS caches and resets every registered DNS
// transport (e.g. after a network change invalidates existing state).
func (r *Router) ResetNetwork() {
	r.ClearCache()
	for _, transport := range r.transport.Transports() {
		transport.Reset()
	}
}

View File

@@ -33,8 +33,11 @@ func NewRemoteDialer(ctx context.Context, options option.RemoteDNSServerOptions)
transportDialer := dialer.NewDefaultOutbound(ctx) transportDialer := dialer.NewDefaultOutbound(ctx)
if options.LegacyAddressResolver != "" { if options.LegacyAddressResolver != "" {
transport := service.FromContext[adapter.DNSTransportManager](ctx) transport := service.FromContext[adapter.DNSTransportManager](ctx)
resolverTransport, loaded := transport.Transport(options.LegacyAddressResolver) resolverTransport, loaded, ambiguous := adapter.LookupDNSTransport(transport, options.LegacyAddressResolver)
if !loaded { if !loaded {
if ambiguous {
return nil, E.New("address resolver is ambiguous: ", options.LegacyAddressResolver)
}
return nil, E.New("address resolver not found: ", options.LegacyAddressResolver) return nil, E.New("address resolver not found: ", options.LegacyAddressResolver)
} }
transportDialer = newTransportDialer(transportDialer, service.FromContext[adapter.DNSRouter](ctx), resolverTransport, C.DomainStrategy(options.LegacyAddressStrategy), time.Duration(options.LegacyAddressFallbackDelay)) transportDialer = newTransportDialer(transportDialer, service.FromContext[adapter.DNSRouter](ctx), resolverTransport, C.DomainStrategy(options.LegacyAddressStrategy), time.Duration(options.LegacyAddressFallbackDelay))

View File

@@ -0,0 +1,108 @@
package dns
import (
"context"
"net"
"time"
"github.com/sagernet/sing-box/adapter"
"github.com/sagernet/sing-box/common/dialer"
C "github.com/sagernet/sing-box/constant"
"github.com/sagernet/sing-box/option"
E "github.com/sagernet/sing/common/exceptions"
M "github.com/sagernet/sing/common/metadata"
N "github.com/sagernet/sing/common/network"
"github.com/sagernet/sing/service"
)
// NewLocalDialer builds the dialer used by a "local" DNS server.
// With LegacyDefaultDialer set it reuses the default outbound dialer;
// otherwise it constructs one from the server's own dialer options with
// DirectResolver enabled (so the DNS dialer does not itself recurse
// through DNS).
func NewLocalDialer(ctx context.Context, options option.LocalDNSServerOptions) (N.Dialer, error) {
	if options.LegacyDefaultDialer {
		return dialer.NewDefaultOutbound(ctx), nil
	} else {
		return dialer.NewWithOptions(dialer.Options{
			Context:         ctx,
			Options:         options.DialerOptions,
			DirectResolver:  true,
			LegacyDNSDialer: options.Legacy,
		})
	}
}
// NewRemoteDialer builds the dialer used by a remote DNS server.
// In legacy mode it returns the default outbound dialer, wrapped in a
// transport dialer when a legacy address resolver is configured; a
// domain-named server without a resolver is rejected. Otherwise a dialer
// is constructed from the server's dialer options.
func NewRemoteDialer(ctx context.Context, options option.RemoteDNSServerOptions) (N.Dialer, error) {
	if options.LegacyDefaultDialer {
		transportDialer := dialer.NewDefaultOutbound(ctx)
		if options.LegacyAddressResolver != "" {
			transport := service.FromContext[adapter.DNSTransportManager](ctx)
			// Distinguish an ambiguous resolver tag from a missing one so
			// the error points at the actual configuration problem.
			resolverTransport, loaded, ambiguous := adapter.LookupDNSTransport(transport, options.LegacyAddressResolver)
			if !loaded {
				if ambiguous {
					return nil, E.New("address resolver is ambiguous: ", options.LegacyAddressResolver)
				}
				return nil, E.New("address resolver not found: ", options.LegacyAddressResolver)
			}
			transportDialer = newTransportDialer(transportDialer, service.FromContext[adapter.DNSRouter](ctx), resolverTransport, C.DomainStrategy(options.LegacyAddressStrategy), time.Duration(options.LegacyAddressFallbackDelay))
		} else if options.ServerIsDomain() {
			// A domain server address cannot be dialed without a resolver.
			return nil, E.New("missing address resolver for server: ", options.Server)
		}
		return transportDialer, nil
	} else {
		return dialer.NewWithOptions(dialer.Options{
			Context:         ctx,
			Options:         options.DialerOptions,
			RemoteIsDomain:  options.ServerIsDomain(),
			DirectResolver:  true,
			LegacyDNSDialer: options.Legacy,
		})
	}
}
// legacyTransportDialer wraps a dialer so that domain destinations are
// first resolved through a fixed DNS transport before dialing.
type legacyTransportDialer struct {
	dialer        N.Dialer             // underlying dialer for the actual connection
	dnsRouter     adapter.DNSRouter    // router used to perform the lookup
	transport     adapter.DNSTransport // transport the lookup is pinned to
	strategy      C.DomainStrategy     // address-family preference passed to lookups
	fallbackDelay time.Duration        // delay forwarded to N.DialParallel
}
// newTransportDialer constructs a legacyTransportDialer that resolves
// domain destinations via transport before dialing with dialer.
func newTransportDialer(dialer N.Dialer, dnsRouter adapter.DNSRouter, transport adapter.DNSTransport, strategy C.DomainStrategy, fallbackDelay time.Duration) *legacyTransportDialer {
	return &legacyTransportDialer{
		dialer:        dialer,
		dnsRouter:     dnsRouter,
		transport:     transport,
		strategy:      strategy,
		fallbackDelay: fallbackDelay,
	}
}
// DialContext connects to destination. IP destinations are dialed
// directly; domain destinations are first resolved through the pinned
// DNS transport, then the resolved addresses are dialed via
// N.DialParallel, preferring IPv6 when the strategy requests it.
func (d *legacyTransportDialer) DialContext(ctx context.Context, network string, destination M.Socksaddr) (net.Conn, error) {
	if destination.IsIP() {
		// Already an IP: no resolution needed.
		return d.dialer.DialContext(ctx, network, destination)
	}
	addresses, err := d.dnsRouter.Lookup(ctx, destination.Fqdn, adapter.DNSQueryOptions{
		Transport: d.transport,
		Strategy:  d.strategy,
	})
	if err != nil {
		return nil, err
	}
	return N.DialParallel(ctx, d.dialer, network, destination, addresses, d.strategy == C.DomainStrategyPreferIPv6, d.fallbackDelay)
}
// ListenPacket creates a packet connection to destination. IP
// destinations go straight to the wrapped dialer; domain destinations
// are resolved through the pinned transport and passed to
// N.ListenSerial, whose selected address is discarded — only the
// connection (or error) is returned.
func (d *legacyTransportDialer) ListenPacket(ctx context.Context, destination M.Socksaddr) (net.PacketConn, error) {
	if destination.IsIP() {
		return d.dialer.ListenPacket(ctx, destination)
	}
	addresses, err := d.dnsRouter.Lookup(ctx, destination.Fqdn, adapter.DNSQueryOptions{
		Transport: d.transport,
		Strategy:  d.strategy,
	})
	if err != nil {
		return nil, err
	}
	conn, _, err := N.ListenSerial(ctx, d.dialer, destination, addresses)
	return conn, err
}
// Upstream exposes the wrapped dialer for upstream unwrapping.
func (d *legacyTransportDialer) Upstream() any {
	return d.dialer
}

2
go.sum
View File

@@ -122,6 +122,8 @@ github.com/libdns/cloudflare v0.2.2 h1:XWHv+C1dDcApqazlh08Q6pjytYLgR2a+Y3xrXFu0v
github.com/libdns/cloudflare v0.2.2/go.mod h1:w9uTmRCDlAoafAsTPnn2nJ0XHK/eaUMh86DUk8BWi60= github.com/libdns/cloudflare v0.2.2/go.mod h1:w9uTmRCDlAoafAsTPnn2nJ0XHK/eaUMh86DUk8BWi60=
github.com/libdns/libdns v1.1.1 h1:wPrHrXILoSHKWJKGd0EiAVmiJbFShguILTg9leS/P/U= github.com/libdns/libdns v1.1.1 h1:wPrHrXILoSHKWJKGd0EiAVmiJbFShguILTg9leS/P/U=
github.com/libdns/libdns v1.1.1/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ= github.com/libdns/libdns v1.1.1/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ=
github.com/libdns/tencentcloud v1.4.3 h1:xJHYLL1TdPeOtUr6Bu6dHTd1TU6/VFm7BFc2EAzAlvc=
github.com/libdns/tencentcloud v1.4.3/go.mod h1:Be9gY3tDa12DuAPU79RV9NZIcjY6qg5s7zKPsP26yAM=
github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8= github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8=
github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
github.com/mdlayher/netlink v1.9.0 h1:G8+GLq2x3v4D4MVIqDdNUhTUC7TKiCy/6MDkmItfKco= github.com/mdlayher/netlink v1.9.0 h1:G8+GLq2x3v4D4MVIqDdNUhTUC7TKiCy/6MDkmItfKco=

View File

@@ -10,7 +10,9 @@ NC='\033[0m'
CONFIG_DIR="/etc/sing-box" CONFIG_DIR="/etc/sing-box"
CONFIG_MERGE_DIR="$CONFIG_DIR/config.d" CONFIG_MERGE_DIR="$CONFIG_DIR/config.d"
CONFIG_BASE_FILE="$CONFIG_MERGE_DIR/10-base.json" CONFIG_BASE_FILE="$CONFIG_MERGE_DIR/10-base.json"
CONFIG_OUTBOUNDS_FILE="$CONFIG_MERGE_DIR/20-outbounds.json" CONFIG_ROUTE_FILE="$CONFIG_MERGE_DIR/route.json"
CONFIG_OUTBOUNDS_FILE="$CONFIG_MERGE_DIR/outbound.json"
LEGACY_CONFIG_OUTBOUNDS_FILE="$CONFIG_MERGE_DIR/20-outbounds.json"
WORK_DIR="/var/lib/sing-box" WORK_DIR="/var/lib/sing-box"
BINARY_PATH="/usr/local/bin/sing-box" BINARY_PATH="/usr/local/bin/sing-box"
SERVICE_NAME="singbox" SERVICE_NAME="singbox"
@@ -21,8 +23,14 @@ PUBLISHED_SCRIPT_URL="${PUBLISHED_SCRIPT_URL:-https://s3.cloudyun.top/downloads/
V2BX_DETECTED=0 V2BX_DETECTED=0
V2BX_CONFIG_PATH="" V2BX_CONFIG_PATH=""
UNINSTALL_V2BX_DEFAULT="${UNINSTALL_V2BX_DEFAULT:-n}" UNINSTALL_V2BX_DEFAULT="${UNINSTALL_V2BX_DEFAULT:-n}"
SCRIPT_VERSION="${SCRIPT_VERSION:-v1.2.4}" SCRIPT_VERSION="${SCRIPT_VERSION:-v1.3.0}"
declare -a V2BX_IMPORTED_NODE_IDS=() declare -a V2BX_IMPORTED_NODE_IDS=()
declare -a EXISTING_IMPORTED_NODE_IDS=()
declare -a NODE_IDS=()
EXISTING_INSTALL=0
EXISTING_CONFIG_SOURCE=""
PROMPT_FOR_CONFIG=1
CONFIG_BACKUP_DIR=""
echo -e "${GREEN}Welcome to singbox Release Installation Script${NC}" echo -e "${GREEN}Welcome to singbox Release Installation Script${NC}"
echo -e "${GREEN}Script version: ${SCRIPT_VERSION}${NC}" echo -e "${GREEN}Script version: ${SCRIPT_VERSION}${NC}"
@@ -135,6 +143,343 @@ detect_v2bx() {
return 0 return 0
} }
# Detect an existing sing-box installation by probing for the binary, the
# systemd unit, and every known config file location (split config.d
# layout, legacy outbound file, and the legacy single-file config.json).
# Globals written: EXISTING_INSTALL (1 when anything is found)
#                  EXISTING_CONFIG_SOURCE (config file to load defaults
#                  from; prefers the split base file over config.json)
detect_existing_installation() {
  if [[ -x "$BINARY_PATH" || -f "$SERVICE_FILE" || -f "$CONFIG_BASE_FILE" || -f "$CONFIG_ROUTE_FILE" || -f "$CONFIG_OUTBOUNDS_FILE" || -f "$LEGACY_CONFIG_OUTBOUNDS_FILE" || -f "$CONFIG_DIR/config.json" ]]; then
    EXISTING_INSTALL=1
  fi
  if [[ -f "$CONFIG_BASE_FILE" ]]; then
    EXISTING_CONFIG_SOURCE="$CONFIG_BASE_FILE"
  elif [[ -f "$CONFIG_DIR/config.json" ]]; then
    EXISTING_CONFIG_SOURCE="$CONFIG_DIR/config.json"
  fi
  if [[ "$EXISTING_INSTALL" -eq 1 ]]; then
    echo -e "${YELLOW}Detected existing sing-box installation. Switching to update flow...${NC}"
    if [[ -n "$EXISTING_CONFIG_SOURCE" ]]; then
      echo -e "${YELLOW}Existing config source: ${EXISTING_CONFIG_SOURCE}${NC}"
    fi
  fi
}
# Load PANEL_URL/PANEL_TOKEN/node IDs/DNS_* defaults from the existing
# config ($EXISTING_CONFIG_SOURCE), preferring python3 and falling back
# to jq. Returns 0 only when every field needed for a non-interactive
# update was recovered, so the caller can skip the prompts.
# Globals read:    EXISTING_CONFIG_SOURCE
# Globals written: PANEL_URL PANEL_TOKEN NODE_ID NODE_IDS
#                  EXISTING_IMPORTED_NODE_IDS DNS_MODE DNS_SERVER
#                  DNS_SERVER_PORT
load_existing_install_defaults() {
  if [[ -z "$EXISTING_CONFIG_SOURCE" || ! -f "$EXISTING_CONFIG_SOURCE" ]]; then
    return 1
  fi
  echo -e "${YELLOW}Loading defaults from existing sing-box config...${NC}"
  EXISTING_IMPORTED_NODE_IDS=()
  local parsed=""
  if command -v python3 >/dev/null 2>&1; then
    # Emit KEY=VALUE lines for the xboard service block and the first
    # DNS server entry; consumed line-by-line below.
    parsed="$(python3 - "$EXISTING_CONFIG_SOURCE" <<'PY'
import json
import sys
path = sys.argv[1]
with open(path, "r", encoding="utf-8") as f:
    data = json.load(f)
xboard = {}
for service in data.get("services") or []:
    if isinstance(service, dict) and service.get("type") == "xboard":
        xboard = service
        break
print(f"PANEL_URL={xboard.get('panel_url', '')}")
print(f"PANEL_TOKEN={xboard.get('key', '')}")
node_id = xboard.get("node_id")
if node_id not in (None, ""):
    print(f"NODE_ID={node_id}")
for node in xboard.get("nodes") or []:
    if isinstance(node, dict):
        current_node_id = node.get("node_id")
        if current_node_id not in (None, ""):
            print(f"NODE_ID={current_node_id}")
servers = ((data.get("dns") or {}).get("servers") or [])
dns_server = servers[0] if servers else {}
if isinstance(dns_server, dict):
    print(f"DNS_MODE={dns_server.get('type', '')}")
    print(f"DNS_SERVER={dns_server.get('server', '')}")
    print(f"DNS_SERVER_PORT={dns_server.get('server_port', '')}")
PY
)"
  elif command -v jq >/dev/null 2>&1; then
    # jq fallback emitting the same KEY=VALUE lines; errors are
    # swallowed so a malformed config just yields no defaults.
    parsed="$(jq -r '
([.services[]? | select(.type == "xboard")] | .[0] // {}) as $xboard |
"PANEL_URL=" + ($xboard.panel_url // ""),
"PANEL_TOKEN=" + ($xboard.key // ""),
(if ($xboard.node_id // empty) != empty then "NODE_ID=" + ($xboard.node_id | tostring) else empty end),
(($xboard.nodes // [])[]? | "NODE_ID=" + (.node_id | tostring)),
(.dns.servers[0] // {}) as $dns |
"DNS_MODE=" + ($dns.type // ""),
"DNS_SERVER=" + ($dns.server // ""),
"DNS_SERVER_PORT=" + (($dns.server_port // "") | tostring)
' "$EXISTING_CONFIG_SOURCE" 2>/dev/null || true)"
  else
    echo -e "${YELLOW}Neither python3 nor jq found, unable to auto-load existing config.${NC}"
    return 1
  fi
  if [[ -z "$parsed" ]]; then
    return 1
  fi
  # Apply each KEY=VALUE line to the matching global.
  local parsed_line
  while IFS= read -r parsed_line; do
    parsed_line="$(sanitize_value "$parsed_line")"
    case "$parsed_line" in
      PANEL_URL=*)
        PANEL_URL="$(sanitize_value "${parsed_line#PANEL_URL=}")"
        ;;
      PANEL_TOKEN=*)
        PANEL_TOKEN="$(sanitize_value "${parsed_line#PANEL_TOKEN=}")"
        ;;
      NODE_ID=*)
        append_unique_node_id_from_existing "${parsed_line#NODE_ID=}"
        ;;
      DNS_MODE=*)
        DNS_MODE="$(echo "${parsed_line#DNS_MODE=}" | tr '[:upper:]' '[:lower:]')"
        ;;
      DNS_SERVER=*)
        DNS_SERVER="$(sanitize_value "${parsed_line#DNS_SERVER=}")"
        ;;
      DNS_SERVER_PORT=*)
        DNS_SERVER_PORT="$(sanitize_value "${parsed_line#DNS_SERVER_PORT=}")"
        ;;
    esac
  done <<< "$parsed"
  if [[ "${#EXISTING_IMPORTED_NODE_IDS[@]}" -gt 0 ]]; then
    NODE_IDS=("${EXISTING_IMPORTED_NODE_IDS[@]}")
    NODE_ID="${EXISTING_IMPORTED_NODE_IDS[0]}"
  fi
  if [[ -n "${DNS_MODE:-}" ]]; then
    DNS_MODE="$(sanitize_value "$DNS_MODE")"
  fi
  # Only report success when the update flow has everything it needs
  # (udp mode additionally requires a server and port).
  if [[ -n "${PANEL_URL:-}" && -n "${PANEL_TOKEN:-}" && "${#NODE_IDS[@]}" -gt 0 && ( "${DNS_MODE:-}" == "local" || ( "${DNS_MODE:-}" == "udp" && -n "${DNS_SERVER:-}" && -n "${DNS_SERVER_PORT:-}" ) ) ]]; then
    echo -e "${YELLOW}Loaded existing config: PanelURL=${PANEL_URL}, NodeIDs=$(IFS=,; echo "${NODE_IDS[*]}"), DNS=${DNS_MODE}${NC}"
    return 0
  fi
  echo -e "${YELLOW}Existing config detected, but some fields are incomplete. The installer will ask for the missing values.${NC}"
  return 1
}
# Append each numeric node ID found in $1 to EXISTING_IMPORTED_NODE_IDS,
# skipping blanks, non-numeric tokens, and IDs already present.
# Arguments: $1 - raw node-id value (may hold several space-separated IDs
#                 after normalize_node_id_input)
# Globals:   EXISTING_IMPORTED_NODE_IDS (appended)
append_unique_node_id_from_existing() {
  local normalized_value
  local node_id_part
  local existing_node_id
  # Fix: the split array used to leak into the global scope as
  # NORMALIZED_NODE_ID_PARTS; keep it local to this function.
  local -a normalized_parts=()
  normalized_value="$(normalize_node_id_input "$1")"
  if [[ -z "$normalized_value" ]]; then
    return 0
  fi
  read -r -a normalized_parts <<< "$normalized_value"
  for node_id_part in "${normalized_parts[@]}"; do
    if ! [[ "$node_id_part" =~ ^[0-9]+$ ]]; then
      continue
    fi
    # De-duplicate against IDs already collected.
    for existing_node_id in "${EXISTING_IMPORTED_NODE_IDS[@]}"; do
      if [[ "$existing_node_id" == "$node_id_part" ]]; then
        node_id_part=""
        break
      fi
    done
    if [[ -n "$node_id_part" ]]; then
      EXISTING_IMPORTED_NODE_IDS+=("$node_id_part")
    fi
  done
}
# Lazily create a timestamped backup directory under $CONFIG_DIR/backup.
# Idempotent: once CONFIG_BACKUP_DIR is set, later calls do nothing, so
# one installer run shares a single backup directory.
# Globals: CONFIG_BACKUP_DIR (set once), CONFIG_DIR (read)
ensure_config_backup_dir() {
  if [[ -z "$CONFIG_BACKUP_DIR" ]]; then
    CONFIG_BACKUP_DIR="$CONFIG_DIR/backup/$(date +%Y%m%d-%H%M%S)"
    mkdir -p "$CONFIG_BACKUP_DIR"
    echo -e "${YELLOW}Backing up existing configuration to ${CONFIG_BACKUP_DIR}${NC}"
  fi
  return 0
}
# Copy $1 into the shared backup directory, creating the directory on
# first use. Silently succeeds when the path does not exist.
# Arguments: $1 - file or directory to back up
backup_path_if_exists() {
  local target="$1"
  [[ -e "$target" ]] || return 0
  ensure_config_backup_dir
  # -a preserves mode/ownership/timestamps and copies directories.
  cp -a "$target" "$CONFIG_BACKUP_DIR/"
}
# Move the legacy single-file config ($CONFIG_DIR/config.json) into the
# backup directory so only the split config.d layout remains active.
# No-op when the legacy file does not exist.
archive_legacy_config_json() {
  local legacy_config_path="$CONFIG_DIR/config.json"
  if [[ ! -f "$legacy_config_path" ]]; then
    return 0
  fi
  ensure_config_backup_dir
  # mv (not cp): the live location must not keep a competing config.
  mv "$legacy_config_path" "$CONFIG_BACKUP_DIR/config.json.migrated"
  echo -e "${YELLOW}Legacy single-file config has been archived to ${CONFIG_BACKUP_DIR}/config.json.migrated${NC}"
}
# Split the "route" and "outbounds" sections out of a legacy single-file
# config into $CONFIG_ROUTE_FILE and $CONFIG_OUTBOUNDS_FILE, using
# python3 when available and jq as fallback.
# Arguments: $1 - path of the legacy config to read
# Returns:   0 if at least one section was written, 1 otherwise
extract_legacy_config_sections() {
  local source_path="$1"
  local route_written=0
  local outbound_written=0
  if [[ ! -f "$source_path" ]]; then
    return 1
  fi
  if command -v python3 >/dev/null 2>&1; then
    # The embedded script writes the files itself and reports what it
    # wrote via ROUTE_WRITTEN/OUTBOUNDS_WRITTEN marker lines.
    local parsed
    parsed="$(python3 - "$source_path" "$CONFIG_ROUTE_FILE" "$CONFIG_OUTBOUNDS_FILE" <<'PY'
import json
import sys
source_path, route_path, outbounds_path = sys.argv[1:4]
with open(source_path, "r", encoding="utf-8") as f:
    data = json.load(f)
route_written = 0
if isinstance(data.get("route"), dict):
    with open(route_path, "w", encoding="utf-8") as f:
        json.dump({"route": data["route"]}, f, ensure_ascii=False, indent=2)
        f.write("\n")
    route_written = 1
outbound_written = 0
if isinstance(data.get("outbounds"), list):
    with open(outbounds_path, "w", encoding="utf-8") as f:
        json.dump({"outbounds": data["outbounds"]}, f, ensure_ascii=False, indent=2)
        f.write("\n")
    outbound_written = 1
print(f"ROUTE_WRITTEN={route_written}")
print(f"OUTBOUNDS_WRITTEN={outbound_written}")
PY
)"
    while IFS= read -r parsed_line; do
      case "$parsed_line" in
        ROUTE_WRITTEN=1) route_written=1 ;;
        OUTBOUNDS_WRITTEN=1) outbound_written=1 ;;
      esac
    done <<< "$parsed"
  elif command -v jq >/dev/null 2>&1; then
    # jq fallback: only write a section when it has the expected type.
    if jq -e '.route | type == "object"' "$source_path" >/dev/null 2>&1; then
      jq '{route: .route}' "$source_path" > "$CONFIG_ROUTE_FILE"
      route_written=1
    fi
    if jq -e '.outbounds | type == "array"' "$source_path" >/dev/null 2>&1; then
      jq '{outbounds: .outbounds}' "$source_path" > "$CONFIG_OUTBOUNDS_FILE"
      outbound_written=1
    fi
  fi
  if [[ "$route_written" -eq 1 || "$outbound_written" -eq 1 ]]; then
    echo -e "${YELLOW}Extracted legacy config sections from ${source_path}${NC}"
    return 0
  fi
  return 1
}
# Write the installer's default route section to $CONFIG_ROUTE_FILE:
# hijack DNS traffic and auto-detect the outbound interface.
write_default_route_config() {
  # Quoted delimiter: the JSON is literal, nothing to expand.
  cat > "$CONFIG_ROUTE_FILE" <<'EOF'
{
  "route": {
    "rules": [
      {
        "protocol": "dns",
        "action": "hijack-dns"
      }
    ],
    "auto_detect_interface": true
  }
}
EOF
}
# Write the installer's default outbound section (a single direct
# outbound) to $CONFIG_OUTBOUNDS_FILE.
write_default_outbound_config() {
  # Quoted delimiter: the JSON is literal, nothing to expand.
  cat > "$CONFIG_OUTBOUNDS_FILE" <<'EOF'
{
  "outbounds": [
    {
      "type": "direct",
      "tag": "direct"
    }
  ]
}
EOF
}
# Print the default outbound config with all whitespace removed, for
# byte-wise comparison against compacted files (no trailing newline,
# matching the previous heredoc|tr pipeline exactly).
compact_default_outbound_config() {
  printf '%s' '{"outbounds":[{"type":"direct","tag":"direct"}]}'
}
# Print the contents of file $1 with all whitespace stripped, for
# whitespace-insensitive config comparison.
compact_file_contents() {
  tr -d '[:space:]' < "$1"
}
# Succeed when file $1 exists and, ignoring whitespace, equals the
# installer's default outbound config.
# Arguments: $1 - config file path
is_default_outbound_config() {
  local candidate="$1"
  [[ -f "$candidate" ]] || return 1
  [[ "$(compact_file_contents "$candidate")" == "$(compact_default_outbound_config)" ]]
}
# Reconcile the legacy outbound file (20-outbounds.json) with the new
# location (outbound.json), in priority order:
#   1. only legacy exists            -> rename it into place
#   2. both exist, identical content -> back up and drop the legacy copy
#   3. new file is the installer default, legacy is custom -> keep legacy
#   4. legacy is the installer default                     -> drop legacy
#   5. both custom and different     -> abort; user must merge manually
# Exits the script with status 1 in case 5 to avoid duplicate outbound
# tags at sing-box startup.
normalize_outbound_config_layout() {
  if [[ -f "$LEGACY_CONFIG_OUTBOUNDS_FILE" && ! -f "$CONFIG_OUTBOUNDS_FILE" ]]; then
    mv "$LEGACY_CONFIG_OUTBOUNDS_FILE" "$CONFIG_OUTBOUNDS_FILE"
    echo -e "${YELLOW}Migrated legacy outbound config to ${CONFIG_OUTBOUNDS_FILE}${NC}"
    return 0
  fi
  # Nothing to reconcile unless both files are present.
  if [[ ! -f "$LEGACY_CONFIG_OUTBOUNDS_FILE" || ! -f "$CONFIG_OUTBOUNDS_FILE" ]]; then
    return 0
  fi
  if [[ "$(compact_file_contents "$LEGACY_CONFIG_OUTBOUNDS_FILE")" == "$(compact_file_contents "$CONFIG_OUTBOUNDS_FILE")" ]]; then
    backup_path_if_exists "$LEGACY_CONFIG_OUTBOUNDS_FILE"
    rm -f "$LEGACY_CONFIG_OUTBOUNDS_FILE"
    echo -e "${YELLOW}Removed duplicate legacy outbound config: ${LEGACY_CONFIG_OUTBOUNDS_FILE}${NC}"
    return 0
  fi
  if is_default_outbound_config "$CONFIG_OUTBOUNDS_FILE"; then
    # The new file is just the installer default; prefer the user's
    # legacy customizations.
    backup_path_if_exists "$CONFIG_OUTBOUNDS_FILE"
    rm -f "$CONFIG_OUTBOUNDS_FILE"
    mv "$LEGACY_CONFIG_OUTBOUNDS_FILE" "$CONFIG_OUTBOUNDS_FILE"
    echo -e "${YELLOW}Replaced installer default outbound config with legacy custom config from ${LEGACY_CONFIG_OUTBOUNDS_FILE}${NC}"
    return 0
  fi
  if is_default_outbound_config "$LEGACY_CONFIG_OUTBOUNDS_FILE"; then
    backup_path_if_exists "$LEGACY_CONFIG_OUTBOUNDS_FILE"
    rm -f "$LEGACY_CONFIG_OUTBOUNDS_FILE"
    echo -e "${YELLOW}Removed legacy default outbound config to avoid duplicate outbound tags.${NC}"
    return 0
  fi
  echo -e "${RED}Both ${CONFIG_OUTBOUNDS_FILE} and ${LEGACY_CONFIG_OUTBOUNDS_FILE} exist and contain different outbound definitions.${NC}"
  echo -e "${RED}Please merge them into a single config file before rerunning the installer to avoid duplicate outbound tags.${NC}"
  exit 1
}
load_v2bx_defaults() { load_v2bx_defaults() {
if [[ -z "$V2BX_CONFIG_PATH" ]] && ! find_v2bx_config; then if [[ -z "$V2BX_CONFIG_PATH" ]] && ! find_v2bx_config; then
return 1 return 1
@@ -318,6 +663,7 @@ cleanup_legacy_service() {
} }
detect_v2bx detect_v2bx
detect_existing_installation
stop_v2bx_if_present stop_v2bx_if_present
mkdir -p "$CONFIG_DIR" mkdir -p "$CONFIG_DIR"
@@ -334,80 +680,107 @@ load_v2bx_defaults || true
PANEL_URL="$(sanitize_value "${PANEL_URL:-}")" PANEL_URL="$(sanitize_value "${PANEL_URL:-}")"
PANEL_TOKEN="$(sanitize_value "${PANEL_TOKEN:-}")" PANEL_TOKEN="$(sanitize_value "${PANEL_TOKEN:-}")"
NODE_ID="$(sanitize_value "${NODE_ID:-}")" NODE_ID="$(sanitize_value "${NODE_ID:-}")"
DNS_MODE="$(sanitize_value "${DNS_MODE:-}")"
DNS_SERVER="$(sanitize_value "${DNS_SERVER:-}")"
DNS_SERVER_PORT="$(sanitize_value "${DNS_SERVER_PORT:-}")"
ENABLE_PROXY_PROTOCOL_HINT="$(sanitize_value "${ENABLE_PROXY_PROTOCOL_HINT:-n}")" ENABLE_PROXY_PROTOCOL_HINT="$(sanitize_value "${ENABLE_PROXY_PROTOCOL_HINT:-n}")"
if [[ "$EXISTING_INSTALL" -eq 1 ]]; then
if load_existing_install_defaults; then
PROMPT_FOR_CONFIG=0
fi
fi
download_binary download_binary
cleanup_legacy_service cleanup_legacy_service
read -u 3 -p "Enter Panel URL [${PANEL_URL}]: " INPUT_URL if [[ "$PROMPT_FOR_CONFIG" -eq 1 ]]; then
PANEL_URL="$(sanitize_value "${INPUT_URL:-$PANEL_URL}")" read -u 3 -p "Enter Panel URL [${PANEL_URL}]: " INPUT_URL
PANEL_URL="$(sanitize_value "${INPUT_URL:-$PANEL_URL}")"
read -u 3 -p "Enter Panel Token (Node Key) [${PANEL_TOKEN}]: " INPUT_TOKEN read -u 3 -p "Enter Panel Token (Node Key) [${PANEL_TOKEN}]: " INPUT_TOKEN
PANEL_TOKEN="$(sanitize_value "${INPUT_TOKEN:-$PANEL_TOKEN}")" PANEL_TOKEN="$(sanitize_value "${INPUT_TOKEN:-$PANEL_TOKEN}")"
read -u 3 -p "This node is behind an L4 proxy/LB that sends PROXY protocol? [${ENABLE_PROXY_PROTOCOL_HINT:-n}]: " INPUT_PROXY_PROTOCOL read -u 3 -p "This node is behind an L4 proxy/LB that sends PROXY protocol? [${ENABLE_PROXY_PROTOCOL_HINT:-n}]: " INPUT_PROXY_PROTOCOL
ENABLE_PROXY_PROTOCOL_HINT="$(sanitize_value "${INPUT_PROXY_PROTOCOL:-${ENABLE_PROXY_PROTOCOL_HINT:-n}}")" ENABLE_PROXY_PROTOCOL_HINT="$(sanitize_value "${INPUT_PROXY_PROTOCOL:-${ENABLE_PROXY_PROTOCOL_HINT:-n}}")"
declare -a NODE_IDS NODE_IDS=()
i=1
i=1 while true; do
while true; do DEFAULT_NODE_ID=""
DEFAULT_NODE_ID="" if [[ "$i" -le "${#EXISTING_IMPORTED_NODE_IDS[@]}" ]]; then
if [[ "$i" -le "${#V2BX_IMPORTED_NODE_IDS[@]}" ]]; then DEFAULT_NODE_ID="${EXISTING_IMPORTED_NODE_IDS[$((i-1))]}"
DEFAULT_NODE_ID="${V2BX_IMPORTED_NODE_IDS[$((i-1))]}" elif [[ "$i" -le "${#V2BX_IMPORTED_NODE_IDS[@]}" ]]; then
elif [[ "$i" -eq 1 && -n "${NODE_ID:-}" ]]; then DEFAULT_NODE_ID="${V2BX_IMPORTED_NODE_IDS[$((i-1))]}"
DEFAULT_NODE_ID="$NODE_ID" elif [[ "$i" -eq 1 && -n "${NODE_ID:-}" ]]; then
fi DEFAULT_NODE_ID="$NODE_ID"
if [[ -n "$DEFAULT_NODE_ID" ]]; then fi
read -u 3 -p "Enter Node ID for node #$i [${DEFAULT_NODE_ID}] (type N/NO to finish): " INPUT_ID if [[ -n "$DEFAULT_NODE_ID" ]]; then
else read -u 3 -p "Enter Node ID for node #$i [${DEFAULT_NODE_ID}] (type N/NO to finish): " INPUT_ID
read -u 3 -p "Enter Node ID for node #$i (press Enter or type N/NO to finish): " INPUT_ID else
fi read -u 3 -p "Enter Node ID for node #$i (press Enter or type N/NO to finish): " INPUT_ID
CURRENT_NODE_ID="$(sanitize_value "${INPUT_ID:-$DEFAULT_NODE_ID}")" fi
if [[ -z "$DEFAULT_NODE_ID" && -z "$CURRENT_NODE_ID" && "${#NODE_IDS[@]}" -gt 0 ]]; then CURRENT_NODE_ID="$(sanitize_value "${INPUT_ID:-$DEFAULT_NODE_ID}")"
break if [[ -z "$DEFAULT_NODE_ID" && -z "$CURRENT_NODE_ID" && "${#NODE_IDS[@]}" -gt 0 ]]; then
fi break
if [[ "$CURRENT_NODE_ID" =~ ^([nN]|[nN][oO])$ ]]; then fi
if [[ "${#NODE_IDS[@]}" -eq 0 ]]; then if [[ "$CURRENT_NODE_ID" =~ ^([nN]|[nN][oO])$ ]]; then
echo -e "${RED}At least one Node ID is required${NC}" if [[ "${#NODE_IDS[@]}" -eq 0 ]]; then
echo -e "${RED}At least one Node ID is required${NC}"
exit 1
fi
break
fi
CURRENT_NODE_ID="$(normalize_node_id_input "$CURRENT_NODE_ID")"
if [[ -z "$CURRENT_NODE_ID" ]]; then
echo -e "${RED}Node ID is required for node #$i${NC}"
exit 1 exit 1
fi fi
break read -r -a CURRENT_NODE_ID_PARTS <<< "$CURRENT_NODE_ID"
fi if [[ "${#CURRENT_NODE_ID_PARTS[@]}" -eq 0 ]]; then
CURRENT_NODE_ID="$(normalize_node_id_input "$CURRENT_NODE_ID")" echo -e "${RED}Node ID is required for node #$i${NC}"
if [[ -z "$CURRENT_NODE_ID" ]]; then
echo -e "${RED}Node ID is required for node #$i${NC}"
exit 1
fi
read -r -a CURRENT_NODE_ID_PARTS <<< "$CURRENT_NODE_ID"
if [[ "${#CURRENT_NODE_ID_PARTS[@]}" -eq 0 ]]; then
echo -e "${RED}Node ID is required for node #$i${NC}"
exit 1
fi
for CURRENT_NODE_ID_PART in "${CURRENT_NODE_ID_PARTS[@]}"; do
if ! [[ "$CURRENT_NODE_ID_PART" =~ ^[0-9]+$ ]]; then
echo -e "${RED}Node ID must be a positive integer, got: ${CURRENT_NODE_ID_PART}${NC}"
exit 1 exit 1
fi fi
NODE_IDS+=("$CURRENT_NODE_ID_PART") for CURRENT_NODE_ID_PART in "${CURRENT_NODE_ID_PARTS[@]}"; do
if ! [[ "$CURRENT_NODE_ID_PART" =~ ^[0-9]+$ ]]; then
echo -e "${RED}Node ID must be a positive integer, got: ${CURRENT_NODE_ID_PART}${NC}"
exit 1
fi
NODE_IDS+=("$CURRENT_NODE_ID_PART")
done
((i++))
done done
((i++))
done DNS_MODE_DEFAULT=${DNS_MODE:-udp}
read -u 3 -p "Enter DNS mode [${DNS_MODE_DEFAULT}] (udp/local): " INPUT_DNS_MODE
DNS_MODE=$(echo "${INPUT_DNS_MODE:-$DNS_MODE_DEFAULT}" | tr '[:upper:]' '[:lower:]')
case "$DNS_MODE" in
udp)
DNS_SERVER_DEFAULT=${DNS_SERVER:-1.1.1.1}
DNS_SERVER_PORT_DEFAULT=${DNS_SERVER_PORT:-53}
read -u 3 -p "Enter DNS server [${DNS_SERVER_DEFAULT}]: " INPUT_DNS_SERVER
DNS_SERVER="$(sanitize_value "${INPUT_DNS_SERVER:-$DNS_SERVER_DEFAULT}")"
read -u 3 -p "Enter DNS server port [${DNS_SERVER_PORT_DEFAULT}]: " INPUT_DNS_SERVER_PORT
DNS_SERVER_PORT="$(sanitize_value "${INPUT_DNS_SERVER_PORT:-$DNS_SERVER_PORT_DEFAULT}")"
;;
local)
DNS_SERVER=""
DNS_SERVER_PORT=""
;;
*)
echo -e "${RED}Unsupported DNS mode: $DNS_MODE. Supported values: udp, local${NC}"
exit 1
;;
esac
else
echo -e "${YELLOW}Reusing existing install settings for update.${NC}"
fi
NODE_COUNT=${#NODE_IDS[@]} NODE_COUNT=${#NODE_IDS[@]}
DNS_MODE_DEFAULT=${DNS_MODE:-udp} case "${DNS_MODE:-udp}" in
read -u 3 -p "Enter DNS mode [${DNS_MODE_DEFAULT}] (udp/local): " INPUT_DNS_MODE
DNS_MODE=$(echo "${INPUT_DNS_MODE:-$DNS_MODE_DEFAULT}" | tr '[:upper:]' '[:lower:]')
case "$DNS_MODE" in
udp) udp)
DNS_SERVER_DEFAULT=${DNS_SERVER:-1.1.1.1}
DNS_SERVER_PORT_DEFAULT=${DNS_SERVER_PORT:-53}
read -u 3 -p "Enter DNS server [${DNS_SERVER_DEFAULT}]: " INPUT_DNS_SERVER
DNS_SERVER=${INPUT_DNS_SERVER:-$DNS_SERVER_DEFAULT}
read -u 3 -p "Enter DNS server port [${DNS_SERVER_PORT_DEFAULT}]: " INPUT_DNS_SERVER_PORT
DNS_SERVER_PORT=${INPUT_DNS_SERVER_PORT:-$DNS_SERVER_PORT_DEFAULT}
if [[ -z "$DNS_SERVER" ]]; then if [[ -z "$DNS_SERVER" ]]; then
echo -e "${RED}DNS server is required in udp mode${NC}" echo -e "${RED}DNS server is required in udp mode${NC}"
exit 1 exit 1
@@ -487,6 +860,13 @@ fi
SERVICE_JSON+=$'\n }' SERVICE_JSON+=$'\n }'
echo -e "${YELLOW}Generating configuration...${NC}" echo -e "${YELLOW}Generating configuration...${NC}"
backup_path_if_exists "$CONFIG_BASE_FILE"
backup_path_if_exists "$CONFIG_ROUTE_FILE"
backup_path_if_exists "$CONFIG_OUTBOUNDS_FILE"
backup_path_if_exists "$LEGACY_CONFIG_OUTBOUNDS_FILE"
backup_path_if_exists "$CONFIG_DIR/config.json"
backup_path_if_exists "$SERVICE_FILE"
cat > "$CONFIG_BASE_FILE" <<EOF cat > "$CONFIG_BASE_FILE" <<EOF
{ {
"log": { "log": {
@@ -507,32 +887,41 @@ ${DNS_SERVER_JSON}
"services": [ "services": [
${SERVICE_JSON} ${SERVICE_JSON}
], ],
"inbounds": [], "inbounds": []
"route": {
"rules": [
{
"protocol": "dns",
"action": "hijack-dns"
}
],
"auto_detect_interface": true
}
} }
EOF EOF
cat > "$CONFIG_OUTBOUNDS_FILE" <<EOF normalize_outbound_config_layout
{
"outbounds": [ if [[ -f "$CONFIG_DIR/config.json" ]]; then
{ rm -f "$CONFIG_ROUTE_FILE" "$CONFIG_OUTBOUNDS_FILE"
"type": "direct", if ! extract_legacy_config_sections "$CONFIG_DIR/config.json"; then
"tag": "direct" echo -e "${YELLOW}Legacy config detected but route/outbounds could not be extracted automatically. Writing default route/outbound files.${NC}"
} fi
] if [[ ! -f "$CONFIG_ROUTE_FILE" ]]; then
} write_default_route_config
EOF fi
if [[ ! -f "$CONFIG_OUTBOUNDS_FILE" ]]; then
write_default_outbound_config
fi
archive_legacy_config_json
else
if [[ -f "$CONFIG_ROUTE_FILE" ]]; then
echo -e "${YELLOW}Preserving existing route config: ${CONFIG_ROUTE_FILE}${NC}"
else
write_default_route_config
fi
if [[ -f "$CONFIG_OUTBOUNDS_FILE" ]]; then
echo -e "${YELLOW}Preserving existing outbound config: ${CONFIG_OUTBOUNDS_FILE}${NC}"
else
write_default_outbound_config
fi
fi
echo -e "${GREEN}Base configuration written to $CONFIG_BASE_FILE${NC}" echo -e "${GREEN}Base configuration written to $CONFIG_BASE_FILE${NC}"
echo -e "${GREEN}Outbound configuration written to $CONFIG_OUTBOUNDS_FILE${NC}" echo -e "${GREEN}Route configuration ready at $CONFIG_ROUTE_FILE${NC}"
echo -e "${GREEN}Outbound configuration ready at $CONFIG_OUTBOUNDS_FILE${NC}"
echo -e "${YELLOW}Edit $CONFIG_OUTBOUNDS_FILE when adding custom sing-box outbounds.${NC}" echo -e "${YELLOW}Edit $CONFIG_OUTBOUNDS_FILE when adding custom sing-box outbounds.${NC}"
if [[ "$ENABLE_PROXY_PROTOCOL_HINT" =~ ^([yY][eE][sS]|[yY]|1|true|TRUE)$ ]]; then if [[ "$ENABLE_PROXY_PROTOCOL_HINT" =~ ^([yY][eE][sS]|[yY]|1|true|TRUE)$ ]]; then
@@ -570,7 +959,12 @@ systemctl restart "$SERVICE_NAME"
prompt_uninstall_v2bx prompt_uninstall_v2bx
echo -e "${GREEN}Service installed and started successfully.${NC}" if [[ "$EXISTING_INSTALL" -eq 1 ]]; then
echo -e "${GREEN}Service updated and restarted successfully.${NC}"
echo -e "${GREEN}Configuration has been normalized to the split config.d layout.${NC}"
else
echo -e "${GREEN}Service installed and started successfully.${NC}"
fi
echo -e "${GREEN}Check status with: systemctl status ${SERVICE_NAME}${NC}" echo -e "${GREEN}Check status with: systemctl status ${SERVICE_NAME}${NC}"
echo -e "${GREEN}View logs with: journalctl -u ${SERVICE_NAME} -f${NC}" echo -e "${GREEN}View logs with: journalctl -u ${SERVICE_NAME} -f${NC}"
echo -e "${GREEN}Panel config endpoint must control PROXY protocol via accept_proxy_protocol when needed.${NC}" echo -e "${GREEN}Panel config endpoint must control PROXY protocol via accept_proxy_protocol when needed.${NC}"

View File

@@ -792,9 +792,15 @@ func (r *Router) actionResolve(ctx context.Context, metadata *adapter.InboundCon
if metadata.Destination.IsDomain() { if metadata.Destination.IsDomain() {
var transport adapter.DNSTransport var transport adapter.DNSTransport
if action.Server != "" { if action.Server != "" {
var loaded bool var (
transport, loaded = r.dnsTransport.Transport(action.Server) loaded bool
ambiguous bool
)
transport, loaded, ambiguous = adapter.LookupDNSTransport(r.dnsTransport, action.Server)
if !loaded { if !loaded {
if ambiguous {
return E.New("DNS server is ambiguous: ", action.Server)
}
return E.New("DNS server not found: ", action.Server) return E.New("DNS server not found: ", action.Server)
} }
} }

View File

@@ -0,0 +1,821 @@
package route
import (
"context"
"errors"
"net"
"net/netip"
"strings"
"time"
"github.com/sagernet/sing-box/adapter"
"github.com/sagernet/sing-box/common/sniff"
C "github.com/sagernet/sing-box/constant"
R "github.com/sagernet/sing-box/route/rule"
"github.com/sagernet/sing-mux"
"github.com/sagernet/sing-tun"
"github.com/sagernet/sing-tun/ping"
"github.com/sagernet/sing-vmess"
"github.com/sagernet/sing/common"
"github.com/sagernet/sing/common/buf"
"github.com/sagernet/sing/common/bufio"
"github.com/sagernet/sing/common/bufio/deadline"
E "github.com/sagernet/sing/common/exceptions"
F "github.com/sagernet/sing/common/format"
M "github.com/sagernet/sing/common/metadata"
N "github.com/sagernet/sing/common/network"
"github.com/sagernet/sing/common/uot"
"golang.org/x/exp/slices"
)
// Deprecated: use RouteConnectionEx instead.
//
// RouteConnection routes a TCP connection synchronously: it blocks until the
// connection is closed (or the router's context is canceled) and surfaces
// routing errors directly to the caller.
func (r *Router) RouteConnection(ctx context.Context, conn net.Conn, metadata adapter.InboundContext) error {
	finished := make(chan interface{})
	routeErr := r.routeConnection(ctx, conn, metadata, N.OnceClose(func(it error) {
		close(finished)
	}))
	if routeErr != nil {
		return routeErr
	}
	// Wait for the connection to finish, or for router shutdown.
	select {
	case <-finished:
	case <-r.ctx.Done():
	}
	return nil
}
// RouteConnectionEx routes a TCP connection asynchronously. On routing
// failure it closes the connection via the handshake-failure helper and logs:
// expected closures/rejections at debug level, everything else as an error.
func (r *Router) RouteConnectionEx(ctx context.Context, conn net.Conn, metadata adapter.InboundContext, onClose N.CloseHandlerFunc) {
	if err := r.routeConnection(ctx, conn, metadata, onClose); err != nil {
		N.CloseOnHandshakeFailure(conn, onClose, err)
		switch {
		case E.IsClosedOrCanceled(err), R.IsRejected(err):
			r.logger.DebugContext(ctx, "connection closed: ", err)
		default:
			r.logger.ErrorContext(ctx, err)
		}
	}
}
// routeConnection performs rule matching for one inbound TCP connection and
// dispatches it to the selected outbound (or rejects / hijacks it).
//
// Steps:
//  1. Follow an inbound detour if set, guarding against routing loops.
//  2. Reject legacy "global" special destinations (mux / UoT magic FQDNs).
//  3. Run the rule chain via matchRule, which may sniff and buffer bytes.
//  4. Resolve the matched action into an outbound; replay sniffed buffers;
//     attach connection trackers; hand off to the outbound.
//
// Every error path after matchRule must release the sniff buffers.
func (r *Router) routeConnection(ctx context.Context, conn net.Conn, metadata adapter.InboundContext, onClose N.CloseHandlerFunc) error {
	//nolint:staticcheck
	if metadata.InboundDetour != "" {
		if metadata.LastInbound == metadata.InboundDetour {
			return E.New("routing loop on detour: ", metadata.InboundDetour)
		}
		detour, loaded := r.inbound.Get(metadata.InboundDetour)
		if !loaded {
			return E.New("inbound detour not found: ", metadata.InboundDetour)
		}
		injectable, isInjectable := detour.(adapter.TCPInjectableInbound)
		if !isInjectable {
			return E.New("inbound detour is not TCP injectable: ", metadata.InboundDetour)
		}
		// Re-enter routing through the detour inbound; LastInbound records
		// where we came from so the loop check above can catch cycles.
		metadata.LastInbound = metadata.Inbound
		metadata.Inbound = metadata.InboundDetour
		metadata.InboundDetour = ""
		injectable.NewConnectionEx(ctx, conn, metadata, onClose)
		return nil
	}
	metadata.Network = N.NetworkTCP
	// Legacy magic destinations removed in sing-box v1.7.0.
	switch metadata.Destination.Fqdn {
	case mux.Destination.Fqdn:
		return E.New("global multiplex is deprecated since sing-box v1.7.0, enable multiplex in Inbound fields instead.")
	case vmess.MuxDestination.Fqdn:
		return E.New("global multiplex (v2ray legacy) not supported since sing-box v1.7.0.")
	case uot.MagicAddress:
		return E.New("global UoT not supported since sing-box v1.7.0.")
	case uot.LegacyMagicAddress:
		return E.New("global UoT (legacy) not supported since sing-box v1.7.0.")
	}
	if deadline.NeedAdditionalReadDeadline(conn) {
		conn = deadline.NewConn(conn)
	}
	selectedRule, _, buffers, _, err := r.matchRule(ctx, &metadata, false, false, conn, nil)
	if err != nil {
		return err
	}
	var selectedOutbound adapter.Outbound
	if selectedRule != nil {
		switch action := selectedRule.Action().(type) {
		case *R.RuleActionRoute:
			var loaded bool
			selectedOutbound, loaded = r.outbound.Outbound(action.Outbound)
			if !loaded {
				buf.ReleaseMulti(buffers)
				return E.New("outbound not found: ", action.Outbound)
			}
			if !common.Contains(selectedOutbound.Network(), N.NetworkTCP) {
				buf.ReleaseMulti(buffers)
				return E.New("TCP is not supported by outbound: ", selectedOutbound.Tag())
			}
		case *R.RuleActionBypass:
			// NOTE(review): matchRule only selects an outbound-less bypass rule
			// when supportBypass is true, and we call it with false above, so
			// this break should be unreachable in practice — confirm, since a
			// nil selectedOutbound here would reach the dispatch code below.
			if action.Outbound == "" {
				break
			}
			var loaded bool
			selectedOutbound, loaded = r.outbound.Outbound(action.Outbound)
			if !loaded {
				buf.ReleaseMulti(buffers)
				return E.New("outbound not found: ", action.Outbound)
			}
			if !common.Contains(selectedOutbound.Network(), N.NetworkTCP) {
				buf.ReleaseMulti(buffers)
				return E.New("TCP is not supported by outbound: ", selectedOutbound.Tag())
			}
		case *R.RuleActionReject:
			buf.ReleaseMulti(buffers)
			if action.Method == C.RuleActionRejectMethodReply {
				return E.New("reject method `reply` is not supported for TCP connections")
			}
			return action.Error(ctx)
		case *R.RuleActionHijackDNS:
			// Replay sniffed bytes, then treat the stream as a DNS exchange.
			for _, buffer := range buffers {
				conn = bufio.NewCachedConn(conn, buffer)
			}
			N.CloseOnHandshakeFailure(conn, onClose, r.hijackDNSStream(ctx, conn, metadata))
			return nil
		}
	}
	if selectedRule == nil {
		defaultOutbound := r.outbound.Default()
		if !common.Contains(defaultOutbound.Network(), N.NetworkTCP) {
			buf.ReleaseMulti(buffers)
			return E.New("TCP is not supported by default outbound: ", defaultOutbound.Tag())
		}
		selectedOutbound = defaultOutbound
	}
	// Replay any bytes consumed during sniffing ahead of the live stream.
	for _, buffer := range buffers {
		conn = bufio.NewCachedConn(conn, buffer)
	}
	for _, tracker := range r.trackers {
		conn = tracker.RoutedConnection(ctx, conn, metadata, selectedRule, selectedOutbound)
	}
	if outboundHandler, isHandler := selectedOutbound.(adapter.ConnectionHandlerEx); isHandler {
		outboundHandler.NewConnectionEx(ctx, conn, metadata, onClose)
	} else {
		r.connection.NewConnection(ctx, selectedOutbound, conn, metadata, onClose)
	}
	return nil
}
// RoutePacketConnection routes a packet (UDP) connection synchronously:
// it logs and closes the connection on routing failure, then blocks until
// the connection completes or the router's context is canceled.
func (r *Router) RoutePacketConnection(ctx context.Context, conn N.PacketConn, metadata adapter.InboundContext) error {
	finished := make(chan interface{})
	routeErr := r.routePacketConnection(ctx, conn, metadata, N.OnceClose(func(it error) {
		close(finished)
	}))
	if routeErr != nil {
		conn.Close()
		switch {
		case E.IsClosedOrCanceled(routeErr), R.IsRejected(routeErr):
			r.logger.DebugContext(ctx, "connection closed: ", routeErr)
		default:
			r.logger.ErrorContext(ctx, routeErr)
		}
	}
	select {
	case <-finished:
	case <-r.ctx.Done():
	}
	return nil
}
// RoutePacketConnectionEx routes a packet (UDP) connection asynchronously.
// On routing failure the connection is closed via the handshake-failure
// helper; expected closures/rejections log at debug, the rest as errors.
func (r *Router) RoutePacketConnectionEx(ctx context.Context, conn N.PacketConn, metadata adapter.InboundContext, onClose N.CloseHandlerFunc) {
	if err := r.routePacketConnection(ctx, conn, metadata, onClose); err != nil {
		N.CloseOnHandshakeFailure(conn, onClose, err)
		switch {
		case E.IsClosedOrCanceled(err), R.IsRejected(err):
			r.logger.DebugContext(ctx, "connection closed: ", err)
		default:
			r.logger.ErrorContext(ctx, err)
		}
	}
}
// routePacketConnection performs rule matching for one inbound UDP (packet)
// connection and dispatches it to the selected outbound.
//
// Flow mirrors routeConnection:
//  1. Follow an inbound detour if set, guarding against routing loops.
//  2. Run the rule chain via matchRule, which may sniff and buffer packets.
//  3. Resolve the matched action into an outbound (or reject / hijack DNS).
//  4. Replay buffered packets, attach trackers, undo fakeip NAT, dispatch.
//
// Every error path after matchRule must release the captured packet buffers.
func (r *Router) routePacketConnection(ctx context.Context, conn N.PacketConn, metadata adapter.InboundContext, onClose N.CloseHandlerFunc) error {
	//nolint:staticcheck
	if metadata.InboundDetour != "" {
		if metadata.LastInbound == metadata.InboundDetour {
			return E.New("routing loop on detour: ", metadata.InboundDetour)
		}
		detour, loaded := r.inbound.Get(metadata.InboundDetour)
		if !loaded {
			return E.New("inbound detour not found: ", metadata.InboundDetour)
		}
		injectable, isInjectable := detour.(adapter.UDPInjectableInbound)
		if !isInjectable {
			return E.New("inbound detour is not UDP injectable: ", metadata.InboundDetour)
		}
		metadata.LastInbound = metadata.Inbound
		metadata.Inbound = metadata.InboundDetour
		metadata.InboundDetour = ""
		injectable.NewPacketConnectionEx(ctx, conn, metadata, onClose)
		return nil
	}
	// TODO: move to UoT
	metadata.Network = N.NetworkUDP
	// Currently we don't have deadline usages for UDP connections
	/*if deadline.NeedAdditionalReadDeadline(conn) {
		conn = deadline.NewPacketConn(bufio.NewNetPacketConn(conn))
	}*/
	selectedRule, _, _, packetBuffers, err := r.matchRule(ctx, &metadata, false, false, nil, conn)
	if err != nil {
		return err
	}
	var selectedOutbound adapter.Outbound
	if selectedRule != nil {
		switch action := selectedRule.Action().(type) {
		case *R.RuleActionRoute:
			var loaded bool
			selectedOutbound, loaded = r.outbound.Outbound(action.Outbound)
			if !loaded {
				N.ReleaseMultiPacketBuffer(packetBuffers)
				return E.New("outbound not found: ", action.Outbound)
			}
			if !common.Contains(selectedOutbound.Network(), N.NetworkUDP) {
				N.ReleaseMultiPacketBuffer(packetBuffers)
				return E.New("UDP is not supported by outbound: ", selectedOutbound.Tag())
			}
		case *R.RuleActionBypass:
			// matchRule only selects an outbound-less bypass rule when
			// supportBypass is true (we pass false), so Outbound is expected
			// to be non-empty here; the break preserves the original guard.
			if action.Outbound == "" {
				break
			}
			var loaded bool
			selectedOutbound, loaded = r.outbound.Outbound(action.Outbound)
			if !loaded {
				N.ReleaseMultiPacketBuffer(packetBuffers)
				return E.New("outbound not found: ", action.Outbound)
			}
			if !common.Contains(selectedOutbound.Network(), N.NetworkUDP) {
				N.ReleaseMultiPacketBuffer(packetBuffers)
				return E.New("UDP is not supported by outbound: ", selectedOutbound.Tag())
			}
		case *R.RuleActionReject:
			N.ReleaseMultiPacketBuffer(packetBuffers)
			if action.Method == C.RuleActionRejectMethodReply {
				return E.New("reject method `reply` is not supported for UDP connections")
			}
			return action.Error(ctx)
		case *R.RuleActionHijackDNS:
			return r.hijackDNSPacket(ctx, conn, packetBuffers, metadata, onClose)
		}
	}
	// NOTE: a dead `selectReturn` flag (declared but never assigned) was
	// removed from this condition; it was always false.
	if selectedRule == nil {
		defaultOutbound := r.outbound.Default()
		if !common.Contains(defaultOutbound.Network(), N.NetworkUDP) {
			N.ReleaseMultiPacketBuffer(packetBuffers)
			// Message names the default outbound explicitly, matching the
			// parallel TCP path in routeConnection.
			return E.New("UDP is not supported by default outbound: ", defaultOutbound.Tag())
		}
		selectedOutbound = defaultOutbound
	}
	// Replay packets captured during sniffing ahead of the live stream.
	for _, buffer := range packetBuffers {
		conn = bufio.NewCachedPacketConn(conn, buffer.Buffer, buffer.Destination)
		N.PutPacketBuffer(buffer)
	}
	for _, tracker := range r.trackers {
		conn = tracker.RoutedPacketConnection(ctx, conn, metadata, selectedRule, selectedOutbound)
	}
	if metadata.FakeIP {
		// Translate between the fakeip the client used and the real destination.
		conn = bufio.NewNATPacketConn(bufio.NewNetPacketConn(conn), metadata.OriginDestination, metadata.Destination)
	}
	if outboundHandler, isHandler := selectedOutbound.(adapter.PacketConnectionHandlerEx); isHandler {
		outboundHandler.NewPacketConnectionEx(ctx, conn, metadata, onClose)
	} else {
		r.connection.NewPacketConnection(ctx, selectedOutbound, conn, metadata, onClose)
	}
	return nil
}
// PreMatch runs a lightweight pre-routing pass (used before a connection is
// fully accepted, e.g. for TUN direct routes and ICMP). It evaluates the rule
// chain in preMatch mode, handles reject/bypass/route actions, and — when a
// direct-route-capable outbound is selected — opens a direct route
// connection, resolving domain destinations to a concrete IP first.
//
// Returns (nil, nil) when no direct route applies and normal routing should
// proceed.
func (r *Router) PreMatch(metadata adapter.InboundContext, routeContext tun.DirectRouteContext, timeout time.Duration, supportBypass bool) (tun.DirectRouteDestination, error) {
	selectedRule, _, _, _, err := r.matchRule(r.ctx, &metadata, true, supportBypass, nil, nil)
	if err != nil {
		return nil, err
	}
	var directRouteOutbound adapter.DirectRouteOutbound
	if selectedRule != nil {
		switch action := selectedRule.Action().(type) {
		case *R.RuleActionReject:
			switch metadata.Network {
			case N.NetworkTCP:
				if action.Method == C.RuleActionRejectMethodReply {
					return nil, E.New("reject method `reply` is not supported for TCP connections")
				}
			case N.NetworkUDP:
				if action.Method == C.RuleActionRejectMethodReply {
					return nil, E.New("reject method `reply` is not supported for UDP connections")
				}
			}
			return nil, action.Error(context.Background())
		case *R.RuleActionBypass:
			if supportBypass {
				// Caller handles the bypass itself (wrapped sentinel error).
				return nil, &R.BypassedError{Cause: tun.ErrBypass}
			}
			if routeContext == nil {
				return nil, nil
			}
			outbound, loaded := r.outbound.Outbound(action.Outbound)
			if !loaded {
				return nil, E.New("outbound not found: ", action.Outbound)
			}
			if !common.Contains(outbound.Network(), metadata.Network) {
				return nil, E.New(metadata.Network, " is not supported by outbound: ", action.Outbound)
			}
			// NOTE(review): unchecked assertion — panics if the outbound does
			// not implement adapter.DirectRouteOutbound; confirm this is
			// guaranteed by outbound registration.
			directRouteOutbound = outbound.(adapter.DirectRouteOutbound)
		case *R.RuleActionRoute:
			if routeContext == nil {
				return nil, nil
			}
			outbound, loaded := r.outbound.Outbound(action.Outbound)
			if !loaded {
				return nil, E.New("outbound not found: ", action.Outbound)
			}
			if !common.Contains(outbound.Network(), metadata.Network) {
				return nil, E.New(metadata.Network, " is not supported by outbound: ", action.Outbound)
			}
			// NOTE(review): same unchecked assertion as the bypass branch.
			directRouteOutbound = outbound.(adapter.DirectRouteOutbound)
		}
	}
	if directRouteOutbound == nil {
		// Only ICMP traffic with no matching rule falls through to the
		// default outbound; everything else proceeds with normal routing.
		if selectedRule != nil || metadata.Network != N.NetworkICMP {
			return nil, nil
		}
		defaultOutbound := r.outbound.Default()
		if !common.Contains(defaultOutbound.Network(), metadata.Network) {
			return nil, E.New(metadata.Network, " is not supported by default outbound: ", defaultOutbound.Tag())
		}
		directRouteOutbound = defaultOutbound.(adapter.DirectRouteOutbound)
	}
	if metadata.Destination.IsDomain() {
		// Direct routes need a concrete IP: resolve the domain, preferring
		// the address family matching the source address.
		if len(metadata.DestinationAddresses) == 0 {
			var strategy C.DomainStrategy
			if metadata.Source.IsIPv4() {
				strategy = C.DomainStrategyIPv4Only
			} else {
				strategy = C.DomainStrategyIPv6Only
			}
			err = r.actionResolve(r.ctx, &metadata, &R.RuleActionResolve{
				Strategy: strategy,
			})
			if err != nil {
				return nil, err
			}
		}
		var newDestination netip.Addr
		if metadata.Source.IsIPv4() {
			for _, address := range metadata.DestinationAddresses {
				if address.Is4() {
					newDestination = address
					break
				}
			}
		} else {
			for _, address := range metadata.DestinationAddresses {
				if address.Is6() {
					newDestination = address
					break
				}
			}
		}
		if !newDestination.IsValid() {
			if metadata.Source.IsIPv4() {
				return nil, E.New("no IPv4 address found for domain: ", metadata.Destination.Fqdn)
			} else {
				return nil, E.New("no IPv6 address found for domain: ", metadata.Destination.Fqdn)
			}
		}
		metadata.Destination = M.Socksaddr{
			Addr: newDestination,
		}
		// Rewrite packet destinations so replies still carry the original
		// (pre-resolution) destination address toward the client.
		routeContext = ping.NewContextDestinationWriter(routeContext, metadata.OriginDestination.Addr)
		var routeDestination tun.DirectRouteDestination
		routeDestination, err = directRouteOutbound.NewDirectRouteConnection(metadata, routeContext, timeout)
		if err != nil {
			return nil, err
		}
		return ping.NewDestinationWriter(routeDestination, newDestination), nil
	}
	return directRouteOutbound.NewDirectRouteConnection(metadata, routeContext, timeout)
}
// matchRule evaluates the routing rule chain against metadata and returns the
// first terminal rule that matched (route / reject / hijack-dns / selectable
// bypass), along with any bytes captured by sniff actions along the way.
//
// Before rule evaluation it enriches metadata in place: process lookup (when
// a process searcher is configured), fakeip reverse lookup, DNS
// reverse-mapping lookup, and IP version tagging.
//
// preMatch selects the cheaper pre-routing pass used by PreMatch: sniff
// actions are not executed there (non-ICMP sniff rules terminate matching
// instead). supportBypass controls whether outbound-less bypass rules may be
// selected; when false they are skipped. inputConn / inputPacketConn are only
// used by sniff actions.
//
// The caller owns the returned buffers / packetBuffers and must release them
// on every error path. fatalErr aborts routing entirely.
func (r *Router) matchRule(
	ctx context.Context, metadata *adapter.InboundContext, preMatch bool, supportBypass bool,
	inputConn net.Conn, inputPacketConn N.PacketConn,
) (
	selectedRule adapter.Rule, selectedRuleIndex int,
	buffers []*buf.Buffer, packetBuffers []*N.PacketBuffer, fatalErr error,
) {
	// Attach process information (path / package / user) when available.
	if r.processSearcher != nil && metadata.ProcessInfo == nil {
		var originDestination netip.AddrPort
		if metadata.OriginDestination.IsValid() {
			originDestination = metadata.OriginDestination.AddrPort()
		} else if metadata.Destination.IsIP() {
			originDestination = metadata.Destination.AddrPort()
		}
		processInfo, fErr := r.findProcessInfoCached(ctx, metadata.Network, metadata.Source.AddrPort(), originDestination)
		if fErr != nil {
			r.logger.InfoContext(ctx, "failed to search process: ", fErr)
		} else {
			if processInfo.ProcessPath != "" {
				if processInfo.UserName != "" {
					r.logger.InfoContext(ctx, "found process path: ", processInfo.ProcessPath, ", user: ", processInfo.UserName)
				} else if processInfo.UserId != -1 {
					r.logger.InfoContext(ctx, "found process path: ", processInfo.ProcessPath, ", user id: ", processInfo.UserId)
				} else {
					r.logger.InfoContext(ctx, "found process path: ", processInfo.ProcessPath)
				}
			} else if len(processInfo.AndroidPackageNames) > 0 {
				r.logger.InfoContext(ctx, "found package name: ", strings.Join(processInfo.AndroidPackageNames, ", "))
			} else if processInfo.UserId != -1 {
				if processInfo.UserName != "" {
					r.logger.InfoContext(ctx, "found user: ", processInfo.UserName)
				} else {
					r.logger.InfoContext(ctx, "found user id: ", processInfo.UserId)
				}
			}
			metadata.ProcessInfo = processInfo
		}
	}
	// Map a fakeip destination back to its original domain, or fall back to
	// the DNS reverse mapping when no domain is known yet.
	if metadata.Destination.Addr.IsValid() && r.dnsTransport.FakeIP() != nil && r.dnsTransport.FakeIP().Store().Contains(metadata.Destination.Addr) {
		domain, loaded := r.dnsTransport.FakeIP().Store().Lookup(metadata.Destination.Addr)
		if !loaded {
			fatalErr = E.New("missing fakeip record, try enable `experimental.cache_file`")
			return
		}
		if domain != "" {
			metadata.OriginDestination = metadata.Destination
			metadata.Destination = M.Socksaddr{
				Fqdn: domain,
				Port: metadata.Destination.Port,
			}
			metadata.FakeIP = true
			r.logger.DebugContext(ctx, "found fakeip domain: ", domain)
		}
	} else if metadata.Domain == "" {
		domain, loaded := r.dns.LookupReverseMapping(metadata.Destination.Addr)
		if loaded {
			metadata.Domain = domain
			r.logger.DebugContext(ctx, "found reserve mapped domain: ", metadata.Domain)
		}
	}
	if metadata.Destination.IsIPv4() {
		metadata.IPVersion = 4
	} else if metadata.Destination.IsIPv6() {
		metadata.IPVersion = 6
	}
match:
	for currentRuleIndex, currentRule := range r.rules {
		metadata.ResetRuleCache()
		if !currentRule.Match(metadata) {
			continue
		}
		// Log the match; in preMatch mode only reject actions are logged.
		if !preMatch {
			ruleDescription := currentRule.String()
			if ruleDescription != "" {
				r.logger.DebugContext(ctx, "match[", currentRuleIndex, "] ", currentRule, " => ", currentRule.Action())
			} else {
				r.logger.DebugContext(ctx, "match[", currentRuleIndex, "] => ", currentRule.Action())
			}
		} else {
			switch currentRule.Action().Type() {
			case C.RuleActionTypeReject:
				ruleDescription := currentRule.String()
				if ruleDescription != "" {
					r.logger.DebugContext(ctx, "pre-match[", currentRuleIndex, "] ", currentRule, " => ", currentRule.Action())
				} else {
					r.logger.DebugContext(ctx, "pre-match[", currentRuleIndex, "] => ", currentRule.Action())
				}
			}
		}
		// Apply route options (overrides, network strategy, timeouts, TLS
		// fragmentation flags) from route or route-options actions.
		var routeOptions *R.RuleActionRouteOptions
		switch action := currentRule.Action().(type) {
		case *R.RuleActionRoute:
			routeOptions = &action.RuleActionRouteOptions
		case *R.RuleActionRouteOptions:
			routeOptions = action
		}
		if routeOptions != nil {
			// TODO: add nat
			// Remember the pre-override destination the first time an
			// address/port override applies.
			if (routeOptions.OverrideAddress.IsValid() || routeOptions.OverridePort > 0) && !metadata.RouteOriginalDestination.IsValid() {
				metadata.RouteOriginalDestination = metadata.Destination
			}
			if routeOptions.OverrideAddress.IsValid() {
				metadata.Destination = M.Socksaddr{
					Addr: routeOptions.OverrideAddress.Addr,
					Port: metadata.Destination.Port,
					Fqdn: routeOptions.OverrideAddress.Fqdn,
				}
				// Previously resolved addresses no longer apply.
				metadata.DestinationAddresses = nil
			}
			if routeOptions.OverridePort > 0 {
				metadata.Destination = M.Socksaddr{
					Addr: metadata.Destination.Addr,
					Port: routeOptions.OverridePort,
					Fqdn: metadata.Destination.Fqdn,
				}
			}
			if routeOptions.NetworkStrategy != nil {
				metadata.NetworkStrategy = routeOptions.NetworkStrategy
			}
			if len(routeOptions.NetworkType) > 0 {
				metadata.NetworkType = routeOptions.NetworkType
			}
			if len(routeOptions.FallbackNetworkType) > 0 {
				metadata.FallbackNetworkType = routeOptions.FallbackNetworkType
			}
			if routeOptions.FallbackDelay != 0 {
				metadata.FallbackDelay = routeOptions.FallbackDelay
			}
			if routeOptions.UDPDisableDomainUnmapping {
				metadata.UDPDisableDomainUnmapping = true
			}
			if routeOptions.UDPConnect {
				metadata.UDPConnect = true
			}
			if routeOptions.UDPTimeout > 0 {
				metadata.UDPTimeout = routeOptions.UDPTimeout
			}
			if routeOptions.TLSFragment {
				metadata.TLSFragment = true
				metadata.TLSFragmentFallbackDelay = routeOptions.TLSFragmentFallbackDelay
			}
			if routeOptions.TLSRecordFragment {
				metadata.TLSRecordFragment = true
			}
		}
		// Execute non-terminal actions (sniff / resolve) inline.
		switch action := currentRule.Action().(type) {
		case *R.RuleActionSniff:
			if !preMatch {
				newBuffer, newPacketBuffers, newErr := r.actionSniff(ctx, metadata, action, inputConn, inputPacketConn, buffers, packetBuffers)
				if newBuffer != nil {
					buffers = append(buffers, newBuffer)
				} else if len(newPacketBuffers) > 0 {
					packetBuffers = append(packetBuffers, newPacketBuffers...)
				}
				if newErr != nil {
					fatalErr = newErr
					return
				}
			} else if metadata.Network != N.NetworkICMP {
				// Pre-match cannot sniff; stop here so full routing will.
				selectedRule = currentRule
				selectedRuleIndex = currentRuleIndex
				break match
			}
		case *R.RuleActionResolve:
			fatalErr = r.actionResolve(ctx, metadata, action)
			if fatalErr != nil {
				return
			}
		}
		// Terminal actions end rule matching.
		actionType := currentRule.Action().Type()
		if actionType == C.RuleActionTypeRoute ||
			actionType == C.RuleActionTypeReject ||
			actionType == C.RuleActionTypeHijackDNS {
			selectedRule = currentRule
			selectedRuleIndex = currentRuleIndex
			break match
		}
		if actionType == C.RuleActionTypeBypass {
			bypassAction := currentRule.Action().(*R.RuleActionBypass)
			// Outbound-less bypass is only selectable when the caller
			// supports it (PreMatch with supportBypass); otherwise skip.
			if !supportBypass && bypassAction.Outbound == "" {
				continue match
			}
			selectedRule = currentRule
			selectedRuleIndex = currentRuleIndex
			break match
		}
	}
	return
}
// actionSniff executes a sniff rule action: it peeks protocol / domain /
// client information from the connection without consuming it, recording the
// results in metadata (Protocol, Domain, Client, SnifferNames, SniffError).
//
// For stream connections it peeks from inputConn into a fresh buffer. For
// packet connections it first re-sniffs packets already captured by earlier
// sniff actions (inputPacketBuffers), then keeps reading further packets as
// long as the QUIC sniffer reports ErrNeedMoreData (fragmented client hello).
//
// The returned buffer / packetBuffers hold bytes consumed while sniffing and
// must be replayed to the outbound by the caller. fatalErr is fatal for the
// whole connection (read failure or canceled context).
func (r *Router) actionSniff(
	ctx context.Context, metadata *adapter.InboundContext, action *R.RuleActionSniff,
	inputConn net.Conn, inputPacketConn N.PacketConn, inputBuffers []*buf.Buffer, inputPacketBuffers []*N.PacketBuffer,
) (buffer *buf.Buffer, packetBuffers []*N.PacketBuffer, fatalErr error) {
	if sniff.Skip(metadata) {
		r.logger.DebugContext(ctx, "sniff skipped due to port considered as server-first")
		return
	} else if metadata.Protocol != "" {
		// A previous sniff action already identified the protocol.
		r.logger.DebugContext(ctx, "duplicate sniff skipped")
		return
	}
	if inputConn != nil {
		// Stream (TCP) sniffing path.
		if len(action.StreamSniffers) == 0 && len(action.PacketSniffers) > 0 {
			// Action configures only packet sniffers; nothing to do here.
			return
		} else if slices.Equal(metadata.SnifferNames, action.SnifferNames) && metadata.SniffError != nil && !errors.Is(metadata.SniffError, sniff.ErrNeedMoreData) {
			// NOTE(review): this is the stream branch but the message says
			// "packet sniff skipped" — looks like a copy/paste from the
			// packet branch below; confirm against upstream before changing.
			r.logger.DebugContext(ctx, "packet sniff skipped due to previous error: ", metadata.SniffError)
			return
		}
		var streamSniffers []sniff.StreamSniffer
		if len(action.StreamSniffers) > 0 {
			streamSniffers = action.StreamSniffers
		} else {
			// Default stream sniffer set when the action names none.
			streamSniffers = []sniff.StreamSniffer{
				sniff.TLSClientHello,
				sniff.HTTPHost,
				sniff.StreamDomainNameQuery,
				sniff.BitTorrent,
				sniff.SSH,
				sniff.RDP,
			}
		}
		sniffBuffer := buf.NewPacket()
		err := sniff.PeekStream(
			ctx,
			metadata,
			inputConn,
			inputBuffers,
			sniffBuffer,
			action.Timeout,
			streamSniffers...,
		)
		metadata.SnifferNames = action.SnifferNames
		metadata.SniffError = err
		if err == nil {
			//goland:noinspection GoDeprecation
			if action.OverrideDestination && M.IsDomainName(metadata.Domain) {
				// Replace the IP destination with the sniffed domain.
				metadata.Destination = M.Socksaddr{
					Fqdn: metadata.Domain,
					Port: metadata.Destination.Port,
				}
			}
			if metadata.Domain != "" && metadata.Client != "" {
				r.logger.DebugContext(ctx, "sniffed protocol: ", metadata.Protocol, ", domain: ", metadata.Domain, ", client: ", metadata.Client)
			} else if metadata.Domain != "" {
				r.logger.DebugContext(ctx, "sniffed protocol: ", metadata.Protocol, ", domain: ", metadata.Domain)
			} else {
				r.logger.DebugContext(ctx, "sniffed protocol: ", metadata.Protocol)
			}
		}
		// Hand back the peeked bytes (if any) for replay to the outbound.
		if !sniffBuffer.IsEmpty() {
			buffer = sniffBuffer
		} else {
			sniffBuffer.Release()
		}
	} else if inputPacketConn != nil {
		// Packet (UDP) sniffing path.
		if len(action.PacketSniffers) == 0 && len(action.StreamSniffers) > 0 {
			// Action configures only stream sniffers; nothing to do here.
			return
		} else if slices.Equal(metadata.SnifferNames, action.SnifferNames) && metadata.SniffError != nil && !errors.Is(metadata.SniffError, sniff.ErrNeedMoreData) {
			r.logger.DebugContext(ctx, "packet sniff skipped due to previous error: ", metadata.SniffError)
			return
		}
		// True when a previous attempt with the same sniffer set stopped at a
		// fragmented QUIC client hello — then only QUIC sniffing continues.
		quicMoreData := func() bool {
			return slices.Equal(metadata.SnifferNames, action.SnifferNames) && errors.Is(metadata.SniffError, sniff.ErrNeedMoreData)
		}
		var packetSniffers []sniff.PacketSniffer
		if len(action.PacketSniffers) > 0 {
			packetSniffers = action.PacketSniffers
		} else {
			// Default packet sniffer set when the action names none.
			packetSniffers = []sniff.PacketSniffer{
				sniff.DomainNameQuery,
				sniff.QUICClientHello,
				sniff.STUNMessage,
				sniff.UTP,
				sniff.UDPTracker,
				sniff.DTLSRecord,
				sniff.NTP,
			}
		}
		var err error
		// First, re-sniff packets already captured by earlier actions.
		for _, packetBuffer := range inputPacketBuffers {
			if quicMoreData() {
				err = sniff.PeekPacket(
					ctx,
					metadata,
					packetBuffer.Buffer.Bytes(),
					sniff.QUICClientHello,
				)
			} else {
				err = sniff.PeekPacket(
					ctx, metadata,
					packetBuffer.Buffer.Bytes(),
					packetSniffers...,
				)
			}
			metadata.SnifferNames = action.SnifferNames
			metadata.SniffError = err
			if errors.Is(err, sniff.ErrNeedMoreData) {
				// TODO: replace with generic message when there are more multi-packet protocols
				r.logger.DebugContext(ctx, "attempt to sniff fragmented QUIC client hello")
				continue
			}
			goto finally
		}
		// NOTE(review): from here the input buffers become the return value
		// and new packets are appended to them — verify the caller does not
		// also retain inputPacketBuffers, or entries could be duplicated.
		packetBuffers = inputPacketBuffers
		// Keep reading packets until sniffing concludes or times out.
		for {
			var (
				sniffBuffer = buf.NewPacket()
				destination M.Socksaddr
				done        = make(chan struct{})
			)
			go func() {
				sniffTimeout := C.ReadPayloadTimeout
				if action.Timeout > 0 {
					sniffTimeout = action.Timeout
				}
				inputPacketConn.SetReadDeadline(time.Now().Add(sniffTimeout))
				destination, err = inputPacketConn.ReadPacket(sniffBuffer)
				inputPacketConn.SetReadDeadline(time.Time{})
				close(done)
			}()
			select {
			case <-done:
			case <-ctx.Done():
				inputPacketConn.Close()
				fatalErr = ctx.Err()
				return
			}
			if err != nil {
				sniffBuffer.Release()
				if !errors.Is(err, context.DeadlineExceeded) {
					fatalErr = err
					return
				}
				// Deadline exceeded: give up sniffing, route with what we have.
			} else {
				if quicMoreData() {
					err = sniff.PeekPacket(
						ctx,
						metadata,
						sniffBuffer.Bytes(),
						sniff.QUICClientHello,
					)
				} else {
					err = sniff.PeekPacket(
						ctx, metadata,
						sniffBuffer.Bytes(),
						packetSniffers...,
					)
				}
				// Every packet read here must be retained for replay.
				packetBuffer := N.NewPacketBuffer()
				*packetBuffer = N.PacketBuffer{
					Buffer:      sniffBuffer,
					Destination: destination,
				}
				packetBuffers = append(packetBuffers, packetBuffer)
				metadata.SnifferNames = action.SnifferNames
				metadata.SniffError = err
				if errors.Is(err, sniff.ErrNeedMoreData) {
					// TODO: replace with generic message when there are more multi-packet protocols
					r.logger.DebugContext(ctx, "attempt to sniff fragmented QUIC client hello")
					continue
				}
			}
			goto finally
		}
	finally:
		if err == nil {
			//goland:noinspection GoDeprecation
			if action.OverrideDestination && M.IsDomainName(metadata.Domain) {
				metadata.Destination = M.Socksaddr{
					Fqdn: metadata.Domain,
					Port: metadata.Destination.Port,
				}
			}
			if metadata.Domain != "" && metadata.Client != "" {
				r.logger.DebugContext(ctx, "sniffed packet protocol: ", metadata.Protocol, ", domain: ", metadata.Domain, ", client: ", metadata.Client)
			} else if metadata.Domain != "" {
				r.logger.DebugContext(ctx, "sniffed packet protocol: ", metadata.Protocol, ", domain: ", metadata.Domain)
			} else if metadata.Client != "" {
				r.logger.DebugContext(ctx, "sniffed packet protocol: ", metadata.Protocol, ", client: ", metadata.Client)
			} else {
				r.logger.DebugContext(ctx, "sniffed packet protocol: ", metadata.Protocol)
			}
		}
	}
	return
}
// actionResolve resolves a domain destination into IP addresses and stores
// them in metadata.DestinationAddresses. Non-domain destinations are a no-op.
// When the action names a DNS server, that transport is looked up by tag and
// used for the query; lookup failures (missing or ambiguous tag) are errors.
func (r *Router) actionResolve(ctx context.Context, metadata *adapter.InboundContext, action *R.RuleActionResolve) error {
	if !metadata.Destination.IsDomain() {
		return nil
	}
	var transport adapter.DNSTransport
	if action.Server != "" {
		serverTransport, found, ambiguous := adapter.LookupDNSTransport(r.dnsTransport, action.Server)
		if !found {
			if ambiguous {
				return E.New("DNS server is ambiguous: ", action.Server)
			}
			return E.New("DNS server not found: ", action.Server)
		}
		transport = serverTransport
	}
	addresses, lookupErr := r.dns.Lookup(adapter.WithContext(ctx, metadata), metadata.Destination.Fqdn, adapter.DNSQueryOptions{
		Transport:    transport,
		Strategy:     action.Strategy,
		DisableCache: action.DisableCache,
		RewriteTTL:   action.RewriteTTL,
		ClientSubnet: action.ClientSubnet,
	})
	if lookupErr != nil {
		return lookupErr
	}
	metadata.DestinationAddresses = addresses
	r.logger.DebugContext(ctx, "resolved [", strings.Join(F.MapToString(metadata.DestinationAddresses), " "), "]")
	return nil
}

62
transit2minio.sh Normal file
View File

@@ -0,0 +1,62 @@
#!/usr/bin/env bash
# Upload a Gitea Actions build artifact (zip) to a MinIO bucket.
# Prompts for a local path or URL, downloads the zip if a URL is given,
# extracts it, and recursively copies the content into $DEST via mc.
set -euo pipefail

DEST="myminio/downloads/singbox"

echo "请输入 Gitea Actions 编译好的 zip 文件路径本地路径或URL"
read -r FILE

# mc 检查
if ! command -v mc >/dev/null 2>&1; then
  echo "错误mc 未安装"
  exit 1
fi
# unzip 检查
if ! command -v unzip >/dev/null 2>&1; then
  echo "错误unzip 未安装"
  exit 1
fi

# Clean up temp artifacts on every exit path (success, error, Ctrl-C).
TMP_ZIP=""
TMP_DIR=""
cleanup() {
  if [[ -n "${TMP_DIR}" ]]; then rm -rf -- "${TMP_DIR}"; fi
  if [[ -n "${TMP_ZIP}" ]]; then rm -f -- "${TMP_ZIP}"; fi
}
trap cleanup EXIT

# 如果是 URL先下载
if [[ "$FILE" =~ ^https?:// ]]; then
  echo "检测到 URL开始下载..."
  # mktemp instead of a predictable /tmp name; curl -f fails on HTTP errors.
  TMP_ZIP="$(mktemp /tmp/transit2minio.XXXXXX.zip)"
  curl -fL "$FILE" -o "$TMP_ZIP"
  FILE="$TMP_ZIP"
fi

# 校验文件
if [[ ! -f "$FILE" ]]; then
  echo "错误:文件不存在 -> $FILE"
  exit 1
fi

# 创建临时解压目录
TMP_DIR="$(mktemp -d)"
echo "解压到:$TMP_DIR"
unzip -q "$FILE" -d "$TMP_DIR"

# 找到解压后的第一层目录(兼容 zip 内有/无顶层目录)
# Glob the extracted entries instead of parsing `ls`; only descend when the
# archive has exactly one top-level directory.
shopt -s nullglob dotglob
entries=("$TMP_DIR"/*)
shopt -u nullglob dotglob
TARGET_DIR="$TMP_DIR"
if [[ ${#entries[@]} -eq 1 && -d "${entries[0]}" ]]; then
  TARGET_DIR="${entries[0]}"
fi

echo "开始上传目录内容到 MinIO$DEST"
# 复制目录内所有内容(不是整个文件夹)
mc cp --recursive "$TARGET_DIR/" "$DEST/"
echo "上传完成 ✅"

141
update.sh Normal file
View File

@@ -0,0 +1,141 @@
#!/bin/bash
# sing-box binary update script: downloads the release binary for the current
# architecture, validates it, installs it over BINARY_PATH, and restarts the
# detected systemd service (rolling back on restart failure).
set -euo pipefail

# ANSI colors for status output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

# Overridable via environment: install path, release mirror, service name.
BINARY_PATH="${BINARY_PATH:-/usr/local/bin/sing-box}"
RELEASE_BASE_URL="${RELEASE_BASE_URL:-https://s3.cloudyun.top/downloads/singbox}"
# Candidate systemd unit names, tried in order; first entry honors $SERVICE_NAME.
SERVICE_CANDIDATES=("${SERVICE_NAME:-}" "singbox" "sing-box")

echo -e "${GREEN}sing-box binary update script${NC}"

# Installing to system paths and restarting services requires root.
if [[ ${EUID} -ne 0 ]]; then
  echo -e "${RED}This script must be run as root${NC}"
  exit 1
fi

OS="$(uname -s)"
if [[ "$OS" != "Linux" ]]; then
  echo -e "${RED}This update script currently supports Linux only. Current OS: ${OS}${NC}"
  exit 1
fi

# Map the kernel architecture to the release naming scheme.
ARCH="$(uname -m)"
case "$ARCH" in
  x86_64) BINARY_ARCH="amd64" ;;
  aarch64|arm64) BINARY_ARCH="arm64" ;;
  armv7l|armv7) BINARY_ARCH="armv7" ;;
  *)
    echo -e "${RED}Unsupported architecture: ${ARCH}${NC}"
    exit 1
    ;;
esac

DOWNLOAD_TARGET="${DOWNLOAD_TARGET:-linux-${BINARY_ARCH}}"
DOWNLOAD_URL="${DOWNLOAD_URL:-${RELEASE_BASE_URL}/sing-box-${DOWNLOAD_TARGET}}"

# Scratch files: the freshly downloaded binary and a backup of the current one.
TMP_BINARY="$(mktemp)"
BACKUP_BINARY="$(mktemp)"
cleanup() {
  rm -f "${TMP_BINARY}" "${BACKUP_BINARY}"
}
trap cleanup EXIT
# Resolve the first existing systemd service unit among SERVICE_CANDIDATES.
# Outputs the unit name (without .service) on stdout; returns 1 when none match.
detect_service_name() {
  local unit
  for unit in "${SERVICE_CANDIDATES[@]}"; do
    [[ -n "${unit}" ]] || continue
    if systemctl list-unit-files --type=service --no-legend 2>/dev/null | awk '{print $1}' | grep -Fxq "${unit}.service"; then
      printf '%s\n' "${unit}"
      return 0
    fi
  done
  return 1
}
# Fetch the release binary into TMP_BINARY, preferring curl over wget,
# and mark it executable. Exits when neither downloader is available.
download_binary() {
  local fetcher
  echo -e "${YELLOW}Downloading sing-box binary...${NC}"
  echo -e "${YELLOW}Target: ${DOWNLOAD_TARGET}${NC}"
  echo -e "${YELLOW}URL: ${DOWNLOAD_URL}${NC}"
  if command -v curl >/dev/null 2>&1; then
    fetcher=curl
  elif command -v wget >/dev/null 2>&1; then
    fetcher=wget
  else
    echo -e "${RED}Neither curl nor wget is installed.${NC}"
    exit 1
  fi
  case "${fetcher}" in
    curl) curl -fL "${DOWNLOAD_URL}" -o "${TMP_BINARY}" ;;
    wget) wget -O "${TMP_BINARY}" "${DOWNLOAD_URL}" ;;
  esac
  chmod 0755 "${TMP_BINARY}"
}
# Sanity-check the downloaded file: it must run `version` successfully,
# otherwise abort the update.
validate_binary() {
  if "${TMP_BINARY}" version >/dev/null 2>&1; then
    return 0
  fi
  echo -e "${RED}Downloaded file is not a valid sing-box binary.${NC}"
  exit 1
}
# Print the first line of the installed binary's `version` output;
# prints nothing when the binary is absent or not executable.
current_version() {
  [[ -x "${BINARY_PATH}" ]] || return 0
  "${BINARY_PATH}" version 2>/dev/null | head -n 1 || true
}
# Print the first line of the downloaded binary's `version` output;
# failures are swallowed so the caller just sees an empty string.
new_version() {
  { "${TMP_BINARY}" version 2>/dev/null || true; } | head -n 1
}
# Restore the backed-up binary over BINARY_PATH and, when a service was
# detected, attempt to restart it (best effort).
rollback() {
  echo -e "${RED}Update failed, rolling back to previous binary...${NC}"
  install -m 0755 "${BACKUP_BINARY}" "${BINARY_PATH}"
  local svc="${SERVICE_NAME_DETECTED:-}"
  if [[ -n "${svc}" ]]; then
    systemctl restart "${svc}" || true
  fi
}
# --- Main flow: download, validate, compare versions, install, restart. ---
download_binary
validate_binary

OLD_VERSION="$(current_version)"
NEW_VERSION="$(new_version)"
if [[ -n "${OLD_VERSION}" ]]; then
  echo -e "${YELLOW}Current version: ${OLD_VERSION}${NC}"
else
  echo -e "${YELLOW}Current version: not installed or unreadable${NC}"
fi
echo -e "${YELLOW}New version: ${NEW_VERSION}${NC}"

# Back up the current binary so a failed service restart can be rolled back;
# an empty backup file marks "nothing was installed before".
if [[ -x "${BINARY_PATH}" ]]; then
  cp -f "${BINARY_PATH}" "${BACKUP_BINARY}"
else
  : > "${BACKUP_BINARY}"
fi

install -m 0755 "${TMP_BINARY}" "${BINARY_PATH}"

SERVICE_NAME_DETECTED="$(detect_service_name || true)"
if [[ -n "${SERVICE_NAME_DETECTED}" ]]; then
  echo -e "${YELLOW}Restarting service: ${SERVICE_NAME_DETECTED}${NC}"
  if ! systemctl restart "${SERVICE_NAME_DETECTED}"; then
    # Only roll back when a non-empty backup exists (see above).
    if [[ -s "${BACKUP_BINARY}" ]]; then
      rollback
    fi
    exit 1
  fi
  echo -e "${GREEN}Service restarted successfully.${NC}"
else
  echo -e "${YELLOW}No systemd service detected. Binary updated only.${NC}"
fi

echo -e "${GREEN}sing-box has been updated successfully.${NC}"
echo -e "${GREEN}Binary: ${BINARY_PATH}${NC}"
echo -e "${GREEN}Version: $(current_version)${NC}"