update

.github/workflows/upstream.yml
@@ -92,7 +92,9 @@ jobs:
        ) &
        (
          git_sparse_clone master "https://github.com/xiaoqingfengATGH/feeds-xiaoqingfeng" "xiaoqingfeng" homeredirect luci-app-homeredirect
        )
        ) &

        git_sparse_clone master "https://github.com/coolsnowwolf/lede" "leanlede" package/lean package/network/services/shellsync package/network/utils/dpdk

        for ipk in $(ls applications); do
          if [[ $(ls -l applications/$ipk/po | wc -l) -gt 3 ]]; then
@@ -117,75 +119,8 @@ jobs:
    - name: Modify
      continue-on-error: true
      run: |
        shopt -s extglob
        (
        function latest() {
          (curl -gs -H 'Content-Type: application/json' \
            -H "Authorization: Bearer ${{ secrets.REPO_TOKEN }}" \
            -X POST -d '{ "query": "query {repository(owner: \"'"$1"'\", name: \"'"$2"'\"){refs(refPrefix:\"refs/tags/\",last:1,orderBy:{field:TAG_COMMIT_DATE,direction:ASC}){edges{node{name target{commitUrl}}}}defaultBranchRef{target{...on Commit {oid}}}}}"}' https://api.github.com/graphql)
        }
        for pkg in $(ls !(luci-*|natflow|ps3netsrv|rblibtorrent|shadowsocksr-libev|v2ray-core|n2n_v2|mosdns|coremark|mbedtls|rtl8821cu|rtl8188eu|rtl8812au-ac|quickjspp|libcups)/Makefile); do
          repo="$(grep PKG_SOURCE_URL $pkg | grep github | cut -f 4-5 -d '/' | sed -e 's/.git//' | grep -E '[0-9a-zA-Z_-]+$')" || true
          if [ "$repo" ]; then
            owner="$(echo $repo | cut -f 1 -d '/')"
            name="$(echo $repo | cut -f 2 -d '/')"
            latest="$(latest $owner $name)"
            sed -i "s/PKG_SOURCE_VERSION:=.*/PKG_SOURCE_VERSION:=$(echo $latest | jq -r '.data.repository.defaultBranchRef.target.oid')/" $pkg || true
            over="$(grep -E PKG_VERSION:=.*$ $pkg | cut -f 2 -d '=' | sed -e 's/^\(v\|release-\)//' | grep -E "[0-9]")" || true
            if [ "$over" ]; then
              ver="$(echo $latest | jq -r '.data.repository.refs.edges[-1].node.name' | sed -e 's/^\(v\|release-\)//' | grep -E "^[^()]*$" | grep -E "[0-9]")" || true
              # [ "$ver" ] || ver="$(git ls-remote --tags git://github.com/$repo | cut -d/ -f3- | sort -t. -nk1,3 | awk '/^[^{]*$/{version=$1}END{print version}' | grep -o -E "[0-9].*")" || true
              echo "$repo-$over-$ver"
              if [ "$ver" ]; then
                function version_lt() { test "$(echo "$@" | tr " " "\n" | sort -rV | head -n 1)" != "$1"; }
                if version_lt "$over" "$ver"; then
                  sed -i -e "s/PKG_SOURCE_VERSION:=.*/PKG_SOURCE_VERSION:=$(echo $latest | jq -r '.data.repository.refs.edges[-1].node.target.commitUrl' | cut -f 7 -d /)/" \
                    -e "s/PKG_VERSION:=.*/PKG_VERSION:=$ver/" $pkg || true
                fi || true
              fi
            fi
          fi
        done
        touch /tmp/ok
        ) &

        for pkg in $(find -maxdepth 1 -type d -name "luci-*" ! -name "luci-app-dockerman" ! -name "luci-app-frpc" ! -name "luci-app-frps"); do
          if [ "$(grep "luci.mk" $pkg/Makefile)" ]; then
            sed -i '/\(PKG_VERSION\|PKG_RELEASE\):=/d' $pkg/Makefile
          fi
          grep -q '"nas",' $pkg/luasrc/controller/*.lua && ! grep -q '_("NAS")' $pkg/luasrc/controller/*.lua &&
            sed -i 's/index()/index()\n\tentry({"admin", "nas"}, firstchild(), _("NAS") , 45).dependent = false/' $pkg/luasrc/controller/*.lua
        done
        (
        for pkg in $(ls -d */); do
          if [ "$(grep "PKG_RELEASE" $pkg/Makefile)" ]; then
            sed -i "s/PKG_RELEASE:=.*/PKG_RELEASE:=$(git rev-list --count master $pkg)/" $pkg/Makefile || true
          fi
        done
        touch /tmp/ok2
        ) &

        bash diy/create_acl_for_luci.sh -a >/dev/null 2>&1 || true
        bash diy/convert_translation.sh -a >/dev/null 2>&1 || true

        rm -f miniupnpd/patches/106-spam-syslog* go-aliyundrive-webdav/patches/100-fix-read*
        sed -i "/minisign:minisign/d" luci-app-dnscrypt-proxy2/Makefile
        sed -i 's/+dockerd/+dockerd +cgroupfs-mount/' luci-app-docker*/Makefile
        sed -i '$i /etc/init.d/dockerd restart &' luci-app-docker*/root/etc/uci-defaults/*
        sed -i '/\/etc\/config\/AdGuardHome/a /etc/AdGuardHome.yaml' luci-app-adguardhome/Makefile
        sed -i 's/+libcap /+libcap +libcap-bin /' luci-app-openclash/Makefile

        sed -i \
          -e 's?include \.\./\.\./\(lang\|devel\)?include $(TOPDIR)/feeds/packages/\1?' \
          -e "s/\(PKG_HASH\|PKG_MD5SUM\|PKG_MIRROR_HASH\):=.*/\1:=skip/" \
          -e 's?\.\./\.\./luci.mk?$(TOPDIR)/feeds/luci/luci.mk?' \
          -e 's/ca-certificates/ca-bundle/' \
          */Makefile

        while [[ ! -f /tmp/ok || ! -f /tmp/ok2 ]]; do
          echo "wait"
          sleep 1
        done

    - name: SSH connection to Actions
      uses: kiddin9/debugger-action@master
.gitignore
@@ -9,9 +9,7 @@
/luci-app-softether
/luci-app-cifs
/luci-app-cifsd
/luci-app-smstool
/luci-app-mosquitto
/luci-app-appfilter
/luci-app-babeld
/luci-app-siitwizard
/luci-app-ttyd
@@ -1,77 +0,0 @@
include $(TOPDIR)/rules.mk

PKG_NAME:=aliyundrive-fuse
PKG_VERSION:=0.1.6
PKG_RELEASE:=

PKG_LICENSE:=MIT
PKG_MAINTAINER:=messense <messense@icloud.com>

PKG_LIBC:=musl
ifeq ($(ARCH),arm)
  PKG_LIBC:=musleabi

  ARM_CPU_FEATURES:=$(word 2,$(subst +,$(space),$(call qstrip,$(CONFIG_CPU_TYPE))))
  ifneq ($(filter $(ARM_CPU_FEATURES),vfp vfpv2),)
    PKG_LIBC:=musleabihf
  endif
endif

PKG_ARCH=$(ARCH)
ifeq ($(ARCH),i386)
  PKG_ARCH:=i686
endif

PKG_SOURCE:=aliyundrive-fuse-v$(PKG_VERSION).$(PKG_ARCH)-unknown-linux-$(PKG_LIBC).tar.gz
PKG_SOURCE_URL:=https://github.com/messense/aliyundrive-fuse/releases/download/v$(PKG_VERSION)/
PKG_HASH:=skip

include $(INCLUDE_DIR)/package.mk

define Package/$(PKG_NAME)
  SECTION:=multimedia
  CATEGORY:=Multimedia
  DEPENDS:=+fuse-utils
  TITLE:=FUSE for AliyunDrive
  URL:=https://github.com/messense/aliyundrive-fuse
endef

define Package/$(PKG_NAME)/description
  FUSE for AliyunDrive.
endef

define Package/$(PKG_NAME)/conffiles
/etc/config/aliyundrive-fuse
endef

define Download/sha256sum
  FILE:=$(PKG_SOURCE).sha256
  URL_FILE:=$(FILE)
  URL:=$(PKG_SOURCE_URL)
  HASH:=skip
endef
$(eval $(call Download,sha256sum))

define Build/Prepare
	mv $(DL_DIR)/$(PKG_SOURCE).sha256 .
	cp $(DL_DIR)/$(PKG_SOURCE) .
	shasum -a 256 -c $(PKG_SOURCE).sha256
	rm $(PKG_SOURCE).sha256 $(PKG_SOURCE)

	tar -C $(PKG_BUILD_DIR)/ -zxf $(DL_DIR)/$(PKG_SOURCE)
endef

define Build/Compile
	echo "$(PKG_NAME) using precompiled binary."
endef

define Package/$(PKG_NAME)/install
	$(INSTALL_DIR) $(1)/usr/bin
	$(INSTALL_BIN) $(PKG_BUILD_DIR)/aliyundrive-fuse $(1)/usr/bin/aliyundrive-fuse
	$(INSTALL_DIR) $(1)/etc/init.d
	$(INSTALL_BIN) ./files/aliyundrive-fuse.init $(1)/etc/init.d/aliyundrive-fuse
	$(INSTALL_DIR) $(1)/etc/config
	$(INSTALL_CONF) ./files/aliyundrive-fuse.config $(1)/etc/config/aliyundrive-fuse
endef

$(eval $(call BuildPackage,$(PKG_NAME)))
@@ -1,7 +0,0 @@
config default
	option enable '0'
	option debug '0'
	option refresh_token ''
	option domain_id ''
	option mount_point '/mnt/aliyundrive'
	option read_buffer_size '10485760'
@@ -1,48 +0,0 @@
#!/bin/sh /etc/rc.common

USE_PROCD=1

START=99
STOP=15

NAME=aliyundrive-fuse

uci_get_by_type() {
	local ret=$(uci get $NAME.@$1[0].$2 2>/dev/null)
	echo ${ret:=$3}
}

start_service() {
	local enable=$(uci_get_by_type default enable)
	case "$enable" in
		1|on|true|yes|enabled)
			local refresh_token=$(uci_get_by_type default refresh_token)
			local domain_id=$(uci_get_by_type default domain_id)
			local mount_point=$(uci_get_by_type default mount_point)
			local read_buf_size=$(uci_get_by_type default read_buffer_size 10485760)

			local extra_options=""

			if [[ ! -z "$domain_id" ]]; then
				extra_options="$extra_options --domain-id $domain_id"
			fi

			mkdir -p "$mount_point"
			procd_open_instance
			procd_set_param command /bin/sh -c "/usr/bin/$NAME $extra_options -S $read_buf_size --workdir /var/run/$NAME $mount_point >>/var/log/$NAME.log 2>&1"
			procd_set_param pidfile /var/run/$NAME.pid
			procd_set_param env REFRESH_TOKEN="$refresh_token"
			case $(uci_get_by_type default debug) in
				1|on|true|yes|enabled)
					procd_append_param env RUST_LOG="aliyundrive_fuse=debug" ;;
				*) ;;
			esac
			procd_close_instance ;;
		*)
			stop_service ;;
	esac
}

service_triggers() {
	procd_add_reload_trigger "aliyundrive-fuse"
}
@@ -1,76 +0,0 @@
include $(TOPDIR)/rules.mk

PKG_NAME:=aliyundrive-webdav
PKG_VERSION:=1.2.2
PKG_RELEASE:=

PKG_LICENSE:=MIT
PKG_MAINTAINER:=messense <messense@icloud.com>

PKG_LIBC:=musl
ifeq ($(ARCH),arm)
  PKG_LIBC:=musleabi

  ARM_CPU_FEATURES:=$(word 2,$(subst +,$(space),$(call qstrip,$(CONFIG_CPU_TYPE))))
  ifneq ($(filter $(ARM_CPU_FEATURES),vfp vfpv2),)
    PKG_LIBC:=musleabihf
  endif
endif

PKG_ARCH=$(ARCH)
ifeq ($(ARCH),i386)
  PKG_ARCH:=i686
endif

PKG_SOURCE:=aliyundrive-webdav-v$(PKG_VERSION).$(PKG_ARCH)-unknown-linux-$(PKG_LIBC).tar.gz
PKG_SOURCE_URL:=https://github.com/messense/aliyundrive-webdav/releases/download/v$(PKG_VERSION)/
PKG_HASH:=skip

include $(INCLUDE_DIR)/package.mk

define Package/aliyundrive-webdav
  SECTION:=multimedia
  CATEGORY:=Multimedia
  TITLE:=WebDAV server for AliyunDrive
  URL:=https://github.com/messense/aliyundrive-webdav
endef

define Package/aliyundrive-webdav/description
  WebDAV server for AliyunDrive.
endef

define Package/aliyundrive-webdav/conffiles
/etc/config/aliyundrive-webdav
endef

define Download/sha256sum
  FILE:=$(PKG_SOURCE).sha256
  URL_FILE:=$(FILE)
  URL:=$(PKG_SOURCE_URL)
  HASH:=skip
endef
$(eval $(call Download,sha256sum))

define Build/Prepare
	mv $(DL_DIR)/$(PKG_SOURCE).sha256 .
	cp $(DL_DIR)/$(PKG_SOURCE) .
	shasum -a 256 -c $(PKG_SOURCE).sha256
	rm $(PKG_SOURCE).sha256 $(PKG_SOURCE)

	tar -C $(PKG_BUILD_DIR)/ -zxf $(DL_DIR)/$(PKG_SOURCE)
endef

define Build/Compile
	echo "aliyundrive-webdav using precompiled binary."
endef

define Package/aliyundrive-webdav/install
	$(INSTALL_DIR) $(1)/usr/bin
	$(INSTALL_BIN) $(PKG_BUILD_DIR)/aliyundrive-webdav $(1)/usr/bin/aliyundrive-webdav
	$(INSTALL_DIR) $(1)/etc/init.d
	$(INSTALL_BIN) ./files/aliyundrive-webdav.init $(1)/etc/init.d/aliyundrive-webdav
	$(INSTALL_DIR) $(1)/etc/config
	$(INSTALL_CONF) ./files/aliyundrive-webdav.config $(1)/etc/config/aliyundrive-webdav
endef

$(eval $(call BuildPackage,aliyundrive-webdav))
@@ -1,17 +0,0 @@
config server
	option enable '0'
	option debug '0'
	option refresh_token ''
	option host '0.0.0.0'
	option port '8080'
	option auth_user ''
	option auth_password ''
	option read_buffer_size '10485760'
	option cache_size '1000'
	option cache_ttl '600'
	option root '/'
	option no_trash '0'
	option domain_id ''
	option read_only '0'
	option tls_cert ''
	option tls_key ''
@@ -1,75 +0,0 @@
#!/bin/sh /etc/rc.common

USE_PROCD=1

START=99
STOP=15

NAME=aliyundrive-webdav

uci_get_by_type() {
	local ret=$(uci get $NAME.@$1[0].$2 2>/dev/null)
	echo ${ret:=$3}
}

start_service() {
	local enable=$(uci_get_by_type server enable)
	case "$enable" in
		1|on|true|yes|enabled)
			local refresh_token=$(uci_get_by_type server refresh_token)
			local auth_user=$(uci_get_by_type server auth_user)
			local auth_password=$(uci_get_by_type server auth_password)
			local read_buf_size=$(uci_get_by_type server read_buffer_size 10485760)
			local cache_size=$(uci_get_by_type server cache_size 1000)
			local cache_ttl=$(uci_get_by_type server cache_ttl 600)
			local host=$(uci_get_by_type server host 127.0.0.1)
			local port=$(uci_get_by_type server port 8080)
			local root=$(uci_get_by_type server root /)
			local domain_id=$(uci_get_by_type server domain_id)
			local tls_cert=$(uci_get_by_type server tls_cert)
			local tls_key=$(uci_get_by_type server tls_key)

			local extra_options="-I"

			if [[ ! -z "$domain_id" ]]; then
				extra_options="$extra_options --domain-id $domain_id"
			else
				case "$(uci_get_by_type server no_trash 0)" in
					1|on|true|yes|enabled)
						extra_options="$extra_options --no-trash"
						;;
					*) ;;
				esac

				case "$(uci_get_by_type server read_only 0)" in
					1|on|true|yes|enabled)
						extra_options="$extra_options --read-only"
						;;
					*) ;;
				esac
			fi

			if [[ ! -z "$tls_cert" && ! -z "$tls_key" ]]; then
				extra_options="$extra_options --tls-cert $tls_cert --tls-key $tls_key"
			fi

			procd_open_instance
			procd_set_param command /bin/sh -c "/usr/bin/$NAME $extra_options --host $host --port $port --root $root -S $read_buf_size --cache-size $cache_size --cache-ttl $cache_ttl --workdir /var/run/$NAME >>/var/log/$NAME.log 2>&1"
			procd_set_param pidfile /var/run/$NAME.pid
			procd_set_param env REFRESH_TOKEN="$refresh_token"
			[[ ! -z "$auth_user" ]] && procd_append_param env WEBDAV_AUTH_USER="$auth_user"
			[[ ! -z "$auth_password" ]] && procd_append_param env WEBDAV_AUTH_PASSWORD="$auth_password"
			case $(uci_get_by_type server debug) in
				1|on|true|yes|enabled)
					procd_append_param env RUST_LOG="aliyundrive_webdav=debug" ;;
				*) ;;
			esac
			procd_close_instance ;;
		*)
			stop_service ;;
	esac
}

service_triggers() {
	procd_add_reload_trigger "aliyundrive-webdav"
}
@@ -1,72 +0,0 @@
# SPDX-License-Identifier: GPL-3.0-only
#
# Copyright (C) 2021 ImmortalWrt.org

include $(TOPDIR)/rules.mk

PKG_NAME:=brook
PKG_VERSION:=20210701
PKG_RELEASE:=

PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/txthinking/brook/tar.gz/v$(PKG_VERSION)?
PKG_HASH:=92c2253349af05ea5aa7a45cddd39ca638c732b2ffdb5066a5f03d2df40cb0b5

PKG_MAINTAINER:=Tianling Shen <cnsztl@immortalwrt.org>
PKG_LICENSE:=GPL-3.0
PKG_LICENSE_FILES:=LICENSE

PKG_BUILD_DEPENDS:=golang/host
PKG_BUILD_PARALLEL:=1
PKG_USE_MIPS16:=0

PKG_CONFIG_DEPENDS:= \
	CONFIG_BROOK_COMPRESS_GOPROXY \
	CONFIG_BROOK_COMPRESS_UPX

GO_PKG:=github.com/txthinking/brook
GO_PKG_BUILD_PKG:=github.com/txthinking/brook/cli/brook
GO_PKG_LDFLAGS:=-s -w

include $(INCLUDE_DIR)/package.mk
include $(TOPDIR)/feeds/packages/lang/golang/golang-package.mk

define Package/brook
  SECTION:=net
  CATEGORY:=Network
  SUBMENU:=Web Servers/Proxies
  TITLE:=A cross-platform proxy software
  DEPENDS:=$(GO_ARCH_DEPENDS) +ca-bundle
  URL:=https://github.com/txthinking/brook
endef

define Package/brook/description
  Brook is a cross-platform strong encryption and not detectable proxy.
  Zero-Configuration.
endef

define Package/brook/config
config BROOK_COMPRESS_GOPROXY
	bool "Compiling with GOPROXY proxy"
	default n

config BROOK_COMPRESS_UPX
	bool "Compress executable files with UPX"
	depends on !mips64
	default n
endef

ifeq ($(CONFIG_BROOK_COMPRESS_GOPROXY),y)
export GO111MODULE=on
export GOPROXY=https://goproxy.baidu.com
endif

define Build/Compile
	$(call GoPackage/Build/Compile)
ifeq ($(CONFIG_BROOK_COMPRESS_UPX),y)
	$(STAGING_DIR_HOST)/bin/upx --lzma --best $(GO_PKG_BUILD_BIN_DIR)/brook
endif
endef

$(eval $(call GoBinPackage,brook))
$(eval $(call BuildPackage,brook))
@@ -1,40 +0,0 @@
# SPDX-License-Identifier: GPL-3.0-only
#
# Copyright (C) 2021 ImmortalWrt.org

include $(TOPDIR)/rules.mk

PKG_NAME:=chinadns-ng
PKG_VERSION:=1.0-beta.25
PKG_RELEASE:=

PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://github.com/zfl9/chinadns-ng.git
PKG_SOURCE_DATE:=2021-05-08
PKG_SOURCE_VERSION:=14cc6348d67b09cae37d9bce554c89c2c0e0b265
PKG_MIRROR_HASH:=3b66fc0888d9488e3b8e39df3016d51fae1b43325d292381e94aa3c7d2318282

PKG_LICENSE:=AGPL-3.0-only
PKG_LICENSE_FILES:=LICENSE
PKG_MAINTAINER:=pexcn <i@pexcn.me>

PKG_BUILD_PARALLEL:=1
PKG_INSTALL:=1

include $(INCLUDE_DIR)/package.mk

define Package/chinadns-ng
  SECTION:=net
  CATEGORY:=Network
  SUBMENU:=IP Addresses and Names
  TITLE:=ChinaDNS next generation, refactoring with epoll and ipset.
  URL:=https://github.com/zfl9/chinadns-ng
  DEPENDS:=+ipset
endef

define Package/chinadns-ng/install
	$(INSTALL_DIR) $(1)/usr/bin
	$(INSTALL_BIN) $(PKG_INSTALL_DIR)/chinadns-ng $(1)/usr/bin
endef

$(eval $(call BuildPackage,chinadns-ng))
@@ -1,9 +0,0 @@
--- a/client/src/helpers/version.js
+++ b/client/src/helpers/version.js
@@ -13,5 +13,5 @@ export const areEqualVersions = (left, right) => {

const leftVersion = left.replace(/^v/, '');
const rightVersion = right.replace(/^v/, '');
- return leftVersion === rightVersion;
+ return leftVersion < rightVersion;
};
@@ -1,72 +0,0 @@
include $(TOPDIR)/rules.mk

PKG_NAME:=ddns-scripts-aliyun
PKG_VERSION:=1.0.2
PKG_RELEASE:=1

PKG_LICENSE:=GPLv2
PKG_MAINTAINER:=Sense <sensec@gmail.com>

PKG_BUILD_PARALLEL:=1

include $(INCLUDE_DIR)/package.mk

define Package/$(PKG_NAME)
  SECTION:=net
  CATEGORY:=Network
  SUBMENU:=IP Addresses and Names
  TITLE:=Extension for aliyun.com (require curl)
  PKGARCH:=all
  DEPENDS:=+ddns-scripts +curl +jshn +openssl-util
endef

define Package/$(PKG_NAME)/description
  Dynamic DNS Client scripts extension for aliyun.com API v1 (require curl)
  It requires:
  "option username" to be a valid aliyun access key id
  "option password" to be the matching aliyun access key secret
  "option domain" the dns domain to update the record for (eg. A-record: home.<example.com>)
endef

define Build/Configure
endef

define Build/Compile
endef

define Package/$(PKG_NAME)/install
	$(INSTALL_DIR) $(1)/usr/lib/ddns
	$(INSTALL_BIN) ./update_aliyun_com.sh $(1)/usr/lib/ddns

	$(INSTALL_DIR) $(1)/usr/share/ddns/default
	$(INSTALL_DATA) ./aliyun.com.json $(1)/usr/share/ddns/default
endef

define Package/$(PKG_NAME)/prerm
#!/bin/sh
if [ -z "$${IPKG_INSTROOT}" ]; then
	/etc/init.d/ddns stop
fi
if [ -w $${IPKG_INSTROOT}/etc/ddns/services ]; then
	/bin/sed -i '/aliyun\.com/d' $${IPKG_INSTROOT}/etc/ddns/services >/dev/null 2>&1
	/bin/sed -i '/aliyun\.com/d' $${IPKG_INSTROOT}/etc/ddns/services_ipv6 >/dev/null 2>&1
fi
exit 0
endef

define Package/$(PKG_NAME)/postinst
#!/bin/sh
if [ -w $${IPKG_INSTROOT}/etc/ddns/services ]; then
	/bin/sed -i '/aliyun\.com/d' $${IPKG_INSTROOT}/etc/ddns/services >/dev/null 2>&1
	/bin/sed -i '/aliyun\.com/d' $${IPKG_INSTROOT}/etc/ddns/services_ipv6 >/dev/null 2>&1
	printf "%s\\t\\t%s\\n" '"aliyun.com"' '"update_aliyun_com.sh"' >> $${IPKG_INSTROOT}/etc/ddns/services
	printf "%s\\t\\t%s\\n" '"aliyun.com"' '"update_aliyun_com.sh"' >> $${IPKG_INSTROOT}/etc/ddns/services_ipv6
fi
if [ -z "$${IPKG_INSTROOT}" ]; then
	/etc/init.d/ddns enabled
	/etc/init.d/ddns start
fi
exit 0
endef

$(eval $(call BuildPackage,$(PKG_NAME)))
@@ -1,9 +0,0 @@
{
	"name": "aliyun.com",
	"ipv4": {
		"url": "update_aliyun_com.sh"
	},
	"ipv6": {
		"url": "update_aliyun_com.sh"
	}
}
@@ -1,304 +0,0 @@
|
||||
#!/bin/sh
|
||||
#
|
||||
# 用于阿里云域名解析的DNS更新脚本
|
||||
# 2017-2021 Sense <sensec at gmail dot com>
|
||||
# 阿里云域名解析API文档 https://help.aliyun.com/document_detail/29739.html
|
||||
#
|
||||
# 本脚本由 dynamic_dns_functions.sh 内的函数 send_update() 调用
|
||||
#
|
||||
# 需要在 /etc/config/ddns 中设置的选项
|
||||
# option username - 阿里云API访问账号 Access Key ID。可通过 aliyun.com 帐号管理的 accesskeys 获取, 或者访问 https://ak-console.aliyun.com
|
||||
# option password - 阿里云API访问密钥 Access Key Secret
|
||||
# option domain - 完整的域名。建议主机与域名之间使用 @符号 分隔,否则将以第一个 .符号 之前的内容作为主机名
|
||||
#
|
||||
|
||||
# 检查传入参数
|
||||
[ -z "$username" ] && write_log 14 "配置错误!保存阿里云API访问账号的'用户名'不能为空"
|
||||
[ -z "$password" ] && write_log 14 "配置错误!保存阿里云API访问密钥的'密码'不能为空"
|
||||
[ $USE_CURL -eq 0 ] && USE_CURL=1 # 强制使用 cURL
|
||||
[ $use_https -eq 1 -a -z "$cacert" ] && cacert="IGNORE" # 使用 HTTPS 时 CA 证书为空则不检查服务器证书
|
||||
|
||||
# 检查外部调用工具
|
||||
local CURL=$(command -v curl)
|
||||
local CURL_SSL=$($CURL -V 2>/dev/null | grep -F "https")
|
||||
local CURL_PROXY=$(find /lib /usr/lib -name libcurl.so* -exec strings {} 2>/dev/null \; | grep -im1 "all_proxy")
|
||||
[ -z "$CURL" ] && write_log 13 "与阿里云服务器通信需要 cURL 支持, 请先安装"
|
||||
command -v sed >/dev/null 2>&1 || write_log 13 "使用阿里云API需要 sed 支持, 请先安装"
|
||||
command -v openssl >/dev/null 2>&1 || write_log 13 "使用阿里云API需要 openssl-util 支持, 请先安装"
|
||||
|
||||
# 包含用于解析 JSON 格式返回值的函数
|
||||
. /usr/share/libubox/jshn.sh
|
||||
|
||||
# 变量声明
|
||||
local __HOST __DOMAIN __FQDN __TYPE __PROG __URLBASE __DATA __TTL __SEPARATOR __RECID
|
||||
__URLBASE="http://alidns.aliyuncs.com"
|
||||
[ $use_https -eq 1 ] && __URLBASE=$(echo $__URLBASE | sed -e 's#^http:#https:#')
|
||||
__TTL=
|
||||
__SEPARATOR="&"
|
||||
|
||||
# 从 $domain 分离主机和域名
|
||||
[ "${domain:0:2}" == "@." ] && domain="${domain/./}" # 主域名处理
|
||||
[ "$domain" == "${domain/@/}" ] && domain="${domain/./@}" # 未找到分隔符,兼容常用域名格式
|
||||
__HOST="${domain%%@*}"
|
||||
__DOMAIN="${domain#*@}"
|
||||
if [ -z "$__HOST" -o "$__HOST" == "$__DOMAIN" ]; then
|
||||
__HOST="@"
|
||||
__FQDN=${__DOMAIN}
|
||||
else
|
||||
__FQDN=${__HOST}.${__DOMAIN}
|
||||
fi
|
||||
|
||||
# 设置记录类型
|
||||
[ $use_ipv6 -eq 0 ] && __TYPE="A" || __TYPE="AAAA"
|
||||
|
||||
# 构造基本通信命令, 从 dynamic_dns_functions.sh 函数 do_transfer() 复制
|
||||
build_command() {
|
||||
__PROG="$CURL -qLsS -o $DATFILE --stderr $ERRFILE"
|
||||
# check HTTPS support
|
||||
[ -z "$CURL_SSL" -a $use_https -eq 1 ] && \
|
||||
write_log 13 "cURL: libcurl 编译时缺少 https 支持"
|
||||
# force network/interface-device to use for communication
|
||||
if [ -n "$bind_network" ]; then
|
||||
local __DEVICE
|
||||
network_get_device __DEVICE $bind_network || \
|
||||
write_log 13 "无法使用 'network_get_device $bind_network' 检测到本地设备 - 错误代码: '$?'"
|
||||
write_log 7 "强制通过设备 '$__DEVICE' 进行通信"
|
||||
__PROG="$__PROG --interface $__DEVICE"
|
||||
fi
|
||||
# force ip version to use
|
||||
if [ $force_ipversion -eq 1 ]; then
|
||||
[ $use_ipv6 -eq 0 ] && __PROG="$__PROG -4" || __PROG="$__PROG -6" # force IPv4/IPv6
|
||||
fi
|
||||
# set certificate parameters
|
||||
if [ $use_https -eq 1 ]; then
|
||||
if [ "$cacert" = "IGNORE" ]; then # idea from Ticket #15327 to ignore server cert
|
||||
__PROG="$__PROG --insecure" # but not empty better to use "IGNORE"
|
||||
elif [ -f "$cacert" ]; then
|
||||
__PROG="$__PROG --cacert $cacert"
|
||||
elif [ -d "$cacert" ]; then
|
||||
__PROG="$__PROG --capath $cacert"
|
||||
elif [ -n "$cacert" ]; then # it's not a file and not a directory but given
|
||||
write_log 14 "在 '$cacert' 中未发现用于 HTTPS 通信的有效证书"
|
||||
fi
|
||||
fi
|
||||
# disable proxy if no set (there might be .wgetrc or .curlrc or wrong environment set)
|
||||
# or check if libcurl compiled with proxy support
|
||||
if [ -z "$proxy" ]; then
|
||||
__PROG="$__PROG --noproxy '*'"
|
||||
elif [ -z "$CURL_PROXY" ]; then
|
||||
# if libcurl has no proxy support and proxy should be used then force ERROR
|
||||
write_log 13 "cURL: libcurl 编译时缺少代理支持"
|
||||
fi
|
||||
}
|
||||
|
||||
# 服务器通信函数, 从 dynamic_dns_functions.sh 函数 do_transfer() 复制
|
||||
server_transfer() {
|
||||
local __URL="$@"
|
||||
local __ERR=0
|
||||
local __CNT=0 # error counter
|
||||
local __RUNPROG
|
||||
|
||||
[ $# -eq 0 ] && write_log 12 "'server_transfer()' 出错 - 参数数量错误"
|
||||
|
||||
while : ; do
|
||||
build_Request $__URL
|
||||
__RUNPROG="$__PROG -X POST '$__URLBASE' -d '$__DATA'" # build final command
|
||||
|
||||
write_log 7 "#> $__RUNPROG"
|
||||
eval $__RUNPROG # DO transfer
|
||||
__ERR=$? # save error code
|
||||
[ $__ERR -eq 0 ] && return 0 # no error leave
|
||||
[ -n "$LUCI_HELPER" ] && return 1 # no retry if called by LuCI helper script
|
||||
|
||||
write_log 3 "cURL Error: '$__ERR'"
|
||||
write_log 7 "$(cat $ERRFILE)" # report error
|
||||
|
||||
[ $VERBOSE -gt 1 ] && {
|
||||
# VERBOSE > 1 then NO retry
|
||||
write_log 4 "Transfer failed - Verbose Mode: $VERBOSE - NO retry on error"
|
||||
return 1
|
||||
}
|
||||
|
||||
__CNT=$(( $__CNT + 1 )) # increment error counter
|
||||
# if error count > retry_count leave here
|
||||
[ $retry_count -gt 0 -a $__CNT -gt $retry_count ] && \
|
||||
write_log 14 "Transfer failed after $retry_count retries"
|
||||
|
||||
write_log 4 "Transfer failed - retry $__CNT/$retry_count in $RETRY_SECONDS seconds"
|
||||
sleep $RETRY_SECONDS &
|
||||
PID_SLEEP=$!
|
||||
wait $PID_SLEEP # enable trap-handler
|
||||
PID_SLEEP=0
|
||||
done
|
||||
# we should never come here there must be a programming error
|
||||
write_log 12 "'server_transfer()' 出错 - 程序代码错误"
|
||||
}
|
||||
|
||||
# 百分号编码
|
||||
percentEncode() {
|
||||
if [ -z "${1//[A-Za-z0-9_.~-]/}" ]; then
|
||||
echo -n "$1"
|
||||
else
|
||||
local string=$1 i=0 ret chr
|
||||
while [ $i -lt ${#string} ]; do
|
||||
chr=${string:$i:1}
|
||||
[ -z "${chr#[^A-Za-z0-9_.~-]}" ] && chr=$(printf '%%%02X' "'$chr")
|
||||
ret="$ret$chr"
|
||||
let i++
|
||||
done
|
||||
echo -n "$ret"
|
||||
fi
|
||||
}
|
||||
|
||||
# 构造阿里云域名解析请求参数
|
||||
build_Request() {
|
||||
local args="$@" HTTP_METHOD="POST" string signature
|
||||
|
||||
# 添加请求参数
|
||||
__DATA=
|
||||
for string in $args; do
|
||||
case "${string%%=*}" in
|
||||
Format|Version|AccessKeyId|SignatureMethod|Timestamp|SignatureVersion|SignatureNonce|Signature) ;; # 过滤公共参数
|
||||
*) __DATA="$__DATA${__SEPARATOR}"$(percentEncode "${string%%=*}")"="$(percentEncode "${string#*=}");;
|
||||
esac
|
||||
done
|
||||
__DATA="${__DATA:1}"
|
||||
|
||||
# 附加公共参数
|
||||
string="Format=JSON"; __DATA="$__DATA${__SEPARATOR}"$(percentEncode "${string%%=*}")"="$(percentEncode "${string#*=}")
|
||||
string="Version=2015-01-09"; __DATA="$__DATA${__SEPARATOR}"$(percentEncode "${string%%=*}")"="$(percentEncode "${string#*=}")
|
||||
string="AccessKeyId=$username"; __DATA="$__DATA${__SEPARATOR}"$(percentEncode "${string%%=*}")"="$(percentEncode "${string#*=}")
|
||||
string="SignatureMethod=HMAC-SHA1"; __DATA="$__DATA${__SEPARATOR}"$(percentEncode "${string%%=*}")"="$(percentEncode "${string#*=}")
|
||||
string="Timestamp="$(date -u '+%Y-%m-%dT%H:%M:%SZ'); __DATA="$__DATA${__SEPARATOR}"$(percentEncode "${string%%=*}")"="$(percentEncode "${string#*=}")
|
||||
string="SignatureVersion=1.0"; __DATA="$__DATA${__SEPARATOR}"$(percentEncode "${string%%=*}")"="$(percentEncode "${string#*=}")
|
||||
string="SignatureNonce="$(cat '/proc/sys/kernel/random/uuid'); __DATA="$__DATA${__SEPARATOR}"$(percentEncode "${string%%=*}")"="$(percentEncode "${string#*=}")
|
||||
|
||||
# 对请求参数进行排序,用于生成签名
|
||||
string=$(echo -n "$__DATA" | sed 's/\'"${__SEPARATOR}"'/\n/g' | sort | sed ':label; N; s/\n/\'"${__SEPARATOR}"'/g; b label')
|
||||
# 构造用于计算签名的字符串
|
||||
string="${HTTP_METHOD}${__SEPARATOR}"$(percentEncode "/")"${__SEPARATOR}"$(percentEncode "$string")
|
||||
# 字符串计算签名HMAC值
|
||||
signature=$(echo -n "$string" | openssl dgst -sha1 -hmac "${password}&" -binary)
|
||||
# HMAC值编码成字符串,得到签名值
|
||||
signature=$(echo -n "$signature" | openssl base64)
|
||||
|
||||
# 附加签名参数
|
||||
string="Signature=$signature"; __DATA="$__DATA${__SEPARATOR}"$(percentEncode "${string%%=*}")"="$(percentEncode "${string#*=}")
|
||||
}
|
||||
|
||||
# 获取解析记录列表
|
||||
describe_domain() {
|
||||
local __URL count value ipaddr i=1 ret=0
|
||||
__URL="Action=DescribeDomainRecords DomainName=${__DOMAIN} RRKeyWord=${__HOST} Type=${__TYPE}"
|
||||
server_transfer "$__URL" || return 1
|
||||
json_cleanup; json_load "$(cat "$DATFILE" 2>/dev/null)" >/dev/null 2>&1
|
||||
json_get_var count "TotalCount"
|
||||
if [ -z "$count" ]; then
|
||||
json_get_var value "Message"
|
||||
write_log 4 "Aliyun.com 响应失败, 错误原因: $value"
|
||||
return 127
|
||||
else
|
||||
json_select "DomainRecords" >/dev/null 2>&1
|
||||
json_select "Record" >/dev/null 2>&1
|
||||
while [ $i -le $count ]; do
|
||||
json_select $i >/dev/null 2>&1
|
||||
json_get_var value "RR"
|
||||
if [ "$value" == "$__HOST" ]; then
|
||||
json_get_var __RECID "RecordId"
|
||||
# write_log 7 "获得 ${__FQDN} ${__TYPE}记录ID: ${__RECID}"
|
||||
json_get_var value "Locked"
|
||||
[ $value -ne 0 ] && write_log 13 "Aliyun.com 上的 ${__FQDN} ${__TYPE}记录已被锁定, 无法更新"
|
||||
json_get_var value "Status"
|
||||
[ "$value" != "ENABLE" ] && ret=$(( $ret | 4 )) && write_log 7 "Aliyun.com 上的 ${__FQDN} ${__TYPE}记录已被禁用"
|
||||
json_get_var value "Value"
|
||||
# 展开 IPv6 地址用于比较
|
||||
if [ $use_ipv6 -eq 0 ]; then
|
||||
ipaddr="$__IP"
|
||||
else
|
||||
expand_ipv6 $__IP ipaddr
|
||||
expand_ipv6 $value value
|
||||
fi
|
||||
if [ "$value" == "$ipaddr" ]; then
|
||||
write_log 7 "Aliyun.com 上的 ${__FQDN} ${__TYPE}记录无需更新"
|
||||
else
|
||||
write_log 7 "Aliyun.com 上的 ${__FQDN} ${__TYPE}记录需要更新"
|
||||
ret=$(( $ret | 2 ))
|
||||
fi
|
||||
break
|
||||
fi
|
||||
json_select ..
|
||||
let i++
|
||||
done
|
||||
if [ -z "$__RECID" ]; then
|
||||
write_log 7 "Aliyun.com 上的 ${__FQDN} ${__TYPE}记录不存在"
|
||||
ret=8
|
||||
fi
|
||||
fi
|
||||
return $ret
|
||||
}
|
||||
|
||||
# 添加解析记录
|
||||
add_domain() {
|
||||
local __URL value
|
||||
__URL="Action=AddDomainRecord DomainName=${__DOMAIN} RR=${__HOST} Type=${__TYPE} Value=${__IP}"
|
||||
[ -n "${__TTL}" ] && __URL="${__URL} TTL=${__TTL}"
|
||||
server_transfer "$__URL" || return 1
|
||||
json_cleanup; json_load "$(cat "$DATFILE" 2>/dev/null)" >/dev/null 2>&1
|
||||
json_get_var value "RecordId"
|
||||
if [ -z "$value" ]; then
|
||||
json_get_var value "Message"
|
||||
write_log 4 "Aliyun.com 响应失败, 错误原因: $value"
|
||||
return 127
|
||||
else
|
||||
write_log 7 "Aliyun.com 上的 ${__FQDN} ${__TYPE}记录已添加"
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
# 更新解析记录
|
||||
update_domain() {
|
||||
local __URL value
|
||||
__URL="Action=UpdateDomainRecord RR=${__HOST} RecordId=${__RECID} Type=${__TYPE} Value=${__IP}"
|
||||
[ -n "${__TTL}" ] && __URL="${__URL} TTL=${__TTL}"
|
||||
server_transfer "$__URL" || return 1
|
||||
json_cleanup; json_load "$(cat "$DATFILE" 2>/dev/null)" >/dev/null 2>&1
|
||||
json_get_var value "RecordId"
|
||||
if [ -z "$value" ]; then
|
||||
json_get_var value "Message"
|
||||
write_log 4 "Aliyun.com 响应失败, 错误原因: $value"
|
||||
return 127
|
||||
else
|
||||
write_log 7 "Aliyun.com 上的 ${__FQDN} ${__TYPE}记录已更新为: ${__IP}"
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
# 启用解析记录
|
||||
enable_domain() {
|
||||
local __URL value
|
||||
__URL="Action=SetDomainRecordStatus RecordId=${__RECID} Status=Enable"
|
||||
server_transfer "$__URL" || return 1
|
||||
json_cleanup; json_load "$(cat "$DATFILE" 2>/dev/null)" >/dev/null 2>&1
|
||||
json_get_var value "RecordId"
|
||||
if [ -z "$value" ]; then
|
||||
json_get_var value "Message"
|
||||
write_log 4 "Aliyun.com 响应失败, 错误原因: $value"
|
||||
return 127
|
||||
else
|
||||
write_log 7 "Aliyun.com 上的 ${__FQDN} ${__TYPE}记录已启用"
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
build_command
|
||||
describe_domain
|
||||
ret=$?
|
||||
if [ $(( $ret & 1 )) -ne 0 ]; then
|
||||
return $ret
|
||||
elif [ $ret -eq 8 ]; then
|
||||
sleep 3 && { add_domain; [ $? -ne 0 ] && return $?; }
|
||||
else
|
||||
[ $(( $ret & 4 )) -ne 0 ] && sleep 3 && { enable_domain; [ $? -ne 0 ] && return $?; }
|
||||
[ $(( $ret & 2 )) -ne 0 ] && sleep 3 && { update_domain; [ $? -ne 0 ] && return $?; }
|
||||
fi
|
||||
return 0
|
@@ -1,56 +0,0 @@
include $(TOPDIR)/rules.mk

PKG_NAME:=ddns-scripts-dnspod
PKG_VERSION:=1.0.3
PKG_RELEASE:=8

PKG_LICENSE:=GPLv2
PKG_MAINTAINER:=Small_5

PKG_BUILD_PARALLEL:=1

include $(INCLUDE_DIR)/package.mk

define Package/$(PKG_NAME)
  SECTION:=net
  CATEGORY:=Network
  SUBMENU:=IP Addresses and Names
  TITLE:=DDNS extension for Dnspod.com/Dnspod.cn
  PKGARCH:=all
  DEPENDS:=+ddns-scripts +curl +jsonfilter
endef

define Package/$(PKG_NAME)/description
  Dynamic DNS Client scripts extension for Dnspod.com/Dnspod.cn
endef

define Build/Configure
endef

define Build/Compile
	$(CP) ./*.sh $(PKG_BUILD_DIR)
	# remove comments, white spaces and empty lines
	for FILE in `find $(PKG_BUILD_DIR) -type f`; do \
		$(SED) 's/^[[:space:]]*//' \
			-e '/^#[[:space:]]\|^#$$$$/d' \
			-e 's/[[:space:]]#[[:space:]].*$$$$//' \
			-e 's/[[:space:]]*$$$$//' \
			-e '/^\/\/[[:space:]]/d' \
			-e '/^[[:space:]]*$$$$/d' $$$$FILE; \
	done
endef

define Package/$(PKG_NAME)/install
	$(INSTALL_DIR) $(1)/usr/lib/ddns
	$(INSTALL_BIN) $(PKG_BUILD_DIR)/*.sh $(1)/usr/lib/ddns
	$(INSTALL_DIR) $(1)/usr/share/ddns/default
	$(INSTALL_DATA) ./*.json $(1)/usr/share/ddns/default
endef

define Package/$(PKG_NAME)/prerm
#!/bin/sh
[ -z "$${IPKG_INSTROOT}" ] && /etc/init.d/ddns stop >/dev/null 2>&1
exit 0 # suppress errors
endef

$(eval $(call BuildPackage,$(PKG_NAME)))
@@ -1,2 +0,0 @@
# ddns-scripts-dnspod
Supports using A/AAAA records for the same domain at the same time
@@ -1,9 +0,0 @@
{
	"name": "dnspod.cn",
	"ipv4": {
		"url": "update_dnspod_cn.sh"
	},
	"ipv6": {
		"url": "update_dnspod_cn.sh"
	}
}
@@ -1,9 +0,0 @@
{
	"name": "dnspod.com",
	"ipv4": {
		"url": "update_dnspod_com.sh"
	},
	"ipv6": {
		"url": "update_dnspod_com.sh"
	}
}
@@ -1,137 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
# 检查传入参数
|
||||
[ -z "$username" ] && write_log 14 "Configuration error! [User name] cannot be empty"
|
||||
[ -z "$password" ] && write_log 14 "Configuration error! [Password] cannot be empty"
|
||||
|
||||
# 检查外部调用工具
|
||||
[ -n "$CURL_SSL" ] || write_log 13 "Dnspod communication require cURL with SSL support. Please install"
|
||||
[ -n "$CURL_PROXY" ] || write_log 13 "cURL: libcurl compiled without Proxy support"
|
||||
|
||||
# 变量声明
|
||||
local __URLBASE __HOST __DOMAIN __TYPE __CMDBASE __POST __POST1 __RECIP __RECID __TTL
|
||||
__URLBASE="https://dnsapi.cn"
|
||||
|
||||
# 从 $domain 分离主机和域名
|
||||
[ "${domain:0:2}" = "@." ] && domain="${domain/./}" # 主域名处理
|
||||
[ "$domain" = "${domain/@/}" ] && domain="${domain/./@}" # 未找到分隔符,兼容常用域名格式
|
||||
__HOST="${domain%%@*}"
|
||||
__DOMAIN="${domain#*@}"
|
||||
[ -z "$__HOST" -o "$__HOST" = "$__DOMAIN" ] && __HOST=@
|
||||
|
||||
# 设置记录类型
|
||||
[ $use_ipv6 = 0 ] && __TYPE=A || __TYPE=AAAA
|
||||
|
||||
# 构造基本通信命令
|
||||
build_command(){
|
||||
__CMDBASE="$CURL -Ss"
|
||||
# 绑定用于通信的主机/IP
|
||||
if [ -n "$bind_network" ];then
|
||||
local __DEVICE
|
||||
network_get_physdev __DEVICE $bind_network || write_log 13 "Can not detect local device using 'network_get_physdev $bind_network' - Error: '$?'"
|
||||
write_log 7 "Force communication via device '$__DEVICE'"
|
||||
__CMDBASE="$__CMDBASE --interface $__DEVICE"
|
||||
fi
|
||||
# 强制设定IP版本
|
||||
if [ $force_ipversion = 1 ];then
|
||||
[ $use_ipv6 = 0 ] && __CMDBASE="$__CMDBASE -4" || __CMDBASE="$__CMDBASE -6"
|
||||
fi
|
||||
# 设置CA证书参数
|
||||
if [ $use_https = 1 ];then
|
||||
if [ "$cacert" = IGNORE ];then
|
||||
__CMDBASE="$__CMDBASE --insecure"
|
||||
elif [ -f "$cacert" ];then
|
||||
__CMDBASE="$__CMDBASE --cacert $cacert"
|
||||
elif [ -d "$cacert" ];then
|
||||
__CMDBASE="$__CMDBASE --capath $cacert"
|
||||
elif [ -n "$cacert" ];then
|
||||
write_log 14 "No valid certificate(s) found at '$cacert' for HTTPS communication"
|
||||
fi
|
||||
fi
|
||||
# 如果没有设置,禁用代理 (这可能是 .wgetrc 或环境设置错误)
|
||||
[ -z "$proxy" ] && __CMDBASE="$__CMDBASE --noproxy '*'"
|
||||
__CMDBASE="$__CMDBASE -d"
|
||||
}
|
||||
|
||||
# 用于Dnspod API的通信函数
|
||||
dnspod_transfer(){
|
||||
__CNT=0
|
||||
case "$1" in
|
||||
0)__A="$__CMDBASE '$__POST' $__URLBASE/Record.List";;
|
||||
1)__A="$__CMDBASE '$__POST1' $__URLBASE/Record.Create";;
|
||||
2)__A="$__CMDBASE '$__POST1&record_id=$__RECID&ttl=$__TTL' $__URLBASE/Record.Modify";;
|
||||
esac
|
||||
|
||||
write_log 7 "#> $__A"
|
||||
while ! __TMP=`eval $__A 2>&1`;do
|
||||
write_log 3 "[$__TMP]"
|
||||
if [ $VERBOSE -gt 1 ];then
|
||||
write_log 4 "Transfer failed - detailed mode: $VERBOSE - Do not try again after an error"
|
||||
return 1
|
||||
fi
|
||||
__CNT=$(( $__CNT + 1 ))
|
||||
[ $retry_count -gt 0 -a $__CNT -gt $retry_count ] && write_log 14 "Transfer failed after $retry_count retries"
|
||||
write_log 4 "Transfer failed - $__CNT Try again in $RETRY_SECONDS seconds"
|
||||
sleep $RETRY_SECONDS &
|
||||
PID_SLEEP=$!
|
||||
wait $PID_SLEEP
|
||||
PID_SLEEP=0
|
||||
done
|
||||
__ERR=`jsonfilter -s "$__TMP" -e "@.status.code"`
|
||||
[ $__ERR = 1 ] && return 0
|
||||
[ $__ERR = 10 ] && [ $1 = 0 ] && return 0
|
||||
__TMP=`jsonfilter -s "$__TMP" -e "@.status.message"`
|
||||
local A="$(date +%H%M%S) ERROR : [$__TMP] - 终止进程"
|
||||
logger -p user.err -t ddns-scripts[$$] $SECTION_ID: ${A:15}
|
||||
printf "%s\n" " $A" >> $LOGFILE
|
||||
exit 1
|
||||
}
|
||||
|
||||
# 添加解析记录
|
||||
add_domain(){
|
||||
dnspod_transfer 1
|
||||
printf "%s\n" " $(date +%H%M%S) : 添加解析记录成功: [$([ "$__HOST" = @ ] || echo $__HOST.)$__DOMAIN],[IP:$__IP]" >> $LOGFILE
|
||||
return 0
|
||||
}
|
||||
|
||||
# 修改解析记录
|
||||
update_domain(){
|
||||
dnspod_transfer 2
|
||||
printf "%s\n" " $(date +%H%M%S) : 修改解析记录成功: [$([ "$__HOST" = @ ] || echo $__HOST.)$__DOMAIN],[IP:$__IP],[TTL:$__TTL]" >> $LOGFILE
|
||||
return 0
|
||||
}
|
||||
|
||||
# 获取域名解析记录
|
||||
describe_domain(){
|
||||
ret=0
|
||||
__POST="login_token=$username,$password&format=json&domain=$__DOMAIN&sub_domain=$__HOST"
|
||||
__POST1="$__POST&value=$__IP&record_type=$__TYPE&record_line_id=0"
|
||||
dnspod_transfer 0
|
||||
__TMP=`jsonfilter -s "$__TMP" -e "@.records[@.type='$__TYPE' && @.line_id='0']"`
|
||||
if [ -z "$__TMP" ];then
|
||||
printf "%s\n" " $(date +%H%M%S) : 解析记录不存在: [$([ "$__HOST" = @ ] || echo $__HOST.)$__DOMAIN]" >> $LOGFILE
|
||||
ret=1
|
||||
else
|
||||
__RECIP=`jsonfilter -s "$__TMP" -e "@.value"`
|
||||
if [ "$__RECIP" != "$__IP" ];then
|
||||
__RECID=`jsonfilter -s "$__TMP" -e "@.id"`
|
||||
__TTL=`jsonfilter -s "$__TMP" -e "@.ttl"`
|
||||
printf "%s\n" " $(date +%H%M%S) : 解析记录需要更新: [解析记录IP:$__RECIP] [本地IP:$__IP]" >> $LOGFILE
|
||||
ret=2
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
build_command
|
||||
describe_domain
|
||||
if [ $ret = 1 ];then
|
||||
sleep 3
|
||||
add_domain
|
||||
elif [ $ret = 2 ];then
|
||||
sleep 3
|
||||
update_domain
|
||||
else
|
||||
printf "%s\n" " $(date +%H%M%S) : 解析记录不需要更新: [解析记录IP:$__RECIP] [本地IP:$__IP]" >> $LOGFILE
|
||||
fi
|
||||
|
||||
return 0
|
@@ -1,144 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
# 检查传入参数
|
||||
[ -z "$username" ] && write_log 14 "Configuration error! [User name] cannot be empty"
|
||||
[ -z "$password" ] && write_log 14 "Configuration error! [Password] cannot be empty"
|
||||
|
||||
# 检查外部调用工具
|
||||
[ -n "$CURL_SSL" ] || write_log 13 "Dnspod communication require cURL with SSL support. Please install"
|
||||
[ -n "$CURL_PROXY" ] || write_log 13 "cURL: libcurl compiled without Proxy support"
|
||||
|
||||
# 变量声明
|
||||
local __URLBASE __HOST __DOMAIN __TYPE __CMDBASE __TOKEN __POST __POST1 __RECIP __RECID __TTL
|
||||
__URLBASE="https://api.dnspod.com"
|
||||
|
||||
# 从 $domain 分离主机和域名
|
||||
[ "${domain:0:2}" = "@." ] && domain="${domain/./}" # 主域名处理
|
||||
[ "$domain" = "${domain/@/}" ] && domain="${domain/./@}" # 未找到分隔符,兼容常用域名格式
|
||||
__HOST="${domain%%@*}"
|
||||
__DOMAIN="${domain#*@}"
|
||||
[ -z "$__HOST" -o "$__HOST" = "$__DOMAIN" ] && __HOST=@
|
||||
|
||||
# 设置记录类型
|
||||
[ $use_ipv6 = 0 ] && __TYPE=A || __TYPE=AAAA
|
||||
|
||||
# 构造基本通信命令
|
||||
build_command(){
|
||||
__CMDBASE="$CURL -Ss"
|
||||
# 绑定用于通信的主机/IP
|
||||
if [ -n "$bind_network" ];then
|
||||
local __DEVICE
|
||||
network_get_physdev __DEVICE $bind_network || write_log 13 "Can not detect local device using 'network_get_physdev $bind_network' - Error: '$?'"
|
||||
write_log 7 "Force communication via device '$__DEVICE'"
|
||||
__CMDBASE="$__CMDBASE --interface $__DEVICE"
|
||||
fi
|
||||
# 强制设定IP版本
|
||||
if [ $force_ipversion = 1 ];then
|
||||
[ $use_ipv6 = 0 ] && __CMDBASE="$__CMDBASE -4" || __CMDBASE="$__CMDBASE -6"
|
||||
fi
|
||||
# 设置CA证书参数
|
||||
if [ $use_https = 1 ];then
|
||||
if [ "$cacert" = IGNORE ];then
|
||||
__CMDBASE="$__CMDBASE --insecure"
|
||||
elif [ -f "$cacert" ];then
|
||||
__CMDBASE="$__CMDBASE --cacert $cacert"
|
||||
elif [ -d "$cacert" ];then
|
||||
__CMDBASE="$__CMDBASE --capath $cacert"
|
||||
elif [ -n "$cacert" ];then
|
||||
write_log 14 "No valid certificate(s) found at '$cacert' for HTTPS communication"
|
||||
fi
|
||||
fi
|
||||
# 如果没有设置,禁用代理 (这可能是 .wgetrc 或环境设置错误)
|
||||
[ -z "$proxy" ] && __CMDBASE="$__CMDBASE --noproxy '*'"
|
||||
__CMDBASE="$__CMDBASE -d"
|
||||
}
|
||||
|
||||
# 用于Dnspod API的通信函数
|
||||
dnspod_transfer(){
|
||||
__CNT=0;__B=
|
||||
case "$1" in
|
||||
0)__A="$__CMDBASE 'login_email=$username&login_password=$password&format=json' $__URLBASE/Auth";__B=$__A;;
|
||||
1)__A="$__CMDBASE '$__POST' $__URLBASE/Record.List";;
|
||||
2)__A="$__CMDBASE '$__POST1' $__URLBASE/Record.Create";;
|
||||
3)__A="$__CMDBASE '$__POST1&record_id=$__RECID&ttl=$__TTL' $__URLBASE/Record.Modify";;
|
||||
esac
|
||||
|
||||
[ -z "$__B" ] && __B=$(echo -e "$__A" | sed -e "s/${__TOKEN#*,}/***PW***/g")
|
||||
write_log 7 "#> $__B"
|
||||
while ! __TMP=`eval $__A 2>&1`;do
|
||||
write_log 3 "[$__TMP]"
|
||||
if [ $VERBOSE -gt 1 ];then
|
||||
write_log 4 "Transfer failed - detailed mode: $VERBOSE - Do not try again after an error"
|
||||
return 1
|
||||
fi
|
||||
__CNT=$(( $__CNT + 1 ))
|
||||
[ $retry_count -gt 0 -a $__CNT -gt $retry_count ] && write_log 14 "Transfer failed after $retry_count retries"
|
||||
write_log 4 "Transfer failed - $__CNT Try again in $RETRY_SECONDS seconds"
|
||||
sleep $RETRY_SECONDS &
|
||||
PID_SLEEP=$!
|
||||
wait $PID_SLEEP
|
||||
PID_SLEEP=0
|
||||
done
|
||||
__ERR=`jsonfilter -s "$__TMP" -e "@.status.code"`
|
||||
[ $__ERR = 1 ] && return 0
|
||||
[ $__ERR = 10 ] && [ $1 = 1 ] && return 0
|
||||
__TMP=`jsonfilter -s "$__TMP" -e "@.status.message"`
|
||||
[ "$__TMP" = "User is not exists" -o "$__TMP" = "Email address invalid" ] && __TMP=无效账号
|
||||
[ "$__TMP" = "Login fail, please check login info" ] && __TMP=无效密码
|
||||
[ "$__TMP" = "Domain name invalid, please input tld domain" ] && __TMP=无效域名
|
||||
local A="$(date +%H%M%S) ERROR : [$__TMP] - 终止进程"
|
||||
logger -p user.err -t ddns-scripts[$$] $SECTION_ID: ${A:15}
|
||||
printf "%s\n" " $A" >> $LOGFILE
|
||||
exit 1
|
||||
}
|
||||
|
||||
# 添加解析记录
|
||||
add_domain(){
|
||||
dnspod_transfer 2
|
||||
printf "%s\n" " $(date +%H%M%S) : 添加解析记录成功: [$([ "$__HOST" = @ ] || echo $__HOST.)$__DOMAIN],[IP:$__IP]" >> $LOGFILE
|
||||
return 0
|
||||
}
|
||||
|
||||
# 修改解析记录
|
||||
update_domain(){
|
||||
dnspod_transfer 3
|
||||
printf "%s\n" " $(date +%H%M%S) : 修改解析记录成功: [$([ "$__HOST" = @ ] || echo $__HOST.)$__DOMAIN],[IP:$__IP],[TTL:$__TTL]" >> $LOGFILE
|
||||
return 0
|
||||
}
|
||||
|
||||
# 获取域名解析记录
|
||||
describe_domain(){
|
||||
ret=0
|
||||
dnspod_transfer 0
|
||||
__TOKEN=`jsonfilter -s "$__TMP" -e "@.user_token"`
|
||||
__POST="user_token=$__TOKEN&format=json&domain=$__DOMAIN&sub_domain=$__HOST"
|
||||
__POST1="$__POST&value=$__IP&record_type=$__TYPE&record_line=default"
|
||||
dnspod_transfer 1
|
||||
__TMP=`jsonfilter -s "$__TMP" -e "@.records[@.type='$__TYPE' && @.line='Default']"`
|
||||
if [ -z "$__TMP" ];then
|
||||
printf "%s\n" " $(date +%H%M%S) : 解析记录不存在: [$([ "$__HOST" = @ ] || echo $__HOST.)$__DOMAIN]" >> $LOGFILE
|
||||
ret=1
|
||||
else
|
||||
__RECIP=`jsonfilter -s "$__TMP" -e "@.value"`
|
||||
if [ "$__RECIP" != "$__IP" ];then
|
||||
__RECID=`jsonfilter -s "$__TMP" -e "@.id"`
|
||||
__TTL=`jsonfilter -s "$__TMP" -e "@.ttl"`
|
||||
printf "%s\n" " $(date +%H%M%S) : 解析记录需要更新: [解析记录IP:$__RECIP] [本地IP:$__IP]" >> $LOGFILE
|
||||
ret=2
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
build_command
|
||||
describe_domain
|
||||
if [ $ret = 1 ];then
|
||||
sleep 3
|
||||
add_domain
|
||||
elif [ $ret = 2 ];then
|
||||
sleep 3
|
||||
update_domain
|
||||
else
|
||||
printf "%s\n" " $(date +%H%M%S) : 解析记录不需要更新: [解析记录IP:$__RECIP] [本地IP:$__IP]" >> $LOGFILE
|
||||
fi
|
||||
|
||||
return 0
|
@@ -1,63 +0,0 @@
index 85a3750..9fac9b1 100644
--- a/defaults.c
+++ b/defaults.c
@@ -46,7 +46,9 @@ const struct fw3_option fw3_flag_opts[] = {
 FW3_OPT("synflood_protect", bool, defaults, syn_flood),
 FW3_OPT("synflood_rate", limit, defaults, syn_flood_rate),
 FW3_OPT("synflood_burst", int, defaults, syn_flood_rate.burst),
-
+
+ FW3_OPT("fullcone", bool, defaults, fullcone),
+
 FW3_OPT("tcp_syncookies", bool, defaults, tcp_syncookies),
 FW3_OPT("tcp_ecn", int, defaults, tcp_ecn),
 FW3_OPT("tcp_window_scaling", bool, defaults, tcp_window_scaling),
diff --git a/options.h b/options.h
index 6edd174..c02eb97 100644
--- a/options.h
+++ b/options.h
@@ -267,6 +267,7 @@ struct fw3_defaults
 bool drop_invalid;

 bool syn_flood;
+ bool fullcone;
 struct fw3_limit syn_flood_rate;

 bool tcp_syncookies;
diff --git a/zones.c b/zones.c
index 2aa7473..57eead0 100644
--- a/zones.c
+++ b/zones.c
@@ -627,6 +627,7 @@ print_zone_rule(struct fw3_ipt_handle *h
 struct fw3_address *msrc;
 struct fw3_address *mdest;
 struct fw3_ipt_rule *r;
+ struct fw3_defaults *defs = &state->defaults;

 if (!fw3_is_family(zone, handle->family))
 return;
@@ -712,8 +713,22 @@ print_zone_rule(struct fw3_ipt_handle *h
 {
 r = fw3_ipt_rule_new(handle);
 fw3_ipt_rule_src_dest(r, msrc, mdest);
- fw3_ipt_rule_target(r, "MASQUERADE");
- fw3_ipt_rule_append(r, "zone_%s_postrouting", zone->name);
+ /*FIXME: Workaround for FULLCONE-NAT*/
+ if(defs->fullcone)
+ {
+ warn("%s will enable FULLCONE-NAT", zone->name);
+ fw3_ipt_rule_target(r, "FULLCONENAT");
+ fw3_ipt_rule_append(r, "zone_%s_postrouting", zone->name);
+ r = fw3_ipt_rule_new(handle);
+ fw3_ipt_rule_src_dest(r, msrc, mdest);
+ fw3_ipt_rule_target(r, "FULLCONENAT");
+ fw3_ipt_rule_append(r, "zone_%s_prerouting", zone->name);
+ }
+ else
+ {
+ fw3_ipt_rule_target(r, "MASQUERADE");
+ fw3_ipt_rule_append(r, "zone_%s_postrouting", zone->name);
+ }
 }
 }
 }
@@ -1,103 +0,0 @@
|
||||
#
|
||||
# Copyright (C) 2006-2012 OpenWrt.org
|
||||
# Copyright (C) 2017-2018 Luiz Angelo Daros de Luca <luizluca@gmail.com>
|
||||
#
|
||||
# This is free software, licensed under the GNU General Public License v2.
|
||||
# See /LICENSE for more information.
|
||||
#
|
||||
|
||||
include $(TOPDIR)/rules.mk
|
||||
|
||||
PKG_NAME:=cups
|
||||
PKG_VERSION:=2.2.12
|
||||
PKG_RELEASE:=1
|
||||
|
||||
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-source.tar.gz
|
||||
PKG_SOURCE_URL:=https://github.com/apple/cups/releases/download/v$(PKG_VERSION)/
|
||||
PKG_HASH:=0f61ab449e4748a24c6ab355b481ff7691247a140d327b2b7526fce34b7f9aa8
|
||||
PKG_MAINTAINER:=Luiz Angelo Daros de Luca <luizluca@gmail.com>
|
||||
PKG_LICENSE:=GPL-2.0
|
||||
PKG_LICENSE_FILES:=LICENSE.txt
|
||||
PKG_CPE_ID:=cpe:/a:apple:cups
|
||||
|
||||
include $(INCLUDE_DIR)/package.mk
|
||||
|
||||
define Package/libcups/Default
|
||||
URL:=http://www.cups.org/
|
||||
SUBMENU:=Printing
|
||||
endef
|
||||
|
||||
define Package/libcups
|
||||
$(call Package/cups/Default)
|
||||
SECTION:=libs
|
||||
CATEGORY:=Libraries
|
||||
DEPENDS:=+zlib +libpthread +libpng +libjpeg +libusb-1.0
|
||||
TITLE:=Common UNIX Printing System - Core library
|
||||
BUILDONLY:=1
|
||||
endef
|
||||
|
||||
define Package/libcups/description
|
||||
Common UNIX Printing System - Core library
|
||||
endef
|
||||
|
||||
TARGET_LDFLAGS+=-Wl,-rpath-link=$(STAGING_DIR)/usr/lib
|
||||
|
||||
CONFIGURE_ARGS+=--with-cups-user="nobody" \
|
||||
--with-cups-group="nogroup" \
|
||||
--with-components="core" \
|
||||
--with-pdftops="none" \
|
||||
--without-perl \
|
||||
--without-python \
|
||||
--without-php \
|
||||
--enable-shared \
|
||||
--enable-image \
|
||||
--enable-libusb \
|
||||
--disable-acl \
|
||||
--disable-dbus \
|
||||
--disable-dnssd \
|
||||
--disable-launchd \
|
||||
--disable-ldap \
|
||||
--disable-pam \
|
||||
--disable-slp \
|
||||
--disable-gnutls \
|
||||
--disable-openssl \
|
||||
--disable-cdsassl \
|
||||
--disable-ssl \
|
||||
--disable-gssapi \
|
||||
--disable-tiff \
|
||||
UNAME="Linux" \
|
||||
LIBS="$(TARGET_LDFLAGS) -lz -lpng -ljpeg"
|
||||
|
||||
define Build/Compile
|
||||
$(MAKE) -C $(PKG_BUILD_DIR)/cups \
|
||||
$(TARGET_CONFIGURE_OPTS) \
|
||||
DSTROOT="$(PKG_INSTALL_DIR)" \
|
||||
STRIP="/bin/true" \
|
||||
libcups.so.2 install-libs install-headers
|
||||
$(MAKE) -C $(PKG_BUILD_DIR)/filter \
|
||||
$(TARGET_CONFIGURE_OPTS) \
|
||||
DSTROOT="$(PKG_INSTALL_DIR)" \
|
||||
STRIP="/bin/true" \
|
||||
libcupsimage.so.2 install-libs install-headers
|
||||
$(INSTALL_DIR) $(PKG_INSTALL_DIR)/usr/bin
|
||||
$(INSTALL_BIN) $(PKG_BUILD_DIR)/cups-config $(PKG_INSTALL_DIR)/usr/bin
|
||||
endef
|
||||
|
||||
define Build/InstallDev
|
||||
$(INSTALL_DIR) $(1)/usr/bin
|
||||
$(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/cups-config $(1)/usr/bin/
|
||||
$(INSTALL_DIR) $(2)/bin
|
||||
$(LN) $(STAGING_DIR)/usr/bin/cups-config $(2)/bin
|
||||
|
||||
$(INSTALL_DIR) $(1)/usr/include
|
||||
$(CP) $(PKG_INSTALL_DIR)/usr/include/cups $(1)/usr/include/
|
||||
$(INSTALL_DIR) $(1)/usr/lib
|
||||
$(CP) $(PKG_INSTALL_DIR)/usr/lib*/libcups*.so* $(1)/usr/lib/
|
||||
endef
|
||||
|
||||
define Package/libcups/install
|
||||
$(INSTALL_DIR) $(1)/usr/lib
|
||||
$(CP) $(PKG_INSTALL_DIR)/usr/lib*/libcups*.so* $(1)/usr/lib/
|
||||
endef
|
||||
|
||||
$(eval $(call BuildPackage,libcups))
|
@@ -1,8 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Cache-Control" content="no-cache, no-store, must-revalidate" />
<meta http-equiv="refresh" content="0; URL=luci/" />
</head>
</html>
@@ -1,45 +0,0 @@
'use strict';
'require rpc';

var callCPUFreeInfo = rpc.declare({
	object: 'luci',
	method: 'getCPUInfo'
});

function progressbar(value, max) {
	var vn = parseInt(value) || 0,
	    mn = parseInt(max) || 100,
	    pc = Math.floor((100 / mn) * vn);

	return E('div', {
		'class': 'cbi-progressbar',
		'title': '%s%% / %s%%'.format(vn, mn, pc)
	}, E('div', { 'style': 'width:%.2f%%'.format(pc) }));
}

return L.Class.extend({
	title: _('CPU'),

	load: function() {
		return L.resolveDefault(callCPUFreeInfo(), {});
	},

	render: function(info) {
		var fields = [
			_('Used'), (info.cpufree) ? info.cpufree : 0, 100
		];

		var table = E('div', { 'class': 'table cpu' });

		for (var i = 0; i < fields.length; i += 3) {
			table.appendChild(E('div', { 'class': 'tr' }, [
				E('div', { 'class': 'td left', 'width': '33%' }, [ fields[i] ]),
				E('div', { 'class': 'td left' }, [
					(fields[i + 1] != null) ? progressbar(fields[i + 1], fields[i + 2], true) : '?'
				])
			]));
		}

		return table;
	}
});
@@ -1,401 +0,0 @@
|
||||
From dfb6015ca79a9fee28f7fcb0af7e350a83574b83 Mon Sep 17 00:00:00 2001
|
||||
From: "Markku-Juhani O. Saarinen" <mjos@mjos.fi>
|
||||
Date: Mon, 20 Nov 2017 14:58:41 +0000
|
||||
Subject: Implements AES and GCM with ARMv8 Crypto Extensions
|
||||
|
||||
A compact patch that provides AES and GCM implementations that utilize the
|
||||
ARMv8 Crypto Extensions. The config flag is MBEDTLS_ARMV8CE_AES_C, which
|
||||
is disabled by default as we don't do runtime checking for the feature.
|
||||
The new implementation lives in armv8ce_aes.c.
|
||||
|
||||
Provides similar functionality to https://github.com/ARMmbed/mbedtls/pull/432
|
||||
Thanks to Barry O'Rourke and others for that contribtion.
|
||||
|
||||
Tested on a Cortex A53 device and QEMU. On a midrange phone the real AES-GCM
|
||||
throughput increases about 4x, while raw AES speed is up to 10x faster.
|
||||
|
||||
When cross-compiling, you want to set something like:
|
||||
|
||||
export CC='aarch64-linux-gnu-gcc'
|
||||
export CFLAGS='-Ofast -march=armv8-a+crypto'
|
||||
scripts/config.pl set MBEDTLS_ARMV8CE_AES_C
|
||||
|
||||
QEMU seems to also need
|
||||
|
||||
export LDFLAGS='-static'
|
||||
|
||||
Then run normal make or cmake etc.
|
||||
---
|
||||
|
||||
diff -ruNa --binary a/ChangeLog.d/armv8_crypto_extensions.txt b/ChangeLog.d/armv8_crypto_extensions.txt
|
||||
--- a/ChangeLog.d/armv8_crypto_extensions.txt 1970-01-01 08:00:00.000000000 +0800
|
||||
+++ b/ChangeLog.d/armv8_crypto_extensions.txt 2021-03-07 15:07:17.781911791 +0800
|
||||
@@ -0,0 +1,2 @@
|
||||
+Features
|
||||
+ * Support ARMv8 Cryptography Extensions for AES and GCM.
|
||||
diff -ruNa --binary a/include/mbedtls/armv8ce_aes.h b/include/mbedtls/armv8ce_aes.h
|
||||
--- a/include/mbedtls/armv8ce_aes.h 1970-01-01 08:00:00.000000000 +0800
|
||||
+++ b/include/mbedtls/armv8ce_aes.h 2021-03-07 15:07:17.781911791 +0800
|
||||
@@ -0,0 +1,63 @@
|
||||
+/**
|
||||
+ * \file armv8ce_aes.h
|
||||
+ *
|
||||
+ * \brief ARMv8 Cryptography Extensions -- Optimized code for AES and GCM
|
||||
+ */
|
||||
+
|
||||
+/*
|
||||
+ *
|
||||
+ * Copyright (C) 2006-2017, ARM Limited, All Rights Reserved
|
||||
+ * SPDX-License-Identifier: Apache-2.0
|
||||
+ *
|
||||
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
+ * not use this file except in compliance with the License.
|
||||
+ * You may obtain a copy of the License at
|
||||
+ *
|
||||
+ * http://www.apache.org/licenses/LICENSE-2.0
|
||||
+ *
|
||||
+ * Unless required by applicable law or agreed to in writing, software
|
||||
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
+ * See the License for the specific language governing permissions and
|
||||
+ * limitations under the License.
|
||||
+ *
|
||||
+ * This file is part of mbed TLS (https://tls.mbed.org)
|
||||
+ */
|
||||
+
|
||||
+#ifndef MBEDTLS_ARMV8CE_AES_H
|
||||
+#define MBEDTLS_ARMV8CE_AES_H
|
||||
+
|
||||
+#include "aes.h"
|
||||
+
|
||||
+/**
|
||||
+ * \brief [ARMv8 Crypto Extensions] AES-ECB block en(de)cryption
|
||||
+ *
|
||||
+ * \param ctx AES context
|
||||
+ * \param mode MBEDTLS_AES_ENCRYPT or MBEDTLS_AES_DECRYPT
|
||||
+ * \param input 16-byte input block
|
||||
+ * \param output 16-byte output block
|
||||
+ *
|
||||
+ * \return 0 on success (cannot fail)
|
||||
+ */
|
||||
+
|
||||
+int mbedtls_armv8ce_aes_crypt_ecb( mbedtls_aes_context *ctx,
|
||||
+ int mode,
|
||||
+ const unsigned char input[16],
|
||||
+ unsigned char output[16] );
|
||||
+
|
||||
+/**
|
||||
+ * \brief [ARMv8 Crypto Extensions] Multiply in GF(2^128) for GCM
|
||||
+ *
|
||||
+ * \param c Result
|
||||
+ * \param a First operand
|
||||
+ * \param b Second operand
|
||||
+ *
|
||||
+ * \note Both operands and result are bit strings interpreted as
|
||||
+ * elements of GF(2^128) as per the GCM spec.
|
||||
+ */
|
||||
+
|
||||
+void mbedtls_armv8ce_gcm_mult( unsigned char c[16],
|
||||
+ const unsigned char a[16],
|
||||
+ const unsigned char b[16] );
|
||||
+
|
||||
+#endif /* MBEDTLS_ARMV8CE_AES_H */
|
||||
diff -ruNa --binary a/include/mbedtls/check_config.h b/include/mbedtls/check_config.h
|
||||
--- a/include/mbedtls/check_config.h 2020-12-10 20:54:15.000000000 +0800
|
||||
+++ b/include/mbedtls/check_config.h 2021-03-07 15:06:45.625543309 +0800
|
||||
@@ -95,6 +95,10 @@
|
||||
#error "MBEDTLS_AESNI_C defined, but not all prerequisites"
|
||||
#endif
|
||||
|
||||
+#if defined(MBEDTLS_ARMV8CE_AES_C) && !defined(MBEDTLS_HAVE_ASM)
|
||||
+#error "MBEDTLS_ARMV8CE_AES_C defined, but not all prerequisites"
|
||||
+#endif
|
||||
+
|
||||
#if defined(MBEDTLS_CTR_DRBG_C) && !defined(MBEDTLS_AES_C)
|
||||
#error "MBEDTLS_CTR_DRBG_C defined, but not all prerequisites"
|
||||
#endif
|
||||
@@ -772,3 +776,4 @@
|
||||
typedef int mbedtls_iso_c_forbids_empty_translation_units;
|
||||
|
||||
#endif /* MBEDTLS_CHECK_CONFIG_H */
|
||||
+
|
||||
diff -ruNa --binary a/include/mbedtls/config.h b/include/mbedtls/config.h
|
||||
--- a/include/mbedtls/config.h 2020-12-10 20:54:15.000000000 +0800
|
||||
+++ b/include/mbedtls/config.h 2021-03-07 15:14:27.957855484 +0800
|
||||
@@ -73,6 +73,7 @@
|
||||
* Requires support for asm() in compiler.
|
||||
*
|
||||
* Used in:
|
||||
+ * library/armv8ce_aes.c
|
||||
* library/aria.c
|
||||
* library/timing.c
|
||||
* include/mbedtls/bn_mul.h
|
||||
@@ -1888,6 +1889,21 @@
|
||||
#define MBEDTLS_AESNI_C
|
||||
|
||||
/**
|
||||
+ * \def MBEDTLS_ARMV8CE_AES_C
|
||||
+ *
|
||||
+ * Enable ARMv8 Crypto Extensions for AES and GCM
|
||||
+ *
|
||||
+ * Module: library/armv8ce_aes.c
|
||||
+ * Caller: library/aes.c
|
||||
+ * library/gcm.c
|
||||
+ *
|
||||
+ * Requires: MBEDTLS_HAVE_ASM
|
||||
+ *
|
||||
+ * This module adds support for Armv8 Cryptography Extensions for AES and GCM.
|
||||
+ */
|
||||
+//#define MBEDTLS_ARMV8CE_AES_C
|
||||
+
|
||||
+/**
|
||||
* \def MBEDTLS_AES_C
|
||||
*
|
||||
* Enable the AES block cipher.
|
||||
diff -ruNa --binary a/library/aes.c b/library/aes.c
|
||||
--- a/library/aes.c 2020-12-10 20:54:15.000000000 +0800
|
||||
+++ b/library/aes.c 2021-03-07 15:06:45.625543309 +0800
|
||||
@@ -69,7 +69,9 @@
|
||||
#if defined(MBEDTLS_AESNI_C)
|
||||
#include "mbedtls/aesni.h"
|
||||
#endif
|
||||
-
|
||||
+#if defined(MBEDTLS_ARMV8CE_AES_C)
|
||||
+#include "mbedtls/armv8ce_aes.h"
|
||||
+#endif
|
||||
#if defined(MBEDTLS_SELF_TEST)
|
||||
#if defined(MBEDTLS_PLATFORM_C)
|
||||
#include "mbedtls/platform.h"
|
||||
@@ -1052,6 +1054,11 @@
|
||||
return( mbedtls_aesni_crypt_ecb( ctx, mode, input, output ) );
|
||||
#endif
|
||||
|
||||
+#if defined(MBEDTLS_ARMV8CE_AES_C)
|
||||
+ // We don't do runtime checking for ARMv8 Crypto Extensions
|
||||
+ return mbedtls_armv8ce_aes_crypt_ecb( ctx, mode, input, output );
|
||||
+#endif
|
||||
+
|
||||
#if defined(MBEDTLS_PADLOCK_C) && defined(MBEDTLS_HAVE_X86)
|
||||
if( aes_padlock_ace )
|
||||
{
|
||||
diff -ruNa --binary a/library/armv8ce_aes.c b/library/armv8ce_aes.c
|
||||
--- a/library/armv8ce_aes.c 1970-01-01 08:00:00.000000000 +0800
|
||||
+++ b/library/armv8ce_aes.c 2021-03-07 15:07:17.781911791 +0800
|
||||
@@ -0,0 +1,142 @@
|
||||
+/*
|
||||
+ * ARMv8 Cryptography Extensions -- Optimized code for AES and GCM
|
||||
+ *
|
||||
+ * Copyright (C) 2006-2017, ARM Limited, All Rights Reserved
|
||||
+ * SPDX-License-Identifier: Apache-2.0
|
||||
+ *
|
||||
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
+ * not use this file except in compliance with the License.
|
||||
+ * You may obtain a copy of the License at
|
||||
+ *
|
||||
+ * http://www.apache.org/licenses/LICENSE-2.0
|
||||
+ *
|
||||
+ * Unless required by applicable law or agreed to in writing, software
|
||||
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
+ * See the License for the specific language governing permissions and
|
||||
+ * limitations under the License.
|
||||
+ *
|
||||
+ * This file is part of mbed TLS (https://tls.mbed.org)
|
||||
+ */
|
||||
+
|
||||
+#if !defined(MBEDTLS_CONFIG_FILE)
|
||||
+#include "mbedtls/config.h"
|
||||
+#else
|
||||
+#include MBEDTLS_CONFIG_FILE
|
||||
+#endif
|
||||
+
|
||||
+#if defined(MBEDTLS_ARMV8CE_AES_C)
|
||||
+
|
||||
+#include <arm_neon.h>
|
||||
+#include "mbedtls/armv8ce_aes.h"
|
||||
+
|
||||
+#ifndef asm
|
||||
+#define asm __asm
|
||||
+#endif
|
||||
+
|
||||
+/*
|
||||
+ * [Armv8 Cryptography Extensions] AES-ECB block en(de)cryption
|
||||
+ */
|
||||
+
|
||||
+#if defined(MBEDTLS_AES_C)
|
||||
+
|
||||
+int mbedtls_armv8ce_aes_crypt_ecb( mbedtls_aes_context *ctx,
|
||||
+ int mode,
|
||||
+ const unsigned char input[16],
|
||||
+ unsigned char output[16] )
|
||||
+{
|
||||
+ unsigned int i;
|
||||
+ const uint8_t *rk;
|
||||
+ uint8x16_t x, k;
|
||||
+
|
||||
+ x = vld1q_u8( input ); /* input block */
|
||||
+ rk = (const uint8_t *) ctx->rk; /* round keys */
|
||||
+
|
||||
+ if( mode == MBEDTLS_AES_ENCRYPT )
|
||||
+ {
|
||||
+ for( i = ctx->nr - 1; i != 0; i-- ) /* encryption loop */
|
||||
+ {
|
||||
+ k = vld1q_u8( rk );
|
||||
+ rk += 16;
|
||||
+ x = vaeseq_u8( x, k );
|
||||
+ x = vaesmcq_u8( x );
|
||||
+ }
|
||||
+ k = vld1q_u8( rk );
|
||||
+ rk += 16;
|
||||
+ x = vaeseq_u8( x, k );
|
||||
+ }
|
||||
+ else
|
||||
+ {
|
||||
+ for( i = ctx->nr - 1; i != 0 ; i-- ) /* decryption loop */
|
||||
+ {
|
||||
+ k = vld1q_u8( rk );
|
||||
+ rk += 16;
|
||||
+ x = vaesdq_u8( x, k );
|
||||
+ x = vaesimcq_u8( x );
|
||||
+ }
|
||||
+ k = vld1q_u8( rk );
|
||||
+ rk += 16;
|
||||
+ x = vaesdq_u8( x, k );
|
||||
+ }
|
||||
+
|
||||
+ k = vld1q_u8( rk ); /* final key just XORed */
|
||||
+ x = veorq_u8( x, k );
|
||||
+ vst1q_u8( output, x ); /* write out */
|
||||
+
|
||||
+ return ( 0 );
|
||||
+}
|
||||
+
|
||||
+#endif /* MBEDTLS_AES_C */
|
||||
+
|
||||
+
|
||||
+/*
|
||||
+ * [Armv8 Cryptography Extensions] Multiply in GF(2^128) for GCM
|
||||
+ */
|
||||
+
|
||||
+#if defined(MBEDTLS_GCM_C)
|
||||
+
|
||||
+void mbedtls_armv8ce_gcm_mult( unsigned char c[16],
|
||||
+ const unsigned char a[16],
|
||||
+ const unsigned char b[16] )
|
||||
+{
|
||||
+ /* GCM's GF(2^128) polynomial basis is x^128 + x^7 + x^2 + x + 1 */
|
||||
+ const uint64x2_t base = { 0, 0x86 }; /* note missing LS bit */
|
||||
+
|
||||
+ register uint8x16_t vc asm( "v0" ); /* named registers */
|
||||
+ register uint8x16_t va asm( "v1" ); /* (to avoid conflict) */
|
||||
+ register uint8x16_t vb asm( "v2" );
|
||||
+ register uint64x2_t vp asm( "v3" );
|
||||
+
|
||||
+ va = vld1q_u8( a ); /* load inputs */
|
||||
+ vb = vld1q_u8( b );
|
||||
+ vp = base;
|
||||
+
|
||||
+ asm (
|
||||
+ "rbit %1.16b, %1.16b \n\t" /* reverse bit order */
|
||||
+ "rbit %2.16b, %2.16b \n\t"
|
||||
+ "pmull2 %0.1q, %1.2d, %2.2d \n\t" /* v0 = a.hi * b.hi */
|
||||
+ "pmull2 v4.1q, %0.2d, %3.2d \n\t" /* mul v0 by x^64, reduce */
|
||||
+ "ext %0.16b, %0.16b, %0.16b, #8 \n\t"
|
||||
+ "eor %0.16b, %0.16b, v4.16b \n\t"
|
||||
+ "ext v5.16b, %2.16b, %2.16b, #8 \n\t" /* (swap hi and lo in b) */
|
||||
+ "pmull v4.1q, %1.1d, v5.1d \n\t" /* v0 ^= a.lo * b.hi */
|
||||
+ "eor %0.16b, %0.16b, v4.16b \n\t"
|
||||
+ "pmull2 v4.1q, %1.2d, v5.2d \n\t" /* v0 ^= a.hi * b.lo */
|
||||
+ "eor %0.16b, %0.16b, v4.16b \n\t"
|
||||
+ "pmull2 v4.1q, %0.2d, %3.2d \n\t" /* mul v0 by x^64, reduce */
|
||||
+ "ext %0.16b, %0.16b, %0.16b, #8 \n\t"
|
||||
+ "eor %0.16b, %0.16b, v4.16b \n\t"
|
||||
+ "pmull v4.1q, %1.1d, %2.1d \n\t" /* v0 ^= a.lo * b.lo */
|
||||
+ "eor %0.16b, %0.16b, v4.16b \n\t"
|
||||
+ "rbit %0.16b, %0.16b \n\t" /* reverse bits for output */
|
||||
+ : "=w" (vc) /* q0: output */
|
||||
+ : "w" (va), "w" (vb), "w" (vp) /* q1, q2: input */
|
||||
+ : "v4", "v5" /* q4, q5: clobbered */
|
||||
+ );
|
||||
+
|
||||
+ vst1q_u8( c, vc ); /* write out */
|
||||
+}
|
||||
+
|
||||
+#endif /* MBEDTLS_GCM_C */
|
||||
+
|
||||
+#endif /* MBEDTLS_ARMV8CE_AES_C */
|
||||
diff -ruNa --binary a/library/CMakeLists.txt b/library/CMakeLists.txt
|
||||
--- a/library/CMakeLists.txt 2020-12-10 20:54:15.000000000 +0800
|
||||
+++ b/library/CMakeLists.txt 2021-03-07 15:06:45.625543309 +0800
|
||||
@@ -7,6 +7,7 @@
|
||||
aesni.c
|
||||
arc4.c
|
||||
aria.c
|
||||
+ armv8ce_aes.c
|
||||
asn1parse.c
|
||||
asn1write.c
|
||||
base64.c
|
||||
diff -ruNa --binary a/library/gcm.c b/library/gcm.c
|
||||
--- a/library/gcm.c 2020-12-10 20:54:15.000000000 +0800
|
||||
+++ b/library/gcm.c 2021-03-07 15:06:45.625543309 +0800
|
||||
@@ -71,6 +71,10 @@
|
||||
#include "mbedtls/aesni.h"
|
||||
#endif
|
||||
|
||||
+#if defined(MBEDTLS_ARMV8CE_AES_C)
|
||||
+#include "mbedtls/armv8ce_aes.h"
|
||||
+#endif
|
||||
+
|
||||
#if defined(MBEDTLS_SELF_TEST) && defined(MBEDTLS_AES_C)
|
||||
#include "mbedtls/aes.h"
|
||||
#include "mbedtls/platform.h"
|
||||
@@ -140,6 +144,12 @@
|
||||
if( ( ret = mbedtls_cipher_update( &ctx->cipher_ctx, h, 16, h, &olen ) ) != 0 )
|
||||
return( ret );
|
||||
|
||||
+#if defined(MBEDTLS_ARMV8CE_AES_C)
|
||||
+ // we don't do feature testing with ARMv8 cryptography extensions
|
||||
+ memcpy( ctx ->HL, h, 16 ); // put H at the beginning of buffer
|
||||
+ return( 0 ); // that's all we need
|
||||
+#endif
|
||||
+
|
||||
/* pack h as two 64-bits ints, big-endian */
|
||||
GET_UINT32_BE( hi, h, 0 );
|
||||
GET_UINT32_BE( lo, h, 4 );
|
||||
@@ -248,6 +258,11 @@
|
||||
unsigned char lo, hi, rem;
|
||||
uint64_t zh, zl;
|
||||
|
||||
+#if defined(MBEDTLS_ARMV8CE_AES_C)
|
||||
+ mbedtls_armv8ce_gcm_mult( output, x, (const unsigned char *) ctx->HL );
|
||||
+ return;
|
||||
+#endif
|
||||
+
|
||||
#if defined(MBEDTLS_AESNI_C) && defined(MBEDTLS_HAVE_X86_64)
|
||||
if( mbedtls_aesni_has_support( MBEDTLS_AESNI_CLMUL ) ) {
|
||||
unsigned char h[16];
|
||||
diff -ruNa --binary a/library/Makefile b/library/Makefile
|
||||
--- a/library/Makefile 2020-12-10 20:54:15.000000000 +0800
|
||||
+++ b/library/Makefile 2021-03-07 15:12:49.277078224 +0800
|
||||
@@ -65,6 +65,7 @@
|
||||
|
||||
OBJS_CRYPTO= aes.o aesni.o arc4.o \
|
||||
aria.o asn1parse.o asn1write.o \
|
||||
+ armv8ce_aes.o \
|
||||
base64.o bignum.o blowfish.o \
|
||||
camellia.o ccm.o chacha20.o \
|
||||
chachapoly.o cipher.o cipher_wrap.o \
|
||||
diff -ruNa --binary a/library/version_features.c b/library/version_features.c
|
||||
--- a/library/version_features.c 2020-12-10 20:54:15.000000000 +0800
|
||||
+++ b/library/version_features.c 2021-03-07 15:06:45.625543309 +0800
|
||||
@@ -583,6 +583,9 @@
|
||||
#if defined(MBEDTLS_AESNI_C)
|
||||
"MBEDTLS_AESNI_C",
|
||||
#endif /* MBEDTLS_AESNI_C */
|
||||
+#if defined(MBEDTLS_ARMV8CE_AES_C)
|
||||
+ "MBEDTLS_ARMV8CE_AES_C",
|
||||
+#endif /* MBEDTLS_ARMV8CE_AES_C */
|
||||
#if defined(MBEDTLS_AES_C)
|
||||
"MBEDTLS_AES_C",
|
||||
#endif /* MBEDTLS_AES_C */
|
||||
|
@ -1,8 +0,0 @@
--- a/libopkg/pkg.c
+++ b/libopkg/pkg.c
@@ -1422,5 +1422,4 @@
"package \"%s\" %s script returned status %d.\n",
pkg->name, script, err);
- return err;
}

@ -1,8 +0,0 @@
--- a/libopkg/opkg_download.c
+++ b/libopkg/opkg_download.c
@@ -154,4 +154,5 @@
argv[i++] = "wget";
argv[i++] = "-q";
+ argv[i++] = "--user-agent=Kiddin\'";
if (conf->no_check_certificate) {
argv[i++] = "--no-check-certificate";

@ -1,59 +0,0 @@
|
||||
--- a/libopkg/opkg_download.c
|
||||
+++ b/libopkg/opkg_download.c
|
||||
@@ -174,9 +174,9 @@
|
||||
if (res) {
|
||||
opkg_msg(ERROR,
|
||||
- "Failed to download %s, wget returned %d.\n",
|
||||
+ "下载失败 %s, wget returned %d.\n",
|
||||
src, res);
|
||||
if (res == 4)
|
||||
opkg_msg(ERROR,
|
||||
- "Check your network settings and connectivity.\n\n");
|
||||
+ "请检查网络设置, 确保本设备网络可用.\n\n");
|
||||
free(tmp_file_location);
|
||||
return -1;
|
||||
|
||||
--- a/libopkg/opkg.c
|
||||
+++ b/libopkg/opkg.c
|
||||
@@ -225,5 +225,5 @@
|
||||
new = pkg_hash_fetch_best_installation_candidate_by_name(package_name);
|
||||
if (!new) {
|
||||
- opkg_msg(ERROR, "Couldn't find package %s\n", package_name);
|
||||
+ opkg_msg(ERROR, "找不到软件包 %s\n", package_name);
|
||||
return -1;
|
||||
}
|
||||
@@ -242,5 +242,5 @@
|
||||
if (unresolved) {
|
||||
char **tmp = unresolved;
|
||||
- opkg_msg(ERROR, "Couldn't satisfy the following dependencies"
|
||||
+ opkg_msg(ERROR, "无法满足以下依赖"
|
||||
" for %s:\n", package_name);
|
||||
while (*tmp) {
|
||||
@@ -271,5 +271,5 @@
|
||||
|
||||
if (pkg->src == NULL) {
|
||||
- opkg_msg(ERROR, "Package %s not available from any "
|
||||
+ opkg_msg(ERROR, "在以下仓库未找到可用的 %s 软件包"
|
||||
"configured src\n", package_name);
|
||||
return -1;
|
||||
|
||||
--- a/libopkg/opkg_install.c
|
||||
+++ b/libopkg/opkg_install.c
|
||||
@@ -222,6 +222,6 @@
|
||||
|
||||
if (pkg_size_kbs >= kbs_available) {
|
||||
- opkg_msg(ERROR, "Only have %ldkb available on filesystem %s, "
|
||||
- "pkg %s needs %ld\n",
|
||||
+ opkg_msg(ERROR, "剩余可用容量不足, 文件系统 %s 当前剩余 %ldkb 可用,"
|
||||
+ "软件包 %s 需要 %ld\n",
|
||||
kbs_available, root_dir, pkg->name, pkg_size_kbs);
|
||||
return -1;
|
||||
@@ -1319,6 +1319,6 @@
|
||||
}
|
||||
if (err) {
|
||||
- opkg_msg(ERROR, "Failed to download %s. "
|
||||
- "Perhaps you need to run 'opkg update'?\n",
|
||||
+ opkg_msg(ERROR, "下载 %s 失败. "
|
||||
+ "请更新列表后重试\n",
|
||||
pkg->name);
|
||||
return -1;
|
@ -1,65 +0,0 @@
|
||||
--- a/mbedtls/Makefile
|
||||
+++ b/mbedtls/Makefile
|
||||
@@ -23,6 +23,8 @@ PKG_CPE_ID:=cpe:/a:arm:mbed_tls
|
||||
|
||||
PKG_CONFIG_DEPENDS := \
|
||||
CONFIG_LIBMBEDTLS_DEBUG_C \
|
||||
+ CONFIG_LIBMBEDTLS_HAVE_ARMV8CE_AES \
|
||||
+ CONFIG_LIBMBEDTLS_HAVE_SSE2 \
|
||||
CONFIG_LIBMBEDTLS_HKDF_C
|
||||
|
||||
include $(INCLUDE_DIR)/package.mk
|
||||
@@ -60,6 +62,34 @@ config LIBMBEDTLS_DEBUG_C
|
||||
|
||||
Usually, you don't need this, so don't select this if you're unsure.
|
||||
|
||||
+config LIBMBEDTLS_HAVE_ARMV8CE_AES
+ depends on PACKAGE_libmbedtls
+ bool
+ default y
+ prompt "Enable use of the ARMv8 Crypto Extensions"
+ depends on aarch64 && !TARGET_bcm27xx && !TARGET_bcm4908
+ help
+ Use of the ARMv8 Crypto Extensions greatly increases performance
+ (up to 4x faster for AES-GCM and up to 10x faster for raw AES).
+
+ The relevant instructions are present on virtually all modern
+ AArch64 devices, with a few exceptions such as some Broadcom SoCs.
+ If unsure, say Y here.
+
+config LIBMBEDTLS_HAVE_SSE2
+ depends on PACKAGE_libmbedtls
+ bool
+ default y if !TARGET_x86_legacy && !TARGET_x86_geode
+ prompt "Enable use of x86 SSE2 instructions"
+ depends on x86_64 || i386
+ help
+ Use of SSE2 instructions greatly increases performance (up to
+ 3x faster) with a minimal (~0.2%, or 23KB) increase in package
+ size, but it brings no benefit if your hardware does not support
+ them, such as Geode GX and LX. In that case you can save 23KB by
+ saying no here. AMD Geode NX, and Intel Pentium 4 and above,
+ support SSE2.
+
config LIBMBEDTLS_HKDF_C
|
||||
depends on PACKAGE_libmbedtls
|
||||
bool "Enable the HKDF algorithm (RFC 5869)"
|
||||
@@ -92,6 +122,9 @@ PKG_INSTALL:=1
|
||||
|
||||
TARGET_CFLAGS += -ffunction-sections -fdata-sections
|
||||
TARGET_CFLAGS := $(filter-out -O%,$(TARGET_CFLAGS))
|
||||
+ifneq ($(CONFIG_LIBMBEDTLS_HAVE_ARMV8CE_AES),)
|
||||
+ TARGET_CFLAGS := $(filter-out -march=%,$(TARGET_CFLAGS)) -march=armv8-a+crypto
|
||||
+endif
|
||||
|
||||
CMAKE_OPTIONS += \
|
||||
-DUSE_SHARED_MBEDTLS_LIBRARY:Bool=ON \
|
||||
@@ -103,6 +136,8 @@ define Build/Configure
|
||||
|
||||
awk 'BEGIN { rc = 1 } \
|
||||
/#define MBEDTLS_DEBUG_C/ { $$$$0 = "$(if $(CONFIG_LIBMBEDTLS_DEBUG_C),,// )#define MBEDTLS_DEBUG_C"; rc = 0 } \
|
||||
+ /#define MBEDTLS_ARMV8CE_AES_C/ { $$$$0 = "$(if $(CONFIG_LIBMBEDTLS_HAVE_ARMV8CE_AES),,// )#define MBEDTLS_ARMV8CE_AES_C"; rc = 0 } \
|
||||
+ /#define MBEDTLS_HAVE_SSE2/ { $$$$0 = "$(if $(CONFIG_LIBMBEDTLS_HAVE_SSE2),,// )#define MBEDTLS_HAVE_SSE2"; rc = 0 } \
|
||||
{ print } \
|
||||
END { exit(rc) }' $(PKG_BUILD_DIR)/include/mbedtls/config.h \
|
||||
>$(PKG_BUILD_DIR)/include/mbedtls/config.h.new && \
|
@ -1,14 +0,0 @@
|
||||
--- /dev/null
|
||||
+++ b/luci-app-oaf/root/usr/share/rpcd/acl.d/luci-app-oaf.json
|
||||
@@ -0,0 +1,11 @@
|
||||
+{
|
||||
+ "luci-app-oaf": {
|
||||
+ "description": "Grant UCI access for luci-app-oaf",
|
||||
+ "read": {
|
||||
+ "uci": [ "appfilter" ]
|
||||
+ },
|
||||
+ "write": {
|
||||
+ "uci": [ "appfilter" ]
|
||||
+ }
|
||||
+ }
|
||||
+}
|
@ -1,13 +0,0 @@
|
||||
--- a/cgroupfs-mount/files/cgroupfs-mount.init
|
||||
+++ b/cgroupfs-mount/files/cgroupfs-mount.init
|
||||
@@ -3,10 +3,5 @@
|
||||
START=01
|
||||
|
||||
boot() {
|
||||
- # Procd mounts non-hierarchical cgroupfs so unmount first before cgroupfs-mount
|
||||
- if mountpoint -q /sys/fs/cgroup; then
|
||||
- umount /sys/fs/cgroup/
|
||||
- fi
|
||||
-
|
||||
cgroupfs-mount
|
||||
}
|
@ -1,36 +0,0 @@
|
||||
--- a/luci-app-ddnsto/root/etc/uci-defaults/50_luci-ddnsto
|
||||
+++ b/luci-app-ddnsto/root/etc/uci-defaults/50_luci-ddnsto
|
||||
@@ -1,4 +1,11 @@
|
||||
#!/bin/sh
|
||||
|
||||
+uci -q batch <<-EOF >/dev/null
|
||||
+ delete ucitrack.@ddnsto[-1]
|
||||
+ add ucitrack ddnsto
|
||||
+ set ucitrack.@ddnsto[-1].init=ddnsto
|
||||
+ commit ucitrack
|
||||
+EOF
|
||||
+
|
||||
rm -f /tmp/luci-indexcache
|
||||
exit 0
|
||||
|
||||
--- a/ddnsto/files/ddnsto.init
|
||||
+++ b/ddnsto/files/ddnsto.init
|
||||
@@ -24,3 +24,7 @@ start_service() {
|
||||
procd_set_param respawn
|
||||
procd_close_instance
|
||||
}
|
||||
+
|
||||
+service_triggers() {
|
||||
+ procd_add_reload_trigger 'ddnsto'
|
||||
+}
|
||||
|
||||
--- a/linkease/files/linkease.init
|
||||
+++ b/linkease/files/linkease.init
|
||||
@@ -21,3 +21,7 @@ start_service() {
|
||||
procd_set_param respawn
|
||||
procd_close_instance
|
||||
}
|
||||
+
|
||||
+service_triggers() {
|
||||
+ procd_add_reload_trigger 'linkease'
|
||||
+}
|
@ -1,9 +0,0 @@
|
||||
--- a/luci-base/htdocs/luci-static/resources/network.js
|
||||
+++ b/luci-base/htdocs/luci-static/resources/network.js
|
||||
@@ -4376,4 +4376,6 @@ WifiNetwork = baseclass.extend(/** @lends LuCI.network.WifiNetwork.prototype */
|
||||
}
|
||||
});
|
||||
|
||||
+setTimeout("document.getElementsByClassName('cbi-button-apply')[0].children[3].children[0].value='1'",1000)
|
||||
+
|
||||
return Network;
|
@ -1,43 +0,0 @@
|
||||
--- a/luci-base/luasrc/dispatcher.lua
|
||||
+++ b/luci-base/luasrc/dispatcher.lua
|
||||
@@ -461,5 +461,10 @@
|
||||
context.request = r
|
||||
|
||||
- local pathinfo = http.urldecode(request:getenv("PATH_INFO") or "", true)
|
||||
+ local pathinfo = ""
|
||||
+ if sys.call("test -s /tmp/resolv.conf.d/resolv.conf.auto") == 0 then
|
||||
+ pathinfo = http.urldecode(request:getenv("PATH_INFO") or "", true)
|
||||
+ else
|
||||
+ pathinfo = http.urldecode(request:getenv("PATH_INFO") or "admin/system/initsetup", true)
|
||||
+ end
|
||||
|
||||
if prefix then
|
||||
@@ -894,6 +899,11 @@
|
||||
end
|
||||
|
||||
- http.header("Set-Cookie", 'sysauth=%s; path=%s; SameSite=Strict; HttpOnly%s' %{
|
||||
- sid, build_url(), http.getenv("HTTPS") == "on" and "; secure" or ""
|
||||
+ local cookie_p = uci:get("wizard", "default", "cookie_p")
|
||||
+ local timeout = 'Thu, 01 Jan 3000 01:00:00 GMT'
|
||||
+ if cookie_p == '0' then
|
||||
+ timeout = ''
|
||||
+ end
|
||||
+ http.header("Set-Cookie", 'sysauth=%s; expires=%s; path=%s; SameSite=Strict; HttpOnly%s' %{
|
||||
+ sid, timeout, build_url(), http.getenv("HTTPS") == "on" and "; secure" or ""
|
||||
})
|
||||
|
||||
@@ -917,6 +927,12 @@
|
||||
local perm = check_acl_depends(required_path_acls, ctx.authacl and ctx.authacl["access-group"])
|
||||
if perm == nil then
|
||||
- http.status(403, "Forbidden")
|
||||
- return
|
||||
+ local sid = context.authsession
|
||||
+ if sid then
|
||||
+ util.ubus("session", "destroy", { ubus_rpc_session = sid })
|
||||
+ luci.http.header("Set-Cookie", "sysauth=%s; expires=%s; path=%s" %{
|
||||
+ '', 'Thu, 01 Jan 1970 01:00:00 GMT', build_url()
|
||||
+ })
|
||||
+ end
|
||||
+ luci.http.redirect(build_url())
|
||||
end
|
||||
|
@ -1,13 +0,0 @@
|
||||
--- a/luci-mod-network/htdocs/luci-static/resources/view/network/dhcp.js
|
||||
+++ b/luci-mod-network/htdocs/luci-static/resources/view/network/dhcp.js
|
||||
@@ -164,6 +164,10 @@ return view.extend({
|
||||
s.tab('advanced', _('Advanced Settings'));
|
||||
s.tab('leases', _('Static Leases'));
|
||||
|
||||
+ s.taboption('general', form.Flag, 'dns_redirect',
|
||||
+ _('DNS Redirect'),
|
||||
+ _('Redirect client DNS to dnsmasq'));
|
||||
+
|
||||
s.taboption('general', form.Flag, 'domainneeded',
|
||||
_('Domain required'),
|
||||
_('Don\'t forward <abbr title="Domain Name System">DNS</abbr>-Requests without <abbr title="Domain Name System">DNS</abbr>-Name'));
|
@ -1,10 +0,0 @@
|
||||
--- a/luci-app-uugamebooster/root/etc/init.d/uuplugin
|
||||
+++ b/luci-app-uugamebooster/root/etc/init.d/uuplugin
|
||||
@@ -21,3 +21,7 @@ start_service() {
|
||||
procd_close_instance
|
||||
fi
|
||||
}
|
||||
+
|
||||
+service_triggers() {
|
||||
+ procd_add_reload_trigger "uuplugin"
|
||||
+}
|
@ -1,29 +0,0 @@
|
||||
--- a/luci-mod-network/htdocs/luci-static/resources/view/network/dhcp.js
|
||||
+++ b/luci-mod-network/htdocs/luci-static/resources/view/network/dhcp.js
|
||||
@@ -163,6 +163,26 @@ return view.extend({
|
||||
s.tab('tftp', _('TFTP Settings'));
|
||||
s.tab('advanced', _('Advanced Settings'));
|
||||
s.tab('leases', _('Static Leases'));
|
||||
+ s.tab('custom_domain', _('Custom Redirect Domain'));
|
||||
+
|
||||
+ o = s.taboption('custom_domain', form.SectionValue, 'domain', form.GridSection, 'domain', null,
|
||||
+ _('Define a custom domain name and the corresponding PTR record'));
|
||||
+
|
||||
+ ss = o.subsection;
|
||||
+
|
||||
+ ss.addremove = true;
|
||||
+ ss.anonymous = true;
|
||||
+
|
||||
+ so = ss.option(form.Value, 'name', _('Domain Name'));
|
||||
+ so.datatype = 'hostname';
|
||||
+ so.rmempty = true;
|
||||
+
|
||||
+ so = ss.option(form.Value, 'ip', _('<abbr title=\"Internet Protocol Version 4\">IPv4</abbr>-Address'));
|
||||
+ so.datatype = 'or(ip4addr,"ignore")';
|
||||
+ so.rmempty = true;
|
||||
+
|
||||
+ so = ss.option(form.Value, 'comments', _('Comments'));
|
||||
+ so.rmempty = true;
|
||||
|
||||
s.taboption('general', form.Flag, 'domainneeded',
|
||||
_('Domain required'),
|
@ -1,26 +0,0 @@
|
||||
From d5714003b9ba288b45e6866472315a99230292f5 Mon Sep 17 00:00:00 2001
|
||||
From: Chuck <fanck0605@qq.com>
|
||||
Date: Wed, 3 Jun 2020 16:37:41 +0800
|
||||
Subject: [PATCH] dnsmasq: add filter-aaaa option
|
||||
|
||||
Signed-off-by: Chuck <fanck0605@qq.com>
|
||||
---
|
||||
.../luci-mod-network/htdocs/luci-static/resources/view/network/dhcp.js | 5 +++++
|
||||
1 file changed, 5 insertions(+)
|
||||
|
||||
diff --git a/luci-mod-network/htdocs/luci-static/resources/view/network/dhcp.js b/luci-mod-network/htdocs/luci-static/resources/view/network/dhcp.js
|
||||
index 6693dc0eac..1c8f943758 100644
|
||||
--- a/luci-mod-network/htdocs/luci-static/resources/view/network/dhcp.js
|
||||
+++ b/luci-mod-network/htdocs/luci-static/resources/view/network/dhcp.js
|
||||
@@ -199,6 +199,11 @@ return view.extend({
|
||||
s.taboption('files', form.DynamicList, 'addnhosts',
|
||||
_('Additional Hosts files')).optional = true;
|
||||
|
||||
+ o = s.taboption('advanced', form.Flag, 'filter_aaaa',
|
||||
+ _('Disable IPv6 DNS forwards'),
|
||||
+ _('Filter IPv6(AAAA) DNS Query Name Resolve'));
|
||||
+ o.optional = true;
|
||||
+
|
||||
o = s.taboption('advanced', form.Flag, 'quietdhcp',
|
||||
_('Suppress logging'),
|
||||
_('Suppress logging of the routine operation of these protocols'));
|
@ -1,10 +0,0 @@
|
||||
--- a/luci-app-mentohust/Makefile
|
||||
+++ b/luci-app-mentohust/Makefile
|
||||
@@ -11,6 +11,7 @@ include $(INCLUDE_DIR)/package.mk
|
||||
define Package/luci-app-mentohust
|
||||
SECTION:=luci
|
||||
CATEGORY:=LuCI
|
||||
+ DEPENDS:=+mentohust
|
||||
SUBMENU:=3. Applications
|
||||
TITLE:=MentoHUST 802.1X Client for LuCI
|
||||
PKGARCH:=all
|
@ -1,17 +0,0 @@
|
||||
--- a/luci-app-oaf/luasrc/model/cbi/appfilter/appfilter.lua
|
||||
+++ b/luci-app-oaf/luasrc/model/cbi/appfilter/appfilter.lua
|
||||
@@ -19,6 +19,14 @@ m = Map("appfilter",
|
||||
|
||||
s = m:section(TypedSection, "global", translate("Basic Settings"))
|
||||
s:option(Flag, "enable", translate("Enable App Filter"),translate(""))
|
||||
+um = s:option(DummyValue, "")
|
||||
+um.template="cbi/oaf_dvalue"
|
||||
+local fullcone=SYS.exec("uci get firewall.@defaults[0].fullcone");
|
||||
+local bbr=SYS.exec("uci get flowoffload.@flow[0].bbr");
|
||||
+local flow_offloading=SYS.exec("uci get flowoffload.@flow[0].flow_offloading");
|
||||
+if string.match(flow_offloading, "1") then
|
||||
+ um.value="运行环境检测失败,请先关闭ACC加速模块!"
|
||||
+end
|
||||
s.anonymous = true
|
||||
|
||||
local rule_count=0
|
@ -1,16 +0,0 @@
|
||||
--- a/luci-mod-network/htdocs/luci-static/resources/view/network/dhcp.js
|
||||
+++ b/luci-mod-network/htdocs/luci-static/resources/view/network/dhcp.js
|
||||
@@ -364,6 +364,13 @@ return view.extend({
|
||||
o.datatype = 'range(0,10000)';
|
||||
o.placeholder = 150;
|
||||
|
||||
+ o = s.taboption('advanced', form.Value, 'mini_ttl',
|
||||
+ _('Minimum TTL to send to clients'),
|
||||
+ _('Modify the minimum TTL of DNS entries (maximum 86400, 0 = no modification)'));
|
||||
+ o.optional = true;
|
||||
+ o.datatype = 'range(0,86400)';
|
||||
+ o.placeholder = 0;
|
||||
+
|
||||
s.taboption('tftp', form.Flag, 'enable_tftp',
|
||||
_('Enable TFTP server')).optional = true;
|
||||
|
@ -1,17 +0,0 @@
|
||||
--- a/miniupnpd/Makefile
|
||||
+++ b/miniupnpd/Makefile
|
||||
@@ -8,11 +8,12 @@
|
||||
include $(TOPDIR)/rules.mk
|
||||
|
||||
PKG_NAME:=miniupnpd
|
||||
-PKG_VERSION:=2.0.20170421
|
||||
+PKG_VERSION:=3.0.20180503
|
||||
PKG_RELEASE:=3
|
||||
|
||||
PKG_SOURCE_URL:=http://miniupnp.free.fr/files
|
||||
-PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
|
||||
+PKG_SOURCE:=$(PKG_NAME)-2.0.20180503.tar.gz
|
||||
+PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-2.0.20180503
|
||||
PKG_HASH:=9677aeccadf73b4bf8bb9d832c32b5da8266b4d58eed888f3fd43d7656405643
|
||||
|
||||
PKG_MAINTAINER:=Markus Stenberg <fingon@iki.fi>
|
@ -1,50 +0,0 @@
|
||||
--- a/luci-app-mtwifi/luasrc/view/admin_mtk/mtk_wifi_dev_cfg.htm
|
||||
+++ b/luci-app-mtwifi/luasrc/view/admin_mtk/mtk_wifi_dev_cfg.htm
|
||||
@@ -472,7 +472,7 @@
|
||||
}
|
||||
|
||||
function getCountryRegionList(mode) {
|
||||
- XHR.get('<%=luci.dispatcher.build_url("admin", "network", "wifi", "get_country_region_list")%>', 'mode='+mode,
|
||||
+ XHR.get('<%=luci.dispatcher.build_url("admin", "network", "wifi", "get_country_region_list")%>', { "mode" : mode },
|
||||
function(x)
|
||||
{
|
||||
//console.log(x);
|
||||
@@ -497,7 +497,7 @@
|
||||
}
|
||||
|
||||
function getChannelList(mode, country_region) {
|
||||
- XHR.get('<%=luci.dispatcher.build_url("admin", "network", "wifi", "get_channel_list")%>', 'mode='+mode+'&country_region='+country_region,
|
||||
+ XHR.get('<%=luci.dispatcher.build_url("admin", "network", "wifi", "get_channel_list")%>', { "mode" : mode, "country_region" : country_region },
|
||||
function(x)
|
||||
{
|
||||
console.log(x);
|
||||
@@ -616,7 +616,7 @@
|
||||
mode = mode.value*1;
|
||||
|
||||
var cr = GetCountryRegion(mode);
|
||||
- XHR.get('<%=luci.dispatcher.build_url("admin", "network", "wifi", "get_5G_2nd_80Mhz_channel_list")%>', 'ch_cur='+ch+'&country_region='+cr,
|
||||
+ XHR.get('<%=luci.dispatcher.build_url("admin", "network", "wifi", "get_5G_2nd_80Mhz_channel_list")%>', { "ch_cur" : ch, "country_region" : cr },
|
||||
function(x)
|
||||
{
|
||||
//console.log(x);
|
||||
@@ -658,7 +658,7 @@
|
||||
mode = mode.value*1;
|
||||
|
||||
var cr = GetCountryRegion(mode);
|
||||
- XHR.get('<%=luci.dispatcher.build_url("admin", "network", "wifi", "get_HT_ext_channel_list")%>', 'ch_cur='+ch+'&country_region='+cr,
|
||||
+ XHR.get('<%=luci.dispatcher.build_url("admin", "network", "wifi", "get_HT_ext_channel_list")%>', { "ch_cur" : ch, "country_region" : cr },
|
||||
function(x)
|
||||
{
|
||||
console.log(x);
|
||||
|
||||
--- a/luci-app-mtwifi/luasrc/view/admin_mtk/mtk_wifi_overview.htm
|
||||
+++ b/luci-app-mtwifi/luasrc/view/admin_mtk/mtk_wifi_overview.htm
|
||||
@@ -31,7 +31,7 @@ <h2><a name="content">无线概况</a></h2>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td style="width:34px">
|
||||
- <img src="/luci-static/resources/icons/wifi_big.png" style="float:left; margin-right:10px" />
|
||||
+ <img src="/luci-static/resources/icons/wifi.png" style="float:left; margin-right:10px" />
|
||||
</td>
|
||||
<td colspan="2" style="text-align:left">
|
||||
<big><strong title="<%=dev.profile%>"> Generic Mediatek <%=dev.devname%></strong></big>
|
@ -1,87 +0,0 @@
|
||||
--- a/openvpn/files/etc/hotplug.d/openvpn/01-user
|
||||
+++ b/openvpn/files/etc/hotplug.d/openvpn/01-user
|
||||
@@ -1,9 +1,10 @@
|
||||
#!/bin/sh
|
||||
|
||||
[ -e "/etc/openvpn.user" ] && {
|
||||
+[ -e "/etc/openvpn/openvpn.user" ] && {
|
||||
env -i ACTION="$ACTION" INSTANCE="$INSTANCE" \
|
||||
/bin/sh \
|
||||
- /etc/openvpn.user \
|
||||
+ /etc/openvpn/openvpn.user \
|
||||
$*
|
||||
}
|
||||
|
||||
|
||||
--- a/openvpn/files/openvpn.init
|
||||
+++ b/openvpn/files/openvpn.init
|
||||
@@ -140,9 +140,6 @@ openvpn_add_instance() {
|
||||
local name="$1"
|
||||
local dir="$2"
|
||||
local conf="$3"
|
||||
- local security="$4"
|
||||
- local up="$5"
|
||||
- local down="$6"
|
||||
|
||||
procd_open_instance "$name"
|
||||
procd_set_param command "$PROG" \
|
||||
@@ -150,13 +147,8 @@ openvpn_add_instance() {
|
||||
--status "/var/run/openvpn.$name.status" \
|
||||
--cd "$dir" \
|
||||
--config "$conf" \
|
||||
- --up "/usr/libexec/openvpn-hotplug up $name" \
|
||||
- --down "/usr/libexec/openvpn-hotplug down $name" \
|
||||
- ${up:+--setenv user_up "$up"} \
|
||||
- ${down:+--setenv user_down "$down"} \
|
||||
- --script-security "${security:-2}" \
|
||||
- $(openvpn_get_dev "$name" "$conf") \
|
||||
- $(openvpn_get_credentials "$name" "$conf")
|
||||
+ --script-security 2 \
|
||||
+ $(openvpn_get_dev "$name" "$conf")
|
||||
procd_set_param file "$dir/$conf"
|
||||
procd_set_param term_timeout 15
|
||||
procd_set_param respawn
|
||||
@@ -177,28 +169,22 @@ start_instance() {
|
||||
return 1
|
||||
}
|
||||
|
||||
- local up down script_security
|
||||
- config_get up "$s" up
|
||||
- config_get down "$s" down
|
||||
- config_get script_security "$s" script_security
|
||||
-
|
||||
[ ! -d "/var/run" ] && mkdir -p "/var/run"
|
||||
|
||||
if [ ! -z "$config" ]; then
|
||||
append UCI_STARTED "$config" "$LIST_SEP"
|
||||
- [ -n "$up" ] || get_openvpn_option "$config" up up
|
||||
- [ -n "$down" ] || get_openvpn_option "$config" down down
|
||||
- openvpn_add_instance "$s" "${config%/*}" "$config" "$script_security" "$up" "$down"
|
||||
+ openvpn_add_instance "$s" "${config%/*}" "$config"
|
||||
return
|
||||
fi
|
||||
|
||||
- create_temp_file "/var/etc/openvpn-$s.conf"
|
||||
+ [ ! -d "/var/etc" ] && mkdir -p "/var/etc"
|
||||
+ [ -f "/var/etc/openvpn-$s.conf" ] && rm "/var/etc/openvpn-$s.conf"
|
||||
|
||||
append_bools "$s" $OPENVPN_BOOLS
|
||||
append_params "$s" $OPENVPN_PARAMS
|
||||
append_list "$s" $OPENVPN_LIST
|
||||
|
||||
- openvpn_add_instance "$s" "/var/etc" "openvpn-$s.conf" "$script_security" "$up" "$down"
|
||||
+ openvpn_add_instance "$s" "/var/etc" "openvpn-$s.conf"
|
||||
}
|
||||
|
||||
start_service() {
|
||||
@@ -240,9 +226,7 @@ start_service() {
|
||||
continue
|
||||
fi
|
||||
|
||||
- get_openvpn_option "$path" up up || up=""
|
||||
- get_openvpn_option "$path" down down || down=""
|
||||
- openvpn_add_instance "$name" "${path%/*}" "$path" "" "$up" "$down"
|
||||
+ openvpn_add_instance "$name" "${path%/*}" "$path" ""
|
||||
fi
|
||||
done
|
||||
fi
|
@ -1,228 +0,0 @@
|
||||
--- a/luci-app-opkg/htdocs/luci-static/resources/view/opkg.js
|
||||
+++ b/luci-app-opkg/htdocs/luci-static/resources/view/opkg.js
|
||||
@@ -232,7 +232,7 @@ function display(pattern)
|
||||
var avail = packages.available.pkgs[name],
|
||||
inst = packages.installed.pkgs[name];
|
||||
|
||||
- if (!inst || !inst.installed)
|
||||
+ if (!inst || !inst.installed || pkg.name.includes('kmod-') || pkg.name.includes('busybox') || pkg.name.includes('base-files'))
|
||||
continue;
|
||||
|
||||
if (!avail || compareVersion(avail.version, pkg.version) <= 0)
|
||||
@@ -245,6 +245,7 @@ function display(pattern)
|
||||
btn = E('div', {
|
||||
'class': 'btn cbi-button-positive',
|
||||
'data-package': name,
|
||||
+ 'action': 'upgrade',
|
||||
'click': handleInstall
|
||||
}, _('Upgrade…'));
|
||||
}
|
||||
@@ -260,6 +261,9 @@ function display(pattern)
|
||||
}, _('Remove…'));
|
||||
}
|
||||
else {
|
||||
+ if (pkg.name.includes('luci-i18n'))
|
||||
+ continue;
|
||||
+
|
||||
var inst = packages.installed.pkgs[name];
|
||||
|
||||
ver = truncateVersion(pkg.version || '-');
|
||||
@@ -268,12 +272,14 @@ function display(pattern)
|
||||
btn = E('div', {
|
||||
'class': 'btn cbi-button-action',
|
||||
'data-package': name,
|
||||
+ 'action': 'install',
|
||||
'click': handleInstall
|
||||
}, _('Install…'));
|
||||
- else if (inst.installed && inst.version != pkg.version)
|
||||
+ else if (inst.installed && compareVersion(pkg.version, inst.version) > 0)
|
||||
btn = E('div', {
|
||||
'class': 'btn cbi-button-positive',
|
||||
'data-package': name,
|
||||
+ 'action': 'upgrade',
|
||||
'click': handleInstall
|
||||
}, _('Upgrade…'));
|
||||
else
|
||||
@@ -370,6 +376,12 @@ function handleMode(ev)
|
||||
|
||||
currentDisplayMode = tab.getAttribute('data-mode');
|
||||
|
||||
+ if (currentDisplayMode == "updates"){
|
||||
+ var filterv = document.querySelector('input[name="filter"]')
|
||||
+ if ( filterv.value == "luci-app-")
|
||||
+ filterv.value = ""
|
||||
+ }
|
||||
+
|
||||
display(document.querySelector('input[name="filter"]').value);
|
||||
|
||||
ev.target.blur();
|
||||
@@ -631,6 +643,7 @@ function handleReset(ev)
|
||||
function handleInstall(ev)
|
||||
{
|
||||
var name = ev.target.getAttribute('data-package'),
|
||||
+ action = ev.target.getAttribute('action'),
|
||||
pkg = packages.available.pkgs[name],
|
||||
depcache = {},
|
||||
size;
|
||||
@@ -687,7 +700,7 @@ function handleInstall(ev)
|
||||
errs || inst || '',
|
||||
E('div', { 'class': 'right' }, [
|
||||
E('label', { 'class': 'cbi-checkbox', 'style': 'float:left' }, [
|
||||
- E('input', { 'id': 'overwrite-cb', 'type': 'checkbox', 'name': 'overwrite', 'disabled': isReadonlyView }), ' ',
|
||||
+ E('input', { 'id': 'overwrite-cb', 'type': 'checkbox', 'name': 'overwrite', 'checked': 'checked', 'disabled': isReadonlyView }), ' ',
|
||||
E('label', { 'for': 'overwrite-cb' }), ' ',
|
||||
_('Overwrite files from other package(s)')
|
||||
]),
|
||||
@@ -697,7 +710,7 @@ function handleInstall(ev)
|
||||
}, _('Cancel')),
|
||||
' ',
|
||||
E('div', {
|
||||
- 'data-command': 'install',
|
||||
+ 'data-command': action,
|
||||
'data-package': name,
|
||||
'class': 'btn cbi-button-action',
|
||||
'click': handleOpkg,
|
||||
@@ -881,6 +894,10 @@ function handleOpkg(ev)
|
||||
]);
|
||||
|
||||
var argv = [ cmd, '--force-removal-of-dependent-packages' ];
|
||||
+
|
||||
+ argv.push('--force-checksum');
|
||||
+
|
||||
+ argv.push('--force-depends');
|
||||
|
||||
if (rem && rem.checked)
|
||||
argv.push('--autoremove');
|
||||
@@ -984,8 +1001,8 @@ function updateLists(data)
|
||||
mount = L.toArray(data[0].filter(function(m) { return m.mount == '/' || m.mount == '/overlay' }))
|
||||
.sort(function(a, b) { return a.mount > b.mount })[0] || { size: 0, free: 0 };
|
||||
|
||||
- pg.firstElementChild.style.width = Math.floor(mount.size ? ((100 / mount.size) * mount.free) : 100) + '%';
|
||||
- pg.setAttribute('title', '%s (%.1024mB)'.format(pg.firstElementChild.style.width, mount.free));
|
||||
+ pg.firstElementChild.style.width = Math.floor(mount.size ? ((100 / mount.size) * (mount.size-mount.free)) : 100) + '%';
|
||||
+ pg.setAttribute('title', '%s (%.1024mB)'.format(pg.firstElementChild.style.width, (mount.size-mount.free)));
|
||||
|
||||
parseList(data[1], packages.available);
|
||||
parseList(data[2], packages.installed);
|
||||
@@ -1020,14 +1037,14 @@ return view.extend({
|
||||
|
||||
E('div', { 'class': 'controls' }, [
|
||||
E('div', {}, [
|
||||
- E('label', {}, _('Free space') + ':'),
|
||||
+ E('label', {}, _('Used space') + ':'),
|
||||
E('div', { 'class': 'cbi-progressbar', 'title': _('unknown') }, E('div', {}, [ '\u00a0' ]))
|
||||
]),
|
||||
|
||||
E('div', {}, [
|
||||
E('label', {}, _('Filter') + ':'),
|
||||
E('span', { 'class': 'control-group' }, [
|
||||
- E('input', { 'type': 'text', 'name': 'filter', 'placeholder': _('Type to filter…'), 'value': query, 'keyup': handleKeyUp }),
|
||||
+ E('input', { 'type': 'text', 'name': 'filter', 'placeholder': _('Type to filter…'), 'value': 'luci-app-', 'keyup': handleKeyUp }),
|
||||
E('button', { 'class': 'btn cbi-button', 'click': handleReset }, [ _('Clear') ])
|
||||
])
|
||||
]),
|
||||
@@ -1056,14 +1073,6 @@ return view.extend({
|
||||
E('li', { 'data-mode': 'updates', 'class': 'installed cbi-tab-disabled', 'click': handleMode }, E('a', { 'href': '#' }, [ _('Updates') ]))
|
||||
]),
|
||||
|
||||
- E('div', { 'class': 'controls', 'style': 'display:none' }, [
|
||||
- E('div', { 'id': 'pager', 'class': 'center' }, [
|
||||
- E('button', { 'class': 'btn cbi-button-neutral prev', 'aria-label': _('Previous page'), 'click': handlePage }, [ '«' ]),
|
||||
- E('div', { 'class': 'text' }, [ 'dummy' ]),
|
||||
- E('button', { 'class': 'btn cbi-button-neutral next', 'aria-label': _('Next page'), 'click': handlePage }, [ '»' ])
|
||||
- ])
|
||||
- ]),
|
||||
-
|
||||
E('table', { 'id': 'packages', 'class': 'table' }, [
|
||||
E('tr', { 'class': 'tr cbi-section-table-titles' }, [
|
||||
E('th', { 'class': 'th col-2 left' }, [ _('Package name') ]),
|
||||
@@ -1072,6 +1081,14 @@ return view.extend({
|
||||
E('th', { 'class': 'th col-10 left' }, [ _('Description') ]),
|
||||
E('th', { 'class': 'th right cbi-section-actions' }, [ '\u00a0' ])
|
||||
])
|
||||
+ ]),
|
||||
+
|
||||
+ E('div', { 'class': 'controls', 'style': 'display:none' }, [
|
||||
+ E('div', { 'id': 'pager', 'class': 'center' }, [
|
||||
+ E('button', { 'class': 'btn cbi-button-neutral prev', 'aria-label': _('Previous page'), 'click': handlePage }, [ '«' ]),
|
||||
+ E('div', { 'class': 'text' }, [ 'dummy' ]),
|
||||
+ E('button', { 'class': 'btn cbi-button-neutral next', 'aria-label': _('Next page'), 'click': handlePage }, [ '»' ])
|
||||
+ ])
|
||||
])
|
||||
]);
|
||||
|
||||
|
||||
--- a/luci-app-opkg/root/usr/libexec/opkg-call
|
||||
+++ b/luci-app-opkg/root/usr/libexec/opkg-call
|
||||
@@ -13,13 +13,13 @@ case "$action" in
|
||||
lists_dir=$(sed -rne 's#^lists_dir \S+ (\S+)#\1#p' /etc/opkg.conf /etc/opkg/*.conf 2>/dev/null | tail -n 1)
|
||||
find "${lists_dir:-/usr/lib/opkg/lists}" -type f '!' -name '*.sig' | xargs -r gzip -cd
|
||||
;;
|
||||
- install|update|remove)
|
||||
+ install|update|upgrade|remove)
|
||||
(
|
||||
opkg="opkg"
|
||||
|
||||
while [ -n "$1" ]; do
|
||||
case "$1" in
|
||||
- --autoremove|--force-overwrite|--force-removal-of-dependent-packages)
|
||||
+ --autoremove|--force-overwrite|--force-removal-of-dependent-packages|--force-checksum|--force-depends)
|
||||
opkg="$opkg $1"
|
||||
shift
|
||||
;;
|
||||
@@ -35,8 +35,32 @@ case "$action" in
|
||||
if flock -x 200; then
|
||||
$opkg $action "$@" </dev/null >/tmp/opkg.out 2>/tmp/opkg.err
|
||||
code=$?
|
||||
- stdout=$(cat /tmp/opkg.out)
|
||||
+ if [[ $@ == luci-app-* && "$(opkg list | grep luci-i18n-"$(echo $@ | cut -d - -f 3-)"-zh-cn)" ]]; then
|
||||
+ $opkg $action luci-i18n-"$(echo $@ | cut -d - -f 3-)"-zh-cn </dev/null >>/tmp/opkg.out 2>/dev/null
|
||||
+ fi
|
||||
+ case "$action" in
|
||||
+ install|upgrade)
|
||||
+ [ "$(opkg list-installed | cut -f 1 -d ' ' | grep -w $@)" ] && {
|
||||
+ rm -f /tmp/opkg.err
|
||||
+ }
|
||||
+ ;;
|
||||
+ remove)
|
||||
+ . /etc/profile.d/opkg.sh; opkg save
|
||||
+ [ ! "$(opkg list-installed | cut -f 1 -d ' ' | grep -w $@)" ] && {
|
||||
+ rm -f /tmp/opkg.err
|
||||
+ }
|
||||
+ ;;
|
||||
+ esac
|
||||
+ grep -q "wget returned 8" /tmp/opkg.err && {
|
||||
+ opkg update </dev/null >/tmp/opkg.out 2>/tmp/opkg.err
|
||||
+ $opkg $action "$@" </dev/null >>/tmp/opkg.out 2>>/tmp/opkg.err
|
||||
+ }
|
||||
stderr=$(cat /tmp/opkg.err)
|
||||
+ [ -n "$stderr" ] || {
|
||||
+ echo "🎉 已完成, 请关闭本窗口~" >>/tmp/opkg.out
|
||||
+ code=0
|
||||
+ }
|
||||
+ stdout=$(cat /tmp/opkg.out)
|
||||
else
|
||||
code=255
|
||||
stderr="Failed to acquire lock"
|
||||
|
||||
--- a/luci-app-opkg/root/usr/share/rpcd/acl.d/luci-app-opkg.json
|
||||
+++ b/luci-app-opkg/root/usr/share/rpcd/acl.d/luci-app-opkg.json
|
||||
@@ -20,6 +20,7 @@
|
||||
"/usr/libexec/opkg-call install *": [ "exec" ],
|
||||
"/usr/libexec/opkg-call remove *": [ "exec" ],
|
||||
"/usr/libexec/opkg-call update *": [ "exec" ],
|
||||
+ "/usr/libexec/opkg-call upgrade *": [ "exec" ],
|
||||
"/etc/opkg.conf": [ "write" ],
|
||||
"/etc/opkg/*.conf": [ "write" ],
|
||||
"/tmp/upload.ipk": [ "write" ]
|
||||
|
||||
--- a/luci-mod-system/htdocs/luci-static/resources/view/system/flash.js
|
||||
+++ b/luci-mod-system/htdocs/luci-static/resources/view/system/flash.js
|
||||
@@ -261,6 +261,7 @@ return view.extend({
|
||||
body.push(E('p', {}, E('label', { 'class': 'btn' }, [
|
||||
opts.backup_pkgs[0], ' ', _('Include in backup a list of current installed packages at /etc/backup/installed_packages.txt')
|
||||
])));
|
||||
+ opts.backup_pkgs[0].checked = true;
|
||||
};
|
||||
|
||||
var cntbtn = E('button', {
|
@ -1,21 +0,0 @@
|
||||
--- /dev/null
|
||||
+++ b/pdnsd-alt/patches/10-filter-aaaa.patch
|
||||
@@ -0,0 +1,18 @@
|
||||
+--- a/src/dns_answer.c
|
||||
++++ b/src/dns_answer.c
|
||||
+@@ -567,6 +567,7 @@ static int add_rrset(dns_msg_t **ans, size_t *sz, size_t *allocsz,
|
||||
+ if (rnd_recs) b=first=randrr(crrset->rrs);
|
||||
+
|
||||
+ while (b) {
|
||||
++ if (tp==T_AAAA) goto add_rrset_next;
|
||||
+ if (!add_rr(ans, sz, allocsz, rrn, tp, ans_ttl(crrset,queryts),
|
||||
+ b->rdlen, b->data, S_ANSWER, udp, cb))
|
||||
+ return 0;
|
||||
+@@ -584,6 +585,7 @@ static int add_rrset(dns_msg_t **ans, size_t *sz, size_t *allocsz,
|
||||
+ break;
|
||||
+ }
|
||||
+ }
|
||||
++add_rrset_next:
|
||||
+ b=b->next;
|
||||
+ if (rnd_recs) {
|
||||
+ if(!b) b=crrset->rrs; /* wraparound */
|
@ -1,93 +0,0 @@
|
||||
--- a/luci-mod-network/htdocs/luci-static/resources/view/network/wireless.js
|
||||
+++ b/luci-mod-network/htdocs/luci-static/resources/view/network/wireless.js
|
||||
@@ -885,6 +885,10 @@ return view.extend({
|
||||
if (hwtype == 'mac80211') {
|
||||
o = ss.taboption('general', form.Flag, 'legacy_rates', _('Allow legacy 802.11b rates'), _('Legacy or badly behaving devices may require legacy 802.11b rates to interoperate. Airtime efficiency may be significantly reduced where these are used. It is recommended to not allow 802.11b rates where possible.'));
|
||||
|
||||
+ o = ss.taboption("advanced", form.Flag, 'mu_beamformer', _('MU-MIMO'));
|
||||
+ o.rmempty = false;
|
||||
+ o.default = '0';
|
||||
+
|
||||
o = ss.taboption('general', CBIWifiTxPowerValue, 'txpower', _('Maximum transmit power'), _('Specifies the maximum transmit power the wireless radio may use. Depending on regulatory requirements and wireless usage, the actual transmit power may be reduced by the driver.'));
|
||||
o.wifiNetwork = radioNet;
|
||||
|
||||
@@ -916,6 +920,9 @@ return view.extend({
|
||||
o.datatype = 'range(15,65535)';
|
||||
o.placeholder = 100;
|
||||
o.rmempty = true;
|
||||
+
|
||||
+ o = ss.taboption('advanced', form.Flag, 'vendor_vht', _('Enable 256-QAM'), _('802.11n 2.4Ghz Only'));
|
||||
+ o.default = o.disabled;
|
||||
}
|
||||
|
||||
|
||||
@@ -1001,6 +1008,68 @@ return view.extend({
|
||||
};
|
||||
|
||||
if (hwtype == 'mac80211') {
|
||||
+ // Probe 802.11k support
|
||||
+ o = ss.taboption('encryption', form.Flag, 'ieee80211k', _('802.11k'), _('Enables 802.11k. The standard provides information to help clients discover the best available access point'));
|
||||
+ o.depends({ mode : 'ap', encryption : 'wpa' });
|
||||
+ o.depends({ mode : 'ap', encryption : 'wpa2' });
|
||||
+ o.depends({ mode : 'ap-wds', encryption : 'wpa' });
|
||||
+ o.depends({ mode : 'ap-wds', encryption : 'wpa2' });
|
||||
+ o.depends({ mode : 'ap', encryption : 'psk' });
|
||||
+ o.depends({ mode : 'ap', encryption : 'psk2' });
|
||||
+ o.depends({ mode : 'ap', encryption : 'psk-mixed' });
|
||||
+ o.depends({ mode : 'ap-wds', encryption : 'psk' });
|
||||
+ o.depends({ mode : 'ap-wds', encryption : 'psk2' });
|
||||
+ o.depends({ mode : 'ap-wds', encryption : 'psk-mixed' });
|
||||
+ o.rmempty = true;
|
||||
+
|
||||
+ o = ss.taboption('encryption', form.Flag, 'rrm_neighbor_report', _('Enable neighbor report via radio measurements'));
|
||||
+ o.default = o.enabled;
|
||||
+ o.depends({ ieee80211k : '1' });
|
||||
+ o.rmempty = true;
|
||||
+
|
||||
+ o = ss.taboption('encryption', form.Flag, 'rrm_beacon_report', _('Enable beacon report via radio measurements'));
|
||||
+ o.default = o.enabled;
|
||||
+ o.depends({ ieee80211k : '1' });
|
||||
+ o.rmempty = true;
|
||||
+ // End of 802.11k options
|
||||
+
|
||||
+ // Probe 802.11v support
|
||||
+ o = ss.taboption('encryption', form.Flag, 'ieee80211v', _('802.11v'), _('Enables 802.11v. Allows client devices to exchange information about the network topology, facilitating overall improvement of the wireless network.'));
|
||||
+ o.depends({ mode : 'ap', encryption : 'wpa' });
|
||||
+ o.depends({ mode : 'ap', encryption : 'wpa2' });
|
||||
+ o.depends({ mode : 'ap-wds', encryption : 'wpa' });
|
||||
+ o.depends({ mode : 'ap-wds', encryption : 'wpa2' });
|
||||
+ o.depends({ mode : 'ap', encryption : 'psk' });
|
||||
+ o.depends({ mode : 'ap', encryption : 'psk2' });
|
||||
+ o.depends({ mode : 'ap', encryption : 'psk-mixed' });
|
||||
+ o.depends({ mode : 'ap-wds', encryption : 'psk' });
|
||||
+ o.depends({ mode : 'ap-wds', encryption : 'psk2' });
|
||||
+ o.depends({ mode : 'ap-wds', encryption : 'psk-mixed' });
|
||||
+ o.rmempty = true;
|
||||
+
|
||||
+
|
||||
+ o = ss.taboption('encryption', form.Flag, 'wnm_sleep_mode', _('Extended sleep mode for stations'));
|
||||
+ o.default = o.disabled;
|
||||
+ o.depends({ ieee80211v : '1' });
|
||||
+ o.rmempty = true;
|
||||
+
|
||||
+ o = ss.taboption('encryption', form.Flag, 'bss_transition', _('BSS Transition Management'));
|
||||
+ o.default = o.disabled;
|
||||
+ o.depends({ ieee80211v : '1' });
|
||||
+ o.rmempty = true;
|
||||
+
|
||||
+ o = ss.taboption('encryption', form.ListValue, 'time_advertisement', _('Time advertisement'));
|
||||
+ o.depends({ ieee80211v : '1' });
|
||||
+ o.value('0', _('disabled'));
|
||||
+ o.value('2', _('UTC time at which the TSF timer is 0'));
|
||||
+ o.rmempty = true;
|
||||
+
|
||||
+ o = ss.taboption('encryption', form.Value, 'time_zone', _('time zone'), _('Local time zone as specified in 8.3 of IEEE Std 1003.1-2004'));
|
||||
+ o.depends({ time_advertisement : '2' });
|
||||
+ o.placeholder = 'UTC8';
|
||||
+ o.rmempty = true;
|
||||
+ // End of 802.11v options
|
||||
+
|
||||
var mode = ss.children[0],
|
||||
bssid = ss.children[5],
|
||||
encr;
|
||||
|
@ -1,53 +0,0 @@
|
||||
# SPDX-License-Identifier: GPL-3.0-only
|
||||
#
|
||||
# Copyright (C) 2021 ImmortalWrt.org
|
||||
|
||||
include $(TOPDIR)/rules.mk
|
||||
|
||||
PKG_NAME:=dns2socks
|
||||
PKG_VERSION:=2.1
|
||||
PKG_RELEASE:=
|
||||
|
||||
PKG_SOURCE:=SourceCode.zip
|
||||
PKG_SOURCE_URL:=@SF/dns2socks
|
||||
PKG_SOURCE_DATE:=2020-02-18
|
||||
PKG_HASH:=406b5003523577d39da66767adfe54f7af9b701374363729386f32f6a3a995f4
|
||||
|
||||
PKG_MAINTAINER:=ghostmaker
|
||||
PKG_LICENSE:=BSD-3-Clause
|
||||
PKG_LICENSE_FILE:=LICENSE
|
||||
|
||||
include $(INCLUDE_DIR)/package.mk
|
||||
|
||||
UNZIP_CMD:=unzip -q -d $(PKG_BUILD_DIR) $(DL_DIR)/$(PKG_SOURCE)
|
||||
|
||||
define Package/dns2socks
|
||||
SECTION:=net
|
||||
CATEGORY:=Network
|
||||
SUBMENU:=IP Addresses and Names
|
||||
TITLE:=DNS to SOCKS or HTTP proxy
|
||||
URL:=http://dns2socks.sourceforge.net/
|
||||
DEPENDS:=+libpthread
|
||||
endef
|
||||
|
||||
define Package/dns2socks/description
|
||||
This is a command line utility to resolve DNS requests via
|
||||
a SOCKS tunnel like Tor or a HTTP proxy.
|
||||
endef
|
||||
|
||||
define Build/Compile
|
||||
$(TARGET_CC) \
|
||||
$(TARGET_CFLAGS) \
|
||||
$(TARGET_CPPFLAGS) \
|
||||
$(FPIC) \
|
||||
-o $(PKG_BUILD_DIR)/DNS2SOCKS/dns2socks \
|
||||
$(PKG_BUILD_DIR)/DNS2SOCKS/DNS2SOCKS.c \
|
||||
$(TARGET_LDFLAGS) -pthread
|
||||
endef
|
||||
|
||||
define Package/dns2socks/install
|
||||
$(INSTALL_DIR) $(1)/usr/bin
|
||||
$(INSTALL_BIN) $(PKG_BUILD_DIR)/DNS2SOCKS/dns2socks $(1)/usr/bin/dns2socks
|
||||
endef
|
||||
|
||||
$(eval $(call BuildPackage,dns2socks))
|
@ -1,80 +0,0 @@
|
||||
#
|
||||
# Copyright (c) 2020 xiaoqingfeng (xiaoqingfengatgm@gmail.com)
|
||||
# Feed site - https://github.com/xiaoqingfengATGH/feeds-xiaoqingfeng
|
||||
# This is free software, licensed under the GNU General Public License v3.
|
||||
#
|
||||
include $(TOPDIR)/rules.mk
|
||||
|
||||
PKG_NAME:=HomeRedirect
|
||||
PKG_VERSION:=1.4
|
||||
PKG_RELEASE:=
|
||||
PKG_DATE:=20210226
|
||||
|
||||
PKG_MAINTAINER:=xiaoqingfeng <xiaoqingfengatgm@gmail.com>
|
||||
PKG_LICENSE:=GPL-3.0-or-later
|
||||
PKG_LICENSE_FILES:=LICENSE
|
||||
|
||||
include $(INCLUDE_DIR)/package.mk
|
||||
|
||||
define Package/$(PKG_NAME)
|
||||
SECTION:=net
|
||||
CATEGORY:=Network
|
||||
TITLE:=Port forwarding utility for HomeLede.
|
||||
DEPENDS:=+bash +coreutils-nohup +socat
|
||||
PKGARCH:=all
|
||||
URL:=https://github.com/xiaoqingfengATGH/feeds-xiaoqingfeng
|
||||
endef
|
||||
|
||||
define Package/$(PKG_NAME)/config
|
||||
help
|
||||
$(PKG_NAME)
|
||||
Version: $(PKG_VERSION)-$(PKG_RELEASE)
|
||||
Port forwarding utility for HomeLede. Supports TCP/UDP over IPv4 & IPv6.
|
||||
endef
|
||||
|
||||
define Package/$(PKG_NAME)/description
|
||||
Port forwarding utility for HomeLede. Supports TCP/UDP over IPv4 & IPv6.
|
||||
endef
|
||||
|
||||
define Package/$(PKG_NAME)/conffiles
|
||||
/etc/config/homeredirect
|
||||
endef
|
||||
|
||||
define Package/$(PKG_NAME)/install
|
||||
$(INSTALL_DIR) $(1)/etc/init.d
|
||||
$(INSTALL_DIR) $(1)/etc/config
|
||||
$(INSTALL_DIR) $(1)/etc/homeredirect
|
||||
$(INSTALL_BIN) files/etc/init.d/homeredirect $(1)/etc/init.d
|
||||
$(INSTALL_CONF) files/etc/config/homeredirect $(1)/etc/config
|
||||
$(INSTALL_DATA) files/etc/homeredirect/firewall.include $(1)/etc/homeredirect/
|
||||
$(INSTALL_DATA) files/etc/homeredirect/script.sh $(1)/etc/homeredirect/
|
||||
endef
|
||||
|
||||
define Package/$(PKG_NAME)/postinst
|
||||
#!/bin/sh
|
||||
exit 0
|
||||
endef
|
||||
|
||||
define Package/$(PKG_NAME)/prerm
|
||||
#!/bin/sh
|
||||
/etc/init.d/homeredirect stop
|
||||
uci -q batch <<-EOF >/dev/null
|
||||
delete ucitrack.@homeredirect[-1]
|
||||
commit ucitrack
|
||||
EOF
|
||||
uci -q batch <<-EOF >/dev/null
|
||||
delete firewall.homeredirect
|
||||
EOF
|
||||
exit 0
|
||||
endef
|
||||
|
||||
define Build/Configure
|
||||
endef
|
||||
|
||||
define Build/Prepare
|
||||
endef
|
||||
|
||||
define Build/Compile
|
||||
endef
|
||||
|
||||
$(eval $(call BuildPackage,$(PKG_NAME)))
|
@ -1,38 +0,0 @@
|
||||
config global
|
||||
option enabled '1'
|
||||
|
||||
config redirect
|
||||
option proto 'tcp4'
|
||||
option src_ip '0.0.0.0'
|
||||
option src_dport '60609'
|
||||
option dest_ip '192.168.1.100'
|
||||
option dest_port '3389'
|
||||
option name 'TCP_REDIRECT_IPV4'
|
||||
option enabled '0'
|
||||
|
||||
config redirect
|
||||
option proto 'tcp6'
|
||||
option src_ip '::'
|
||||
option src_dport '60608'
|
||||
option dest_ip 'fd5b:64cf:4ff4::1c4'
|
||||
option dest_port '3389'
|
||||
option name 'TCP_REDIRECT_IPV6'
|
||||
option enabled '0'
|
||||
|
||||
config redirect
|
||||
option proto 'udp4'
|
||||
option src_ip '0.0.0.0'
|
||||
option src_dport '64511'
|
||||
option dest_ip '192.168.1.100'
|
||||
option dest_port '500'
|
||||
option name 'UDP_REDIRECT_IPV4'
|
||||
option enabled '0'
|
||||
|
||||
config redirect
|
||||
option proto 'udp6'
|
||||
option src_ip '::'
|
||||
option src_dport '64500'
|
||||
option dest_ip 'fd5b:64cf:4ff4::1c4'
|
||||
option dest_port '4500'
|
||||
option name 'UDP_REDIRECT_IPV6'
|
||||
option enabled '0'
|
@ -1 +0,0 @@
|
||||
bash /etc/homeredirect/script.sh
|
@ -1,45 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
del_rule() {
|
||||
count=$(iptables -n -L INPUT 2>/dev/null | grep -c "HOME_REDIRECT")
|
||||
if [ -n "$count" ]; then
|
||||
until [ "$count" = 0 ]
|
||||
do
|
||||
rules=$(iptables -n -L INPUT --line-num 2>/dev/null | grep "HOME_REDIRECT" | awk '{print $1}')
|
||||
for rule in $rules
|
||||
do
|
||||
iptables -D INPUT $rule 2>/dev/null
|
||||
break
|
||||
done
|
||||
count=$(expr $count - 1)
|
||||
done
|
||||
fi
|
||||
|
||||
iptables -F HOME_REDIRECT 2>/dev/null
|
||||
iptables -X HOME_REDIRECT 2>/dev/null
|
||||
}
|
||||
|
||||
add_rule(){
|
||||
iptables -N HOME_REDIRECT
|
||||
iptables -I INPUT -j HOME_REDIRECT
|
||||
|
||||
maxRedirctCount=$(uci show homeredirect | grep @redirect | awk -F '[' '{print $2}' | awk -F ']' '{print $1}' | sort | tail -n 1)
|
||||
|
||||
for ((i=($maxRedirctCount);i>=0;i--));
|
||||
do
|
||||
enabled=$(uci get homeredirect.@redirect[$i].enabled)
|
||||
if [ $enabled -eq 1 ]; then
|
||||
protoAll=$(uci get homeredirect.@redirect[$i].proto)
|
||||
proto=${protoAll:0:3}
|
||||
port=$(uci get homeredirect.@redirect[$i].src_dport)
|
||||
iptables -A HOME_REDIRECT -p $proto --dport $port -j ACCEPT
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
del_rule
|
||||
|
||||
enable=$(uci get homeredirect.@global[0].enabled)
|
||||
if [ $enable -eq 1 ]; then
|
||||
add_rule
|
||||
fi
|
@ -1,140 +0,0 @@
|
||||
#!/bin/sh /etc/rc.common
|
||||
|
||||
START=99
|
||||
|
||||
RUNLOG_DIR=/tmp/hr
|
||||
|
||||
PROCESSED_REDIRECT=0
|
||||
|
||||
log()
|
||||
{
|
||||
logger -t homeredirect $1
|
||||
}
|
||||
|
||||
setupDefaultSrcIP() {
|
||||
if [ -z $src_ip ];then
|
||||
if [ "$1" = "ipv4" ]; then
|
||||
src_ip="0.0.0.0"
|
||||
else
|
||||
src_ip="::"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
setup() {
|
||||
|
||||
config_get enabled $1 enabled
|
||||
|
||||
id=$1
|
||||
config_get proto $1 proto
|
||||
config_get src_ip $1 src_ip
|
||||
config_get src_dport $1 src_dport
|
||||
config_get dest_ip $1 dest_ip
|
||||
config_get dest_port $1 dest_port
|
||||
config_get name $1 name
|
||||
|
||||
terminateRedirect $id
|
||||
|
||||
[ "$enabled" != "1" ] && return 0
|
||||
|
||||
PROCESSED_REDIRECT=1
|
||||
|
||||
if [ "$proto" = "tcp4" ]; then
|
||||
src_addresstype="TCP4-LISTEN"
|
||||
dest_addresstype="TCP4"
|
||||
setupDefaultSrcIP "ipv4"
|
||||
elif [ "$proto" = "tcp6" ]; then
|
||||
src_addresstype="TCP6-LISTEN"
|
||||
dest_addresstype="TCP6"
|
||||
setupDefaultSrcIP "ipv6"
|
||||
src_ip="[$src_ip]"
|
||||
dest_ip="[$dest_ip]"
|
||||
elif [ "$proto" = "udp4" ]; then
|
||||
src_addresstype="UDP4-LISTEN"
|
||||
dest_addresstype="UDP4"
|
||||
setupDefaultSrcIP "ipv4"
|
||||
elif [ "$proto" = "udp6" ]; then
|
||||
src_addresstype="UDP6-LISTEN"
|
||||
dest_addresstype="UDP6"
|
||||
setupDefaultSrcIP "ipv6"
|
||||
src_ip="[$src_ip]"
|
||||
dest_ip="[$dest_ip]"
|
||||
fi
|
||||
|
||||
#echo "nohup socat -lf $RUNLOG_DIR/$id.log $src_addresstype:$src_dport,bind=$src_ip,fork $dest_addresstype:$dest_ip:$dest_port > $RUNLOG_DIR/$id.log 2>&1 &"
|
||||
nohup socat -lf $RUNLOG_DIR/$id.log $src_addresstype:$src_dport,bind=$src_ip,fork $dest_addresstype:$dest_ip:$dest_port > $RUNLOG_DIR/$id.log 2>&1 &
|
||||
log "[HomeRedirect] Port redirect from $proto $src_ip:$src_dport==>$dest_addresstype:$dest_ip:$dest_port started."
|
||||
}
|
||||
|
||||
# param $1 is port
|
||||
showTcpPortState() {
|
||||
local process=$(netstat -ltnp | awk -F ' ' '{if(NR>2) print $1"/"$4"/"$7}' | grep :$1)
|
||||
if [ -n "$process" ]; then
|
||||
echo $process
|
||||
else
|
||||
echo "TCP Port $1 is Free."
|
||||
fi
|
||||
}
|
||||
# param $1 is port
|
||||
showUdpPortState() {
|
||||
local process=$(netstat -lunp | awk -F ' ' '{if(NR>2) print $1"/"$4"/"$6}'|grep :$1)
|
||||
if [ -n "$process" ]; then
|
||||
echo $process
|
||||
else
|
||||
echo "UDP Port $1 is Free."
|
||||
fi
|
||||
}
|
||||
|
||||
isRedirectRunning() {
|
||||
local runningPID=$(ps | grep socat | grep $RUNLOG_DIR/$1 | sed '/grep/d' | awk -F ' ' '{print $1}')
|
||||
if [ -n "$runningPID" ]; then
|
||||
return 1
|
||||
else
|
||||
return 0
|
||||
fi
|
||||
}
|
||||
|
||||
# param $1 is redirect id
|
||||
terminateRedirect() {
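# Kill the socat instance whose command line references this section's log file, if one is running.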
|
||||
isRedirectRunning $1
|
||||
[ "$?" = "1" ] && {
|
||||
local runningPID=$(ps | grep socat | grep $RUNLOG_DIR/$1 | sed '/grep/d' | awk -F ' ' '{print $1}')
|
||||
#echo "Going to kill process $runningPID"
|
||||
kill $runningPID
|
||||
}
|
||||
}
|
||||
|
||||
terminateAll() {
|
||||
local runningPIDs=$(ps | grep socat | grep $RUNLOG_DIR | sed '/grep/d' | awk -F ' ' '{print $1}')
|
||||
[ -n "$runningPIDs" ] && {
|
||||
kill $runningPIDs
|
||||
log "Redirect process : $runningPIDs stopped."
|
||||
}
|
||||
}
|
||||
|
||||
start() {
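# If the global switch is off, stop all forwarders and reload fw3; otherwise recreate
# /tmp/hr, launch one socat per enabled redirect and reload fw3 when at least one was started.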
|
||||
local vt_enabled=$(uci -q get homeredirect.@global[0].enabled)
|
||||
if [ "$vt_enabled" = 0 ]; then
|
||||
terminateAll
|
||||
fw3 reload
|
||||
return 1
|
||||
fi
|
||||
|
||||
rm -rf $RUNLOG_DIR
|
||||
mkdir -p $RUNLOG_DIR
|
||||
|
||||
config_load homeredirect
|
||||
|
||||
PROCESSED_REDIRECT=0
|
||||
config_foreach setup redirect
|
||||
[ "$PROCESSED_REDIRECT" == "1" ] && {
|
||||
fw3 reload
|
||||
}
|
||||
log 'HomeRedirect started.'
|
||||
}
|
||||
|
||||
stop() {
|
||||
terminateAll
|
||||
fw3 reload
|
||||
log 'HomeRedirect stopped.'
|
||||
}
|
@ -1,80 +0,0 @@
|
||||
# SPDX-License-Identifier: GPL-3.0-only
|
||||
#
|
||||
# Copyright (C) 2021 ImmortalWrt.org
|
||||
|
||||
include $(TOPDIR)/rules.mk
|
||||
|
||||
PKG_NAME:=hysteria
|
||||
PKG_VERSION:=0.9.6
|
||||
PKG_RELEASE:=
|
||||
|
||||
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
|
||||
PKG_SOURCE_URL:=https://codeload.github.com/HyNetwork/hysteria/tar.gz/v$(PKG_VERSION)?
|
||||
PKG_HASH:=f543cfef69fc396c15248262d084aa3f6fef48a8cd98bdd8fda113fd4f5bf94c
|
||||
|
||||
PKG_LICENSE:=MIT
|
||||
PKG_LICENSE_FILE:=LICENSE
|
||||
PKG_MAINTAINER:=Tianling Shen <cnsztl@immortalwrt.org>
|
||||
|
||||
PKG_CONFIG_DEPENDS:= \
|
||||
CONFIG_HYSTERIA_COMPRESS_GOPROXY \
|
||||
CONFIG_HYSTERIA_COMPRESS_UPX
|
||||
|
||||
PKG_BUILD_DEPENDS:=golang/host
|
||||
PKG_BUILD_PARALLEL:=1
|
||||
PKG_USE_MIPS16:=0
|
||||
|
||||
GO_PKG:=github.com/tobyxdd/hysteria
|
||||
GO_PKG_BUILD_PKG:=github.com/tobyxdd/hysteria/cmd
|
||||
GO_PKG_LDFLAGS:=-s -w
|
||||
GO_PKG_LDFLAGS_X:=main.appVersion=$(PKG_VERSION)
|
||||
|
||||
include $(INCLUDE_DIR)/package.mk
|
||||
include $(TOPDIR)/feeds/packages/lang/golang/golang-package.mk
|
||||
|
||||
define Package/hysteria
|
||||
SECTION:=net
|
||||
CATEGORY:=Network
|
||||
TITLE:=A feature-packed network utility optimized for networks of poor quality
|
||||
URL:=https://github.com/tobyxdd/hysteria
|
||||
DEPENDS:=$(GO_ARCH_DEPENDS) +ca-bundle
|
||||
endef
|
||||
|
||||
define Package/hysteria/description
|
||||
Hysteria is a feature-packed network utility optimized for networks
|
||||
of poor quality (e.g. satellite connections, congested public Wi-Fi,
|
||||
connecting from China to servers abroad) powered by a custom version
|
||||
of the QUIC protocol.
|
||||
endef
|
||||
|
||||
define Package/hysteria/config
|
||||
config HYSTERIA_COMPRESS_GOPROXY
|
||||
bool "Compiling with GOPROXY proxy"
|
||||
default n
|
||||
|
||||
config HYSTERIA_COMPRESS_UPX
|
||||
bool "Compress executable files with UPX"
|
||||
depends on !mips64
|
||||
default n
|
||||
endef
|
||||
|
||||
ifeq ($(CONFIG_HYSTERIA_COMPRESS_GOPROXY),y)
|
||||
export GO111MODULE=on
|
||||
export GOPROXY=https://goproxy.baidu.com
|
||||
endif
|
||||
|
||||
define Build/Compile
|
||||
$(call GoPackage/Build/Compile)
|
||||
ifeq ($(CONFIG_HYSTERIA_COMPRESS_UPX),y)
|
||||
$(STAGING_DIR_HOST)/bin/upx --lzma --best $(GO_PKG_BUILD_BIN_DIR)/cmd
|
||||
endif
|
||||
endef
|
||||
|
||||
define Package/hysteria/install
|
||||
$(call GoPackage/Package/Install/Bin,$(PKG_INSTALL_DIR))
|
||||
$(INSTALL_DIR) $(1)/usr/bin/
|
||||
$(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/cmd $(1)/usr/bin/hysteria
|
||||
endef
|
||||
|
||||
$(eval $(call GoBinPackage,hysteria))
|
||||
$(eval $(call BuildPackage,hysteria))
|
@ -1,43 +0,0 @@
|
||||
# SPDX-License-Identifier: GPL-3.0-only
|
||||
#
|
||||
# Copyright (C) 2021 ImmortalWrt.org
|
||||
|
||||
include $(TOPDIR)/rules.mk
|
||||
|
||||
PKG_NAME:=ipt2socks
|
||||
PKG_VERSION:=1.1.3
|
||||
PKG_RELEASE:=
|
||||
|
||||
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
|
||||
PKG_SOURCE_URL:=https://codeload.github.com/zfl9/ipt2socks/tar.gz/v$(PKG_VERSION)?
|
||||
PKG_HASH:=73a2498dc95934c225d358707e7f7d060b5ce81aa45260ada09cbd15207d27d1
|
||||
|
||||
PKG_BUILD_PARALLEL:=1
|
||||
PKG_INSTALL:=1
|
||||
|
||||
PKG_LICENSE:=AGPL-3.0
|
||||
PKG_LICENSE_FILE:=LICENSE
|
||||
|
||||
include $(INCLUDE_DIR)/package.mk
|
||||
|
||||
define Package/ipt2socks
|
||||
SECTION:=net
|
||||
CATEGORY:=Network
|
||||
TITLE:=Convert iptables to socks5
|
||||
URL:=https://github.com/zfl9/ipt2socks
|
||||
DEPENDS:=+libpthread +libuv
|
||||
endef
|
||||
|
||||
define Package/ipt2socks/description
|
||||
Utility for converting iptables (redirect/tproxy) to socks5.
|
||||
endef
|
||||
|
||||
TARGET_CFLAGS += $(FPIC) -flto
|
||||
TARGET_LDFLAGS += -flto
|
||||
|
||||
define Package/ipt2socks/install
|
||||
$(INSTALL_DIR) $(1)/usr/bin
|
||||
$(INSTALL_BIN) $(PKG_INSTALL_DIR)/ipt2socks $(1)/usr/bin
|
||||
endef
|
||||
|
||||
$(eval $(call BuildPackage,ipt2socks))
|
@ -1,76 +0,0 @@
|
||||
include $(TOPDIR)/rules.mk
|
||||
|
||||
PKG_NAME:=kcptun
|
||||
PKG_VERSION:=20210922
|
||||
PKG_RELEASE:=
|
||||
|
||||
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
|
||||
PKG_SOURCE_URL:=https://codeload.github.com/xtaci/kcptun/tar.gz/v${PKG_VERSION}?
|
||||
PKG_SOURCE_DATE:=2021-09-22
|
||||
PKG_HASH:=f6a08f0fe75fa85d15f9c0c28182c69a5ad909229b4c230a8cbe38f91ba2d038
|
||||
|
||||
PKG_MAINTAINER:=Dengfeng Liu <liudf0716@gmail.com>, Chao Liu <expiron18@gmail.com>
|
||||
PKG_LICENSE:=MIT
|
||||
PKG_LICENSE_FILES:=LICENSE.md
|
||||
|
||||
PKG_BUILD_DEPENDS:=golang/host
|
||||
PKG_BUILD_PARALLEL:=1
|
||||
PKG_USE_MIPS16:=0
|
||||
|
||||
GO_PKG:=github.com/xtaci/kcptun
|
||||
|
||||
GO_PKG_LDFLAGS_X:=main.VERSION=$(PKG_VERSION)
|
||||
|
||||
include $(INCLUDE_DIR)/package.mk
|
||||
include $(TOPDIR)/feeds/packages/lang/golang/golang-package.mk
|
||||
|
||||
define Package/kcptun-config
|
||||
SECTION:=net
|
||||
CATEGORY:=Network
|
||||
SUBMENU:=Web Servers/Proxies
|
||||
TITLE:=Kcptun Config Scripts
|
||||
URL:=https://github.com/xtaci/kcptun
|
||||
DEPENDS:=$(GO_ARCH_DEPENDS)
|
||||
endef
|
||||
|
||||
define Package/kcptun-config/conffiles
|
||||
/etc/config/kcptun
|
||||
endef
|
||||
|
||||
define Package/kcptun-config/install
|
||||
$(INSTALL_DIR) $(1)/etc/config
|
||||
$(INSTALL_CONF) ./files/kcptun.config $(1)/etc/config/kcptun
|
||||
$(INSTALL_DIR) $(1)/etc/init.d
|
||||
$(INSTALL_BIN) ./files/kcptun.init $(1)/etc/init.d/kcptun
|
||||
endef
|
||||
|
||||
define Package/kcptun/Default
|
||||
define Package/kcptun-$(1)
|
||||
SECTION:=net
|
||||
CATEGORY:=Network
|
||||
SUBMENU:=Web Servers/Proxies
|
||||
TITLE:=KCP-based Secure Tunnel $(1)
|
||||
URL:=https://github.com/xtaci/kcptun
|
||||
DEPENDS:=+kcptun-config
|
||||
endef
|
||||
|
||||
define Package/kcptun-$(1)/description
|
||||
kcptun is a Stable & Secure Tunnel Based On KCP with N:M Multiplexing.
|
||||
This package only contains kcptun $(1).
|
||||
endef
|
||||
|
||||
define Package/kcptun-$(1)/install
|
||||
$$(call GoPackage/Package/Install/Bin,$$(PKG_INSTALL_DIR))
|
||||
|
||||
$$(INSTALL_DIR) $$(1)/usr/bin
|
||||
$$(INSTALL_BIN) $$(PKG_INSTALL_DIR)/usr/bin/$(1) $$(1)/usr/bin/kcptun-$(1)
|
||||
endef
|
||||
endef
|
||||
|
||||
$(eval $(call BuildPackage,kcptun-config))
|
||||
KCPTUN_COMPONENTS:=server client
|
||||
$(foreach component,$(KCPTUN_COMPONENTS), \
|
||||
$(eval $(call Package/kcptun/Default,$(component))) \
|
||||
$(eval $(call GoBinPackage,kcptun-$(component))) \
|
||||
$(eval $(call BuildPackage,kcptun-$(component))) \
|
||||
)
|
@ -1,43 +0,0 @@
|
||||
config server
|
||||
option disabled 1
|
||||
option listen 29900
|
||||
option target '127.0.0.1'
|
||||
option target_port 12948
|
||||
option crypt 'aes'
|
||||
option key 'secret'
|
||||
option mode 'fast'
|
||||
option datashard 10
|
||||
option parityshard 3
|
||||
option dscp 46
|
||||
option nocomp 1
|
||||
option snmplog '/var/log/snmp.log'
|
||||
option snmpperiod 60
|
||||
option pprof 1
|
||||
option quiet 1
|
||||
option syslog 1
|
||||
option user nobody
|
||||
|
||||
config client
|
||||
option disabled 1
|
||||
option bind_address '0.0.0.0'
|
||||
option local_port 12948
|
||||
option server 'vps'
|
||||
option server_port 29900
|
||||
option crypt 'aes'
|
||||
option key 'secret'
|
||||
option mode 'fast'
|
||||
option conn 1
|
||||
option autoexpire 0
|
||||
option scavengettl 600
|
||||
option mtu 1350
|
||||
option sndwnd 128
|
||||
option rcvwnd 512
|
||||
option nocomp 1
|
||||
option sockbuf 4194304
|
||||
option smuxver 1
|
||||
option smuxbuf 4194304
|
||||
option streambuf 2097152
|
||||
option keepalive 10
|
||||
option quiet 1
|
||||
option gogc 20
|
||||
option user nobody
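For reference, a sketch (not part of the original tree) of roughly what the init script below would render into /var/etc/kcptun/client.<section>.json from the client section above, assuming the section were enabled (the shipped default has disabled '1'):

{
	"localaddr": "0.0.0.0:12948",
	"remoteaddr": "vps:29900",
	"conn": 1,
	"autoexpire": 0,
	"scavengettl": 600,
	"crypt": "aes",
	"key": "secret",
	"mode": "fast",
	"mtu": 1350,
	"sndwnd": 128,
	"rcvwnd": 512,
	"nocomp": true,
	"sockbuf": 4194304,
	"smuxver": 1,
	"smuxbuf": 4194304,
	"streambuf": 2097152,
	"keepalive": 10,
	"quiet": true
}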
|
@ -1,174 +0,0 @@
|
||||
#!/bin/sh /etc/rc.common
|
||||
#
|
||||
# Copyright (C) 2019 Chao Liu <expiron18@gmail.com>
|
||||
#
|
||||
# This is free software, licensed under the GNU General Public License v3.
|
||||
# See /LICENSE for more information.
|
||||
#
|
||||
|
||||
USE_PROCD=1
|
||||
START=99
|
||||
|
||||
confdir=/var/etc/kcptun
|
||||
bindir=/usr/bin
|
||||
|
||||
mkjson_server_conf() {
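# Emit the server-only keys (listen, target, pprof) into the JSON config being built;
# bail out unless the section is enabled and listen/target/target_port are all set.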
|
||||
[ "$disabled" = 0 ] || return 1
|
||||
[ -n "$listen" ] || return 1
|
||||
[ -n "$target" ] || return 1
|
||||
[ -n "$target_port" ] || return 1
|
||||
json_add_string listen ":$listen"
|
||||
json_add_string target "$target:$target_port"
|
||||
json_add_boolean pprof "$pprof"
|
||||
}
|
||||
|
||||
mkjson_client_conf() {
|
||||
[ "$disabled" = 0 ] || return 1
|
||||
[ -n "$local_port" ] || return 1
|
||||
[ -n "$server" ] || return 1
|
||||
[ -n "$server_port" ] || return 1
|
||||
json_add_string localaddr "$bind_address:$local_port"
|
||||
json_add_string remoteaddr "$server:$server_port"
|
||||
[ -z "$conn" ] || json_add_int conn "$conn"
|
||||
[ -z "$autoexpire" ] || json_add_int autoexpire "$autoexpire"
|
||||
[ -z "$scavengettl" ] || json_add_int scavengettl "$scavengettl"
|
||||
}
|
||||
|
||||
kcptun() {
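# config_foreach handler: validate the section, render /var/etc/kcptun/<type>.<section>.json
# and register a respawning procd instance running "kcptun-<type> -c <json>".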
|
||||
local cfg="$1"
|
||||
local cfgtype="$2"
|
||||
local bin="$bindir/kcptun-$cfgtype"
|
||||
local confjson="$confdir/$cfgtype.$cfg.json"
|
||||
|
||||
[ -x "$bin" ] || return
|
||||
eval "$("validate_${cfgtype}_section" "$cfg" validate_mklocal)"
|
||||
"validate_${cfgtype}_section" "$cfg" || return
|
||||
[ "$disabled" = 0 ] || return
|
||||
|
||||
json_init
|
||||
mkjson_${cfgtype}_conf || return
|
||||
[ -z "$crypt" ] || json_add_string crypt "$crypt"
|
||||
[ -z "$key" ] || json_add_string key "$key"
|
||||
[ -z "$mode" ] || json_add_string mode "$mode"
|
||||
[ -z "$mtu" ] || json_add_int mtu "$mtu"
|
||||
[ -z "$sndwnd" ] || json_add_int sndwnd "$sndwnd"
|
||||
[ -z "$rcvwnd" ] || json_add_int rcvwnd "$rcvwnd"
|
||||
[ -z "$datashard" ] || json_add_int datashard "$datashard"
|
||||
[ -z "$parityshard" ] || json_add_int parityshard "$parityshard"
|
||||
[ -z "$dscp" ] || json_add_int dscp "$dscp"
|
||||
json_add_boolean nocomp "$nocomp"
|
||||
[ -z "$sockbuf" ] || json_add_int sockbuf "$sockbuf"
|
||||
[ -z "$smuxver" ] || json_add_int smuxver "$smuxver"
|
||||
[ -z "$smuxbuf" ] || json_add_int smuxbuf "$smuxbuf"
|
||||
[ -z "$streambuf" ] || json_add_int streambuf "$streambuf"
|
||||
[ -z "$keepalive" ] || json_add_int keepalive "$keepalive"
|
||||
[ -z "$snmplog" ] || json_add_string snmplog "$snmplog"
|
||||
[ -z "$snmpperiod" ] || json_add_int snmpperiod "$snmpperiod"
|
||||
json_add_boolean quiet "$quiet"
|
||||
json_dump -i > "$confjson"
|
||||
|
||||
procd_open_instance "$cfgtype.$cfg"
|
||||
procd_set_param command "$bin" -c "$confjson"
|
||||
[ -z "$gogc" ] || procd_set_param env GOGC="$gogc"
|
||||
[ -z "$syslog" ] || procd_set_param stderr 1
|
||||
[ -z "$user" ] || procd_set_param user "$user"
|
||||
procd_set_param file "$confjson"
|
||||
procd_set_param respawn
|
||||
procd_close_instance
|
||||
}
|
||||
|
||||
start_service() {
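# Render a JSON config and spawn a procd instance for every server and client section
# defined in /etc/config/kcptun.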
|
||||
local cfgtype
|
||||
|
||||
mkdir -p "$confdir"
|
||||
config_load kcptun
|
||||
for cfgtype in server client; do
|
||||
config_foreach kcptun "$cfgtype" "$cfgtype"
|
||||
done
|
||||
}
|
||||
|
||||
stop_service() {
|
||||
rm -rf "$confdir"
|
||||
}
|
||||
|
||||
service_triggers() {
|
||||
procd_add_reload_interface_trigger wan
|
||||
procd_add_reload_trigger kcptun
|
||||
procd_open_validate
|
||||
validate_server_section
|
||||
validate_client_section
|
||||
procd_close_validate
|
||||
}
|
||||
|
||||
validate_mklocal() {
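# Collect the option names from the validation tuples and echo a "local ..." statement,
# which the caller evals so the options become local shell variables.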
|
||||
local tuple opts
|
||||
|
||||
shift 2
|
||||
for tuple in "$@"; do
|
||||
opts="${tuple%%:*} $opts"
|
||||
done
|
||||
[ -z "$opts" ] || echo "local $opts"
|
||||
}
|
||||
|
||||
validate() {
|
||||
uci_validate_section kcptun "$@"
|
||||
}
|
||||
|
||||
validate_common_options() {
|
||||
local cfgtype="$1"; shift
|
||||
local cfg="$1"; shift
|
||||
local func="$1"; shift
|
||||
local crypt_methods='"aes", "aes-128", "aes-192", "salsa20", "blowfish", "twofish", "cast5", "3des", "tea", "xtea", "xor", "sm4", "none"'
|
||||
local mode_profiles='"fast3", "fast2", "fast", "normal", "manual"'
|
||||
|
||||
"${func:-validate}" "$cfgtype" "$cfg" "$@" \
|
||||
'disabled:bool:0' \
|
||||
'key:string' \
|
||||
"crypt:or($crypt_methods)" \
|
||||
"mode:or($mode_profiles)" \
|
||||
'mtu:uinteger' \
|
||||
'sndwnd:uinteger' \
|
||||
'rcvwnd:uinteger' \
|
||||
'datashard:uinteger' \
|
||||
'parityshard:uinteger' \
|
||||
'dscp:uinteger' \
|
||||
'nocomp:bool' \
|
||||
'sockbuf:uinteger' \
|
||||
'smuxver:uinteger' \
|
||||
'smuxbuf:uinteger' \
|
||||
'streambuf:uinteger' \
|
||||
'keepalive:uinteger' \
|
||||
'snmplog:string' \
|
||||
'snmpperiod:uinteger' \
|
||||
'quiet:bool' \
|
||||
'gogc:uinteger' \
|
||||
'syslog:bool:1' \
|
||||
'user:string:nobody'
|
||||
}
|
||||
|
||||
validate_server_options() {
|
||||
validate_common_options server "$@" \
|
||||
'listen:port' \
|
||||
'target:host' \
|
||||
'target_port:port' \
|
||||
'pprof:bool'
|
||||
}
|
||||
|
||||
validate_client_options() {
|
||||
validate_common_options client "$@" \
|
||||
'bind_address:ipaddr' \
|
||||
'local_port:port' \
|
||||
'server:host' \
|
||||
'server_port:port' \
|
||||
'conn:uinteger' \
|
||||
'autoexpire:uinteger' \
|
||||
'scavengettl:uinteger'
|
||||
}
|
||||
|
||||
validate_server_section() {
|
||||
validate_server_options "$1" "$2"
|
||||
}
|
||||
|
||||
validate_client_section() {
|
||||
validate_client_options "$1" "$2"
|
||||
}
|
@ -1 +0,0 @@
|
||||
Subproject commit 28c52fcad8ac5d2933218811582b58bb7c32314b
|
@ -1,17 +0,0 @@
|
||||
include $(TOPDIR)/rules.mk
|
||||
|
||||
PKG_NAME:=luci-app-aliyundrive-fuse
|
||||
PKG_VERSION:=0.1.6
|
||||
PKG_RELEASE:=
|
||||
PKG_PO_VERSION:=$(PKG_VERSION)-$(PKG_RELEASE)
|
||||
|
||||
PKG_LICENSE:=MIT
|
||||
PKG_MAINTAINER:=messense <messense@icloud.com>
|
||||
|
||||
LUCI_TITLE:=LuCI Support for aliyundrive-fuse
|
||||
LUCI_PKGARCH:=all
|
||||
LUCI_DEPENDS:=+aliyundrive-fuse +lua +libuci-lua
|
||||
|
||||
include $(TOPDIR)/feeds/luci/luci.mk
|
||||
|
||||
# call BuildPackage - OpenWrt buildroot signature
|
@ -1,35 +0,0 @@
|
||||
module("luci.controller.aliyundrive-fuse", package.seeall)
|
||||
|
||||
function index()
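-- Register the pages only when /etc/config/aliyundrive-fuse exists; status and logtail
-- are JSON endpoints polled by the status/log templates.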
|
||||
if not nixio.fs.access("/etc/config/aliyundrive-fuse") then
|
||||
return
|
||||
end
|
||||
entry({"admin", "services", "aliyundrive-fuse"}, alias("admin", "services", "aliyundrive-fuse", "client"),_("AliyunDrive FUSE"), 10).dependent = true -- 首页
|
||||
entry({"admin", "services", "aliyundrive-fuse", "client"}, cbi("aliyundrive-fuse/client"),_("Settings"), 10).leaf = true -- 客户端配置
|
||||
entry({"admin", "services", "aliyundrive-fuse", "log"}, form("aliyundrive-fuse/log"),_("Log"), 30).leaf = true -- 日志页面
|
||||
|
||||
entry({"admin", "services", "aliyundrive-fuse", "status"}, call("action_status")).leaf = true
|
||||
entry({"admin", "services", "aliyundrive-fuse", "logtail"}, call("action_logtail")).leaf = true
|
||||
end
|
||||
|
||||
function action_status()
|
||||
local e = {}
|
||||
e.running = luci.sys.call("pidof aliyundrive-fuse >/dev/null") == 0
|
||||
e.application = luci.sys.exec("aliyundrive-fuse --version")
|
||||
luci.http.prepare_content("application/json")
|
||||
luci.http.write_json(e)
|
||||
end
|
||||
|
||||
function action_logtail()
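-- Return the service state and the last 100 log lines (ANSI colour codes stripped) as JSON
-- for the log page poller.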
|
||||
local fs = require "nixio.fs"
|
||||
local log_path = "/var/log/aliyundrive-fuse.log"
|
||||
local e = {}
|
||||
e.running = luci.sys.call("pidof aliyundrive-fuse >/dev/null") == 0
|
||||
if fs.access(log_path) then
|
||||
e.log = luci.sys.exec("tail -n 100 %s | sed 's/\\x1b\\[[0-9;]*m//g'" % log_path)
|
||||
else
|
||||
e.log = ""
|
||||
end
|
||||
luci.http.prepare_content("application/json")
|
||||
luci.http.write_json(e)
|
||||
end
|
@ -1,32 +0,0 @@
|
||||
local uci = luci.model.uci.cursor()
|
||||
local m, e
|
||||
|
||||
m = Map("aliyundrive-fuse")
|
||||
m.title = translate("AliyunDrive FUSE")
|
||||
m.description = translate("<a href=\"https://github.com/messense/aliyundrive-fuse\" target=\"_blank\">Project GitHub URL</a>")
|
||||
|
||||
m:section(SimpleSection).template = "aliyundrive-fuse/aliyundrive-fuse_status"
|
||||
|
||||
e = m:section(TypedSection, "default")
|
||||
e.anonymous = true
|
||||
|
||||
enable = e:option(Flag, "enable", translate("Enable"))
|
||||
enable.rmempty = false
|
||||
|
||||
refresh_token = e:option(Value, "refresh_token", translate("Refresh Token"))
|
||||
refresh_token.description = translate("<a href=\"https://github.com/messense/aliyundrive-webdav#%E8%8E%B7%E5%8F%96-refresh_token\" target=\"_blank\">How to get refresh token</a>")
|
||||
|
||||
mount_point = e:option(Value, "mount_point", translate("Mount Point"))
|
||||
mount_point.default = "/mnt/aliyundrive"
|
||||
|
||||
read_buffer_size = e:option(Value, "read_buffer_size", translate("Read Buffer Size"))
|
||||
read_buffer_size.default = "10485760"
|
||||
read_buffer_size.datatype = "uinteger"
|
||||
|
||||
domain_id = e:option(Value, "domain_id", translate("Domain ID"))
|
||||
domain_id.description = translate("Input domain_id option will use <a href=\"https://www.aliyun.com/product/storage/pds\" target=\"_blank\">Aliyun PDS</a> instead of <a href=\"https://www.aliyundrive.com\" target=\"_blank\">AliyunDrive</a>")
|
||||
|
||||
debug = e:option(Flag, "debug", translate("Debug Mode"))
|
||||
debug.rmempty = false
|
||||
|
||||
return m
|
@ -1,9 +0,0 @@
|
||||
log = SimpleForm("logview")
|
||||
log.submit = false
|
||||
log.reset = false
|
||||
|
||||
t = log:field(DummyValue, '', '')
|
||||
t.rawhtml = true
|
||||
t.template = 'aliyundrive-fuse/aliyundrive-fuse_log'
|
||||
|
||||
return log
|
@ -1,15 +0,0 @@
|
||||
<%+cbi/valueheader%>
|
||||
<textarea id="logview" class="cbi-input-textarea" style="width: 100%" rows="30" readonly="readonly"></textarea>
|
||||
|
||||
<script type="text/javascript">
|
||||
const LOG_URL = '<%=luci.dispatcher.build_url("admin", "services", "aliyundrive-fuse", "logtail")%>';
|
||||
XHR.poll(1, LOG_URL, null, (x, d) => {
|
||||
let logview = document.getElementById("logview");
|
||||
if (!d.running) {
|
||||
XHR.halt();
|
||||
}
|
||||
logview.value = d.log;
|
||||
logview.scrollTop = logview.scrollHeight;
|
||||
});
|
||||
</script>
|
||||
<%+cbi/valuefooter%>
|
@ -1,21 +0,0 @@
|
||||
<script type="text/javascript">//<![CDATA[
|
||||
XHR.poll(3, '<%=url([[admin]], [[services]], [[aliyundrive-fuse]], [[status]])%>', null,
|
||||
function(x, data) {
|
||||
var tb = document.getElementById('aliyundrive-fuse_status');
|
||||
if (data && tb) {
|
||||
if (data.running) {
|
||||
tb.innerHTML = '<em><b style=color:green>' + data.application + '<%:RUNNING%></b></em>';
|
||||
} else {
|
||||
tb.innerHTML = '<em><b style=color:red>' + data.application + '<%:NOT RUNNING%></b></em>';
|
||||
}
|
||||
}
|
||||
}
|
||||
);
|
||||
//]]>
|
||||
</script>
|
||||
<style>.mar-10 {margin-left: 50px; margin-right: 10px;}</style>
|
||||
<fieldset class="cbi-section">
|
||||
<p id="aliyundrive-fuse_status">
|
||||
<em><%:Collecting data...%></em>
|
||||
</p>
|
||||
</fieldset>
|
@ -1,50 +0,0 @@
|
||||
msgid ""
|
||||
msgstr "Content-Type: text/plain; charset=UTF-8\n"
|
||||
|
||||
msgid "AliyunDrive"
|
||||
msgstr "阿里云盘"
|
||||
|
||||
msgid "AliyunDrive FUSE"
|
||||
msgstr "阿里云盘 FUSE"
|
||||
|
||||
msgid "Enable"
|
||||
msgstr "启用"
|
||||
|
||||
msgid "Refresh Token"
|
||||
msgstr "Refresh Token"
|
||||
|
||||
msgid "Mount Point"
|
||||
msgstr "挂载点"
|
||||
|
||||
msgid "Read Buffer Size"
|
||||
msgstr "下载缓冲大小(bytes)"
|
||||
|
||||
msgid "Collecting data..."
|
||||
msgstr "获取数据中..."
|
||||
|
||||
msgid "RUNNING"
|
||||
msgstr "运行中"
|
||||
|
||||
msgid "NOT RUNNING"
|
||||
msgstr "未运行"
|
||||
|
||||
msgid "Settings"
|
||||
msgstr "设置"
|
||||
|
||||
msgid "Log"
|
||||
msgstr "日志"
|
||||
|
||||
msgid "Debug Mode"
|
||||
msgstr "调试模式"
|
||||
|
||||
msgid "<a href=\"https://github.com/messense/aliyundrive-fuse\" target=\"_blank\">Project GitHub URL</a>"
|
||||
msgstr "<a href=\"https://github.com/messense/aliyundrive-fuse\" target=\"_blank\">GitHub 项目地址</a>"
|
||||
|
||||
msgid "<a href=\"https://github.com/messense/aliyundrive-webdav#%E8%8E%B7%E5%8F%96-refresh_token\" target=\"_blank\">How to get refresh token</a>"
|
||||
msgstr "<a href=\"https://github.com/messense/aliyundrive-webdav#%E8%8E%B7%E5%8F%96-refresh_token\" target=\"_blank\">查看获取 refresh token 的方法</a>"
|
||||
|
||||
msgid "Domain ID"
|
||||
msgstr "阿里云相册与云盘服务 domainId"
|
||||
|
||||
msgid "Input domain_id option will use <a href=\"https://www.aliyun.com/product/storage/pds\" target=\"_blank\">Aliyun PDS</a> instead of <a href=\"https://www.aliyundrive.com\" target=\"_blank\">AliyunDrive</a>"
|
||||
msgstr "填写此选项将使用<a href=\"https://www.aliyun.com/product/storage/pds\" target=\"_blank\">阿里云相册与网盘服务</a>而不是<a href=\"https://www.aliyundrive.com\" target=\"_blank\">阿里云盘</a>"
|
@ -1,11 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
uci -q batch <<-EOF >/dev/null
|
||||
delete ucitrack.@aliyundrive-fuse[-1]
|
||||
add ucitrack aliyundrive-fuse
|
||||
set ucitrack.@aliyundrive-fuse[-1].init=aliyundrive-fuse
|
||||
commit ucitrack
|
||||
EOF
|
||||
|
||||
rm -f /tmp/luci-indexcache
|
||||
exit 0
|
@ -1,15 +0,0 @@
|
||||
include $(TOPDIR)/rules.mk
|
||||
|
||||
PKG_NAME:=luci-app-aliyundrive-webdav
|
||||
PKG_PO_VERSION:=$(PKG_VERSION)-$(PKG_RELEASE)
|
||||
|
||||
PKG_LICENSE:=MIT
|
||||
PKG_MAINTAINER:=messense <messense@icloud.com>
|
||||
|
||||
LUCI_TITLE:=LuCI Support for aliyundrive-webdav
|
||||
LUCI_PKGARCH:=all
|
||||
LUCI_DEPENDS:=+aliyundrive-webdav
|
||||
|
||||
include $(TOPDIR)/feeds/luci/luci.mk
|
||||
|
||||
# call BuildPackage - OpenWrt buildroot signature
|
@ -1,40 +0,0 @@
|
||||
module("luci.controller.aliyundrive-webdav", package.seeall)
|
||||
|
||||
function index()
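-- Same page layout as the aliyundrive-fuse controller above, plus an ACL dependency on
-- luci-app-aliyundrive-webdav.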
|
||||
if not nixio.fs.access("/etc/config/aliyundrive-webdav") then
|
||||
return
|
||||
end
|
||||
|
||||
local page
|
||||
page = entry({"admin", "services", "aliyundrive-webdav"}, alias("admin", "services", "aliyundrive-webdav", "client"), _("AliyunDrive WebDAV"), 10) -- 首页
|
||||
page.dependent = true
|
||||
page.acl_depends = { "luci-app-aliyundrive-webdav" }
|
||||
|
||||
entry({"admin", "services", "aliyundrive-webdav", "client"}, cbi("aliyundrive-webdav/client"), _("Settings"), 10).leaf = true -- 客户端配置
|
||||
entry({"admin", "services", "aliyundrive-webdav", "log"}, form("aliyundrive-webdav/log"), _("Log"), 30).leaf = true -- 日志页面
|
||||
|
||||
entry({"admin", "services", "aliyundrive-webdav", "status"}, call("action_status")).leaf = true -- 运行状态
|
||||
entry({"admin", "services", "aliyundrive-webdav", "logtail"}, call("action_logtail")).leaf = true -- 日志采集
|
||||
end
|
||||
|
||||
function action_status()
|
||||
local e = {}
|
||||
e.running = luci.sys.call("pidof aliyundrive-webdav >/dev/null") == 0
|
||||
e.application = luci.sys.exec("aliyundrive-webdav --version")
|
||||
luci.http.prepare_content("application/json")
|
||||
luci.http.write_json(e)
|
||||
end
|
||||
|
||||
function action_logtail()
|
||||
local fs = require "nixio.fs"
|
||||
local log_path = "/var/log/aliyundrive-webdav.log"
|
||||
local e = {}
|
||||
e.running = luci.sys.call("pidof aliyundrive-webdav >/dev/null") == 0
|
||||
if fs.access(log_path) then
|
||||
e.log = luci.sys.exec("tail -n 100 %s | sed 's/\\x1b\\[[0-9;]*m//g'" % log_path)
|
||||
else
|
||||
e.log = ""
|
||||
end
|
||||
luci.http.prepare_content("application/json")
|
||||
luci.http.write_json(e)
|
||||
end
|
@ -1,60 +0,0 @@
|
||||
m = Map("aliyundrive-webdav")
|
||||
m.title = translate("AliyunDrive WebDAV")
|
||||
m.description = translate("<a href=\"https://github.com/messense/aliyundrive-webdav\" target=\"_blank\">Project GitHub URL</a>")
|
||||
|
||||
m:section(SimpleSection).template = "aliyundrive-webdav/aliyundrive-webdav_status"
|
||||
|
||||
e = m:section(TypedSection, "server")
|
||||
e.anonymous = true
|
||||
|
||||
enable = e:option(Flag, "enable", translate("Enable"))
|
||||
enable.rmempty = false
|
||||
|
||||
refresh_token = e:option(Value, "refresh_token", translate("Refresh Token"))
|
||||
refresh_token.description = translate("<a href=\"https://github.com/messense/aliyundrive-webdav#%E8%8E%B7%E5%8F%96-refresh_token\" target=\"_blank\">How to get refresh token</a>")
|
||||
|
||||
root = e:option(Value, "root", translate("Root Directory"))
|
||||
root.description = translate("Restrict access to a folder of aliyundrive, defaults to / which means no restrictions")
|
||||
root.default = "/"
|
||||
|
||||
host = e:option(Value, "host", translate("Host"))
|
||||
host.default = "0.0.0.0"
|
||||
host.datatype = "ipaddr"
|
||||
|
||||
port = e:option(Value, "port", translate("Port"))
|
||||
port.default = "8080"
|
||||
port.datatype = "port"
|
||||
|
||||
tls_cert = e:option(Value, "tls_cert", translate("TLS certificate file path"))
|
||||
tls_key = e:option(Value, "tls_key", translate("TLS private key file path"))
|
||||
|
||||
auth_user = e:option(Value, "auth_user", translate("Username"))
|
||||
auth_password = e:option(Value, "auth_password", translate("Password"))
|
||||
auth_password.password = true
|
||||
|
||||
read_buffer_size = e:option(Value, "read_buffer_size", translate("Read Buffer Size"))
|
||||
read_buffer_size.default = "10485760"
|
||||
read_buffer_size.datatype = "uinteger"
|
||||
|
||||
cache_size = e:option(Value, "cache_size", translate("Cache Size"))
|
||||
cache_size.default = "1000"
|
||||
cache_size.datatype = "uinteger"
|
||||
|
||||
cache_ttl = e:option(Value, "cache_ttl", translate("Cache Expiration Time (seconds)"))
|
||||
cache_ttl.default = "600"
|
||||
cache_ttl.datatype = "uinteger"
|
||||
|
||||
no_trash = e:option(Flag, "no_trash", translate("Delete file permanently instead of trashing"))
|
||||
no_trash.rmempty = false
|
||||
|
||||
read_only = e:option(Flag, "read_only", translate("Enable read only mode"))
|
||||
read_only.description = translate("Disallow upload, modify and delete file operations")
|
||||
read_only.rmempty = false
|
||||
|
||||
domain_id = e:option(Value, "domain_id", translate("Domain ID"))
|
||||
domain_id.description = translate("Input domain_id option will use <a href=\"https://www.aliyun.com/product/storage/pds\" target=\"_blank\">Aliyun PDS</a> instead of <a href=\"https://www.aliyundrive.com\" target=\"_blank\">AliyunDrive</a>")
|
||||
|
||||
debug = e:option(Flag, "debug", translate("Debug Mode"))
|
||||
debug.rmempty = false
|
||||
|
||||
return m
|
@ -1,9 +0,0 @@
|
||||
log = SimpleForm("logview")
|
||||
log.submit = false
|
||||
log.reset = false
|
||||
|
||||
t = log:field(DummyValue, '', '')
|
||||
t.rawhtml = true
|
||||
t.template = 'aliyundrive-webdav/aliyundrive-webdav_log'
|
||||
|
||||
return log
|
@ -1,15 +0,0 @@
|
||||
<%+cbi/valueheader%>
|
||||
<textarea id="logview" class="cbi-input-textarea" style="width: 100%" rows="30" readonly="readonly"></textarea>
|
||||
|
||||
<script type="text/javascript">
|
||||
const LOG_URL = '<%=luci.dispatcher.build_url("admin", "services", "aliyundrive-webdav", "logtail")%>';
|
||||
XHR.poll(1, LOG_URL, null, (x, d) => {
|
||||
let logview = document.getElementById("logview");
|
||||
if (!d.running) {
|
||||
XHR.halt();
|
||||
}
|
||||
logview.value = d.log;
|
||||
logview.scrollTop = logview.scrollHeight;
|
||||
});
|
||||
</script>
|
||||
<%+cbi/valuefooter%>
|
@ -1,21 +0,0 @@
|
||||
<script type="text/javascript">//<![CDATA[
|
||||
XHR.poll(3, '<%=url([[admin]], [[services]], [[aliyundrive-webdav]], [[status]])%>', null,
|
||||
function(x, data) {
|
||||
var tb = document.getElementById('aliyundrive-webdav_status');
|
||||
if (data && tb) {
|
||||
if (data.running) {
|
||||
tb.innerHTML = '<em><b style=color:green>' + data.application + '<%:RUNNING%></b></em>';
|
||||
} else {
|
||||
tb.innerHTML = '<em><b style=color:red>' + data.application + '<%:NOT RUNNING%></b></em>';
|
||||
}
|
||||
}
|
||||
}
|
||||
);
|
||||
//]]>
|
||||
</script>
|
||||
<style>.mar-10 {margin-left: 50px; margin-right: 10px;}</style>
|
||||
<fieldset class="cbi-section">
|
||||
<p id="aliyundrive-webdav_status">
|
||||
<em><%:Collecting data...%></em>
|
||||
</p>
|
||||
</fieldset>
|
@ -1,86 +0,0 @@
|
||||
msgid ""
|
||||
msgstr "Content-Type: text/plain; charset=UTF-8\n"
|
||||
|
||||
msgid "AliyunDrive"
|
||||
msgstr "阿里云盘"
|
||||
|
||||
msgid "AliyunDrive WebDAV"
|
||||
msgstr "阿里云盘 WebDAV"
|
||||
|
||||
msgid "Enable"
|
||||
msgstr "启用"
|
||||
|
||||
msgid "Refresh Token"
|
||||
msgstr "Refresh Token"
|
||||
|
||||
msgid "Root Directory"
|
||||
msgstr "云盘根目录"
|
||||
|
||||
msgid "Host"
|
||||
msgstr "监听主机"
|
||||
|
||||
msgid "Port"
|
||||
msgstr "监听端口"
|
||||
|
||||
msgid "TLS certificate file path"
|
||||
msgstr "TLS 证书文件路径"
|
||||
|
||||
msgid "TLS private key file path"
|
||||
msgstr "TLS 私钥文件路径"
|
||||
|
||||
msgid "Username"
|
||||
msgstr "用户名"
|
||||
|
||||
msgid "Password"
|
||||
msgstr "密码"
|
||||
|
||||
msgid "Read Buffer Size"
|
||||
msgstr "下载缓冲大小(bytes)"
|
||||
|
||||
msgid "Cache Size"
|
||||
msgstr "目录缓存大小"
|
||||
|
||||
msgid "Cache Expiration Time (seconds)"
|
||||
msgstr "目录缓存过期时间(单位为秒)"
|
||||
|
||||
msgid "Collecting data..."
|
||||
msgstr "获取数据中..."
|
||||
|
||||
msgid "RUNNING"
|
||||
msgstr "运行中"
|
||||
|
||||
msgid "NOT RUNNING"
|
||||
msgstr "未运行"
|
||||
|
||||
msgid "Settings"
|
||||
msgstr "设置"
|
||||
|
||||
msgid "Log"
|
||||
msgstr "日志"
|
||||
|
||||
msgid "Debug Mode"
|
||||
msgstr "调试模式"
|
||||
|
||||
msgid "<a href=\"https://github.com/messense/aliyundrive-webdav\" target=\"_blank\">Project GitHub URL</a>"
|
||||
msgstr "<a href=\"https://github.com/messense/aliyundrive-webdav\" target=\"_blank\">GitHub 项目地址</a>"
|
||||
|
||||
msgid "<a href=\"https://github.com/messense/aliyundrive-webdav#%E8%8E%B7%E5%8F%96-refresh_token\" target=\"_blank\">How to get refresh token</a>"
|
||||
msgstr "<a href=\"https://github.com/messense/aliyundrive-webdav#%E8%8E%B7%E5%8F%96-refresh_token\" target=\"_blank\">查看获取 refresh token 的方法</a>"
|
||||
|
||||
msgid "Restrict access to a folder of aliyundrive, defaults to / which means no restrictions"
|
||||
msgstr "限制只能访问该云盘目录,默认为 / 表示不限制,注意这个参数不是本地磁盘路径"
|
||||
|
||||
msgid "Delete file permanently instead of trashing"
|
||||
msgstr "删除文件不放入回收站"
|
||||
|
||||
msgid "Enable read only mode"
|
||||
msgstr "启用只读模式"
|
||||
|
||||
msgid "Disallow upload, modify and delete file operations"
|
||||
msgstr "禁止上传、修改和删除文件操作"
|
||||
|
||||
msgid "Domain ID"
|
||||
msgstr "阿里云相册与云盘服务 domainId"
|
||||
|
||||
msgid "Input domain_id option will use <a href=\"https://www.aliyun.com/product/storage/pds\" target=\"_blank\">Aliyun PDS</a> instead of <a href=\"https://www.aliyundrive.com\" target=\"_blank\">AliyunDrive</a>"
|
||||
msgstr "填写此选项将使用<a href=\"https://www.aliyun.com/product/storage/pds\" target=\"_blank\">阿里云相册与网盘服务</a>而不是<a href=\"https://www.aliyundrive.com\" target=\"_blank\">阿里云盘</a>"
|
@ -1 +0,0 @@
|
||||
zh-cn
|
@ -1,11 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
uci -q batch <<-EOF >/dev/null
|
||||
delete ucitrack.@aliyundrive-webdav[-1]
|
||||
add ucitrack aliyundrive-webdav
|
||||
set ucitrack.@aliyundrive-webdav[-1].init=aliyundrive-webdav
|
||||
commit ucitrack
|
||||
EOF
|
||||
|
||||
rm -f /tmp/luci-indexcache
|
||||
exit 0
|
@ -1,11 +0,0 @@
|
||||
{
|
||||
"luci-app-aliyundrive-webdav": {
|
||||
"description": "Grant UCI access for luci-app-aliyundrive-webdav",
|
||||
"read": {
|
||||
"uci": [ "aliyundrive-webdav" ]
|
||||
},
|
||||
"write": {
|
||||
"uci": [ "aliyundrive-webdav" ]
|
||||
}
|
||||
}
|
||||
}
|
@ -1 +0,0 @@
|
||||
Subproject commit c5c28baf5f67786fe5e89642ae49c94f5498744c
|
@ -1 +0,0 @@
|
||||
Subproject commit f5d1c710a45640bdd40e67cfd9c44bf80006045c
|
@ -1 +0,0 @@
|
||||
Subproject commit c56bbd5ca43a17302e675b61841a6dc6811229b0
|
@ -1 +0,0 @@
|
||||
Subproject commit e0280b19010f2ac8ec616b5cde429825d1466872
|
@ -1,21 +0,0 @@
|
||||
include $(TOPDIR)/rules.mk
|
||||
|
||||
LUCI_TITLE:=LuCI Support for docker
|
||||
LUCI_DEPENDS:=@(aarch64||arm||x86_64) \
|
||||
+luci-compat \
|
||||
+luci-lib-docker \
|
||||
+luci-lib-ip \
|
||||
+docker \
|
||||
+dockerd \
|
||||
+ttyd
|
||||
LUCI_PKGARCH:=all
|
||||
|
||||
PKG_LICENSE:=AGPL-3.0
|
||||
PKG_MAINTAINER:=lisaac <lisaac.cn@gmail.com> \
|
||||
Florian Eckert <fe@dev.tdt.de>
|
||||
|
||||
PKG_VERSION:=v0.5.25
|
||||
|
||||
include $(TOPDIR)/feeds/luci/luci.mk
|
||||
|
||||
# call BuildPackage - OpenWrt buildroot signature
|
@ -1 +0,0 @@
|
||||
ttyd docker-cli
|
@ -1,7 +0,0 @@
|
||||
<?xml version="1.0" standalone="no"?>
|
||||
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
|
||||
|
||||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24">
|
||||
<title>Docker icon</title>
|
||||
<path d="M4.82 17.275c-.684 0-1.304-.56-1.304-1.24s.56-1.243 1.305-1.243c.748 0 1.31.56 1.31 1.242s-.622 1.24-1.305 1.24zm16.012-6.763c-.135-.992-.75-1.8-1.56-2.42l-.315-.25-.254.31c-.494.56-.69 1.553-.63 2.295.06.562.24 1.12.554 1.554-.254.13-.568.25-.81.377-.57.187-1.124.25-1.68.25H.097l-.06.37c-.12 1.182.06 2.42.562 3.54l.244.435v.06c1.5 2.483 4.17 3.6 7.078 3.6 5.594 0 10.182-2.42 12.357-7.633 1.425.062 2.864-.31 3.54-1.676l.18-.31-.3-.187c-.81-.494-1.92-.56-2.85-.31l-.018.002zm-8.008-.992h-2.428v2.42h2.43V9.518l-.002.003zm0-3.043h-2.428v2.42h2.43V6.48l-.002-.003zm0-3.104h-2.428v2.42h2.43v-2.42h-.002zm2.97 6.147H13.38v2.42h2.42V9.518l-.007.003zm-8.998 0H4.383v2.42h2.422V9.518l-.01.003zm3.03 0h-2.4v2.42H9.84V9.518l-.015.003zm-6.03 0H1.4v2.42h2.428V9.518l-.03.003zm6.03-3.043h-2.4v2.42H9.84V6.48l-.015-.003zm-3.045 0H4.387v2.42H6.8V6.48l-.016-.003z" />
|
||||
</svg>
|
@ -1,91 +0,0 @@
|
||||
.fb-container {
|
||||
margin-top: 1rem;
|
||||
}
|
||||
.fb-container .cbi-button {
|
||||
height: 1.8rem;
|
||||
}
|
||||
.fb-container .cbi-input-text {
|
||||
margin-bottom: 1rem;
|
||||
width: 100%;
|
||||
}
|
||||
.fb-container .panel-title {
|
||||
padding-bottom: 0;
|
||||
width: 50%;
|
||||
border-bottom: none;
|
||||
}
|
||||
.fb-container .panel-container {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
padding-bottom: 1rem;
|
||||
border-bottom: 1px solid #eee;
|
||||
}
|
||||
.fb-container .upload-container {
|
||||
display: none;
|
||||
margin: 1rem 0;
|
||||
}
|
||||
.fb-container .upload-file {
|
||||
margin-right: 2rem;
|
||||
}
|
||||
.fb-container .cbi-value-field {
|
||||
text-align: left;
|
||||
}
|
||||
.fb-container .parent-icon strong {
|
||||
margin-left: 1rem;
|
||||
}
|
||||
.fb-container td[class$="-icon"] {
|
||||
cursor: pointer;
|
||||
}
|
||||
.fb-container .file-icon, .fb-container .folder-icon, .fb-container .link-icon {
|
||||
position: relative;
|
||||
}
|
||||
.fb-container .file-icon:before, .fb-container .folder-icon:before, .fb-container .link-icon:before {
|
||||
display: inline-block;
|
||||
width: 1.5rem;
|
||||
height: 1.5rem;
|
||||
content: '';
|
||||
background-size: contain;
|
||||
margin: 0 0.5rem 0 1rem;
|
||||
vertical-align: middle;
|
||||
}
|
||||
.fb-container .file-icon:before {
|
||||
background-image: url(file-icon.png);
|
||||
}
|
||||
.fb-container .folder-icon:before {
|
||||
background-image: url(folder-icon.png);
|
||||
}
|
||||
.fb-container .link-icon:before {
|
||||
background-image: url(link-icon.png);
|
||||
}
|
||||
@media screen and (max-width: 480px) {
|
||||
.fb-container .upload-file {
|
||||
width: 14.6rem;
|
||||
}
|
||||
.fb-container .cbi-value-owner,
|
||||
.fb-container .cbi-value-perm {
|
||||
display: none;
|
||||
}
|
||||
}
|
||||
|
||||
.cbi-section-table {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.cbi-section-table-cell {
|
||||
text-align: right;
|
||||
}
|
||||
|
||||
.cbi-button-install {
|
||||
border-color: #c44;
|
||||
color: #c44;
|
||||
margin-left: 3px;
|
||||
}
|
||||
|
||||
.cbi-value-field {
|
||||
padding: 10px 0;
|
||||
}
|
||||
|
||||
.parent-icon {
|
||||
height: 1.8rem;
|
||||
padding: 10px 0;
|
||||
}
|
@ -1,9 +0,0 @@
|
||||
<?xml version="1.0" standalone="no"?>
|
||||
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
|
||||
|
||||
<svg xmlns="http://www.w3.org/2000/svg" id="icon-hub" viewBox="0 -4 42 50" stroke-width="2" fill-rule="nonzero" width="100%" height="100%">
|
||||
<path d="M37.176371,36.2324812 C37.1920117,36.8041095 36.7372743,37.270685 36.1684891,37.270685 L3.74335204,37.2703476 C3.17827583,37.2703476 2.72400056,36.8091818 2.72400056,36.2397767 L2.72400056,19.6131383 C1.4312007,18.4881431 0.662551336,16.8884326 0.662551336,15.1618249 L0.664207893,14.69503 C0.63774183,14.4532127 0.650524255,14.2942438 0.711604827,14.1238231 L5.10793246,1.20935468 C5.24853286,0.797020623 5.63848594,0.511627907 6.06681069,0.511627907 L34.0728364,0.511627907 C34.5091607,0.511627907 34.889927,0.793578201 35.0316653,1.20921034 L39.4428567,14.1234095 C39.4871296,14.273204 39.5020782,14.4249444 39.4884726,14.5493649 L39.4884726,15.1505835 C39.4884726,16.9959517 38.6190601,18.6883031 37.1764746,19.7563084 L37.176371,36.2324812 Z M35.1376208,35.209311 L35.1376208,20.7057152 C34.7023924,20.8097593 34.271333,20.8633641 33.8336069,20.8633641 C32.0046019,20.8633641 30.3013756,19.9547008 29.2437221,18.4771538 C28.1860473,19.954695 26.4828515,20.8633641 24.6538444,20.8633641 C22.824803,20.8633641 21.1216155,19.9547157 20.0639591,18.4771544 C19.0062842,19.9546953 17.3030887,20.8633641 15.4740818,20.8633641 C13.6450404,20.8633641 11.9418529,19.9547157 10.8841965,18.4771544 C9.82652161,19.9546953 8.12332608,20.8633641 6.29431919,20.8633641 C5.76735555,20.8633641 5.24095778,20.7883418 4.73973398,20.644674 L4.73973398,35.209311 L35.1376208,35.209311 Z M30.2720226,15.6557626 C30.5154632,17.4501192 32.0503909,18.8018554 33.845083,18.8018554 C35.7286794,18.8018554 37.285413,17.3395134 37.4474599,15.4751932 L30.2280765,15.4751932 C30.2470638,15.532987 30.2617919,15.5932958 30.2720226,15.6557626 Z M21.0484306,15.4751932 C21.0674179,15.532987 21.0821459,15.5932958 21.0923767,15.6557626 C21.3358173,17.4501192 22.8707449,18.8018554 24.665437,18.8018554 C26.4601001,18.8018554 27.9950169,17.4501481 28.2378191,15.6611556 C28.2451225,15.5981318 28.2590045,15.5358056 28.2787375,15.4751932 L21.0484306,15.4751932 Z M11.9238102,15.6557626 C12.1672508,17.4501192 13.7021785,18.8018554 15.4968705,18.8018554 C17.2915336,18.8018554 18.8264505,17.4501481 19.0692526,15.6611556 C19.0765561,15.5981318 19.0904381,15.5358056 19.110171,15.4751932 L11.8798641,15.4751932 C11.8988514,15.532987 11.9135795,15.5932958 11.9238102,15.6557626 Z M6.31682805,18.8018317 C8.11149114,18.8018317 9.64640798,17.4501244 9.88921012,15.6611319 C9.89651357,15.5981081 9.91039559,15.5357819 9.93012856,15.4751696 L2.70318796,15.4751696 C2.86612006,17.3346852 4.42809696,18.8018317 6.31682805,18.8018317 Z M3.09670082,13.4139924 L37.04257,13.4139924 L33.3489482,2.57204736 L6.80119239,2.57204736 L3.09670082,13.4139924 Z"
|
||||
id="Fill-1"></path>
|
||||
<rect id="Rectangle-3" x="14" y="26" width="6" height="10"></rect>
|
||||
<path d="M20,26 L20,36 L26,36 L26,26 L20,26 Z" id="Rectangle-3"></path>
|
||||
</svg>
|
@ -1,12 +0,0 @@
|
||||
<?xml version="1.0" standalone="no"?>
|
||||
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
|
||||
|
||||
<svg xmlns="http://www.w3.org/2000/svg" version="1.1" x="0px" y="0px" width="100%" height="100%" viewBox="0 0 48.723 48.723" xml:space="preserve">
|
||||
<path d="M7.452,24.152h3.435v5.701h0.633c0.001,0,0.001,0,0.002,0h0.636v-5.701h3.51v-1.059h17.124v1.104h3.178v5.656h0.619 c0,0,0,0,0.002,0h0.619v-5.656h3.736v-0.856c0-0.012,0.006-0.021,0.006-0.032c0-0.072,0-0.143,0-0.215h5.721v-1.316h-5.721 c0-0.054,0-0.108,0-0.164c0-0.011-0.006-0.021-0.006-0.032v-0.832h-8.154v1.028h-7.911v-2.652h-0.689c-0.001,0-0.001,0-0.002,0 h-0.678v2.652h-7.846v-1.104H7.452v1.104H1.114v1.316h6.338V24.152z" />
|
||||
<path d="M21.484,16.849h5.204v-2.611h7.133V1.555H14.588v12.683h6.896V16.849z M16.537,12.288V3.505h15.335v8.783H16.537z" />
|
||||
<rect x="18.682" y="16.898" width="10.809" height="0.537" />
|
||||
<path d="M0,43.971h6.896v2.611H12.1v-2.611h7.134V31.287H0V43.971z M1.95,33.236h15.334v8.785H1.95V33.236z" />
|
||||
<rect x="4.095" y="46.631" width="10.808" height="0.537" />
|
||||
<path d="M29.491,30.994v12.684h6.895v2.611h5.205v-2.611h7.133V30.994H29.491z M46.774,41.729H31.44v-8.783h15.334V41.729z" />
|
||||
<rect x="33.584" y="46.338" width="10.809" height="0.537" />
|
||||
</svg>
|
@ -1,185 +0,0 @@
|
||||
// https://github.com/thiscouldbebetter/TarFileExplorer
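// Minified in-browser tar reader/writer: TarFile/TarFileEntry/TarFileEntryHeader parse and emit
// 512-byte tar chunks, ByteStream handles raw bytes, and FileHelper turns byte arrays into blobs.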
|
||||
class TarFileTypeFlag
|
||||
{constructor(value,name)
|
||||
{this.value=value;this.id="_"+this.value;this.name=name;}
|
||||
static _instances;static Instances()
|
||||
{if(TarFileTypeFlag._instances==null)
|
||||
{TarFileTypeFlag._instances=new TarFileTypeFlag_Instances();}
|
||||
return TarFileTypeFlag._instances;}}
|
||||
class TarFileTypeFlag_Instances
|
||||
{constructor()
|
||||
{this.Normal=new TarFileTypeFlag("0","Normal");this.HardLink=new TarFileTypeFlag("1","Hard Link");this.SymbolicLink=new TarFileTypeFlag("2","Symbolic Link");this.CharacterSpecial=new TarFileTypeFlag("3","Character Special");this.BlockSpecial=new TarFileTypeFlag("4","Block Special");this.Directory=new TarFileTypeFlag("5","Directory");this.FIFO=new TarFileTypeFlag("6","FIFO");this.ContiguousFile=new TarFileTypeFlag("7","Contiguous File");this.LongFilePath=new TarFileTypeFlag("L","././@LongLink");this._All=[this.Normal,this.HardLink,this.SymbolicLink,this.CharacterSpecial,this.BlockSpecial,this.Directory,this.FIFO,this.ContiguousFile,this.LongFilePath,];for(var i=0;i<this._All.length;i++)
|
||||
{var item=this._All[i];this._All[item.id]=item;}}}
|
||||
class TarFileEntryHeader
|
||||
{constructor
|
||||
(fileName,fileMode,userIDOfOwner,userIDOfGroup,fileSizeInBytes,timeModifiedInUnixFormat,checksum,typeFlag,nameOfLinkedFile,uStarIndicator,uStarVersion,userNameOfOwner,groupNameOfOwner,deviceNumberMajor,deviceNumberMinor,filenamePrefix)
|
||||
{this.fileName=fileName;this.fileMode=fileMode;this.userIDOfOwner=userIDOfOwner;this.userIDOfGroup=userIDOfGroup;this.fileSizeInBytes=fileSizeInBytes;this.timeModifiedInUnixFormat=timeModifiedInUnixFormat;this.checksum=checksum;this.typeFlag=typeFlag;this.nameOfLinkedFile=nameOfLinkedFile;this.uStarIndicator=uStarIndicator;this.uStarVersion=uStarVersion;this.userNameOfOwner=userNameOfOwner;this.groupNameOfOwner=groupNameOfOwner;this.deviceNumberMajor=deviceNumberMajor;this.deviceNumberMinor=deviceNumberMinor;this.filenamePrefix=filenamePrefix;}
|
||||
static FileNameMaxLength=99;static SizeInBytes=500;static default()
|
||||
{var now=new Date();var unixEpoch=new Date(1970,1,1);var millisecondsSinceUnixEpoch=now-unixEpoch;var secondsSinceUnixEpoch=Math.floor
|
||||
(millisecondsSinceUnixEpoch/1000);var secondsSinceUnixEpochAsStringOctal=secondsSinceUnixEpoch.toString(8).padRight(12,"\0");var timeModifiedInUnixFormat=[];for(var i=0;i<secondsSinceUnixEpochAsStringOctal.length;i++)
|
||||
{var digitAsASCIICode=secondsSinceUnixEpochAsStringOctal.charCodeAt(i);timeModifiedInUnixFormat.push(digitAsASCIICode);}
|
||||
var returnValue=new TarFileEntryHeader
|
||||
("".padRight(100,"\0"),"0100777","0000000","0000000",0,timeModifiedInUnixFormat,0,TarFileTypeFlag.Instances().Normal,"","ustar","00","","","","","");return returnValue;};static directoryNew(directoryName)
|
||||
{var header=TarFileEntryHeader.default();header.fileName=directoryName;header.typeFlag=TarFileTypeFlag.Instances().Directory;header.fileSizeInBytes=0;header.checksumCalculate();return header;};static fileNew(fileName,fileContentsAsBytes)
|
||||
{var header=TarFileEntryHeader.default();header.fileName=fileName;header.typeFlag=TarFileTypeFlag.Instances().Normal;header.fileSizeInBytes=fileContentsAsBytes.length;header.checksumCalculate();return header;};static fromBytes(bytes)
|
||||
{var reader=new ByteStream(bytes);var fileName=reader.readString(100).trim();var fileMode=reader.readString(8);var userIDOfOwner=reader.readString(8);var userIDOfGroup=reader.readString(8);var fileSizeInBytesAsStringOctal=reader.readString(12);var timeModifiedInUnixFormat=reader.readBytes(12);var checksumAsStringOctal=reader.readString(8);var typeFlagValue=reader.readString(1);var nameOfLinkedFile=reader.readString(100);var uStarIndicator=reader.readString(6);var uStarVersion=reader.readString(2);var userNameOfOwner=reader.readString(32);var groupNameOfOwner=reader.readString(32);var deviceNumberMajor=reader.readString(8);var deviceNumberMinor=reader.readString(8);var filenamePrefix=reader.readString(155);var reserved=reader.readBytes(12);var fileSizeInBytes=parseInt
|
||||
(fileSizeInBytesAsStringOctal.trim(),8);var checksum=parseInt
|
||||
(checksumAsStringOctal,8);var typeFlags=TarFileTypeFlag.Instances()._All;var typeFlagID="_"+typeFlagValue;var typeFlag=typeFlags[typeFlagID];var returnValue=new TarFileEntryHeader
|
||||
(fileName,fileMode,userIDOfOwner,userIDOfGroup,fileSizeInBytes,timeModifiedInUnixFormat,checksum,typeFlag,nameOfLinkedFile,uStarIndicator,uStarVersion,userNameOfOwner,groupNameOfOwner,deviceNumberMajor,deviceNumberMinor,filenamePrefix);return returnValue;};checksumCalculate()
|
||||
{var thisAsBytes=this.toBytes();var offsetOfChecksumInBytes=148;var numberOfBytesInChecksum=8;var presumedValueOfEachChecksumByte=" ".charCodeAt(0);for(var i=0;i<numberOfBytesInChecksum;i++)
|
||||
{var offsetOfByte=offsetOfChecksumInBytes+i;thisAsBytes[offsetOfByte]=presumedValueOfEachChecksumByte;}
|
||||
var checksumSoFar=0;for(var i=0;i<thisAsBytes.length;i++)
|
||||
{var byteToAdd=thisAsBytes[i];checksumSoFar+=byteToAdd;}
|
||||
this.checksum=checksumSoFar;return this.checksum;};toBytes()
|
||||
{var headerAsBytes=[];var writer=new ByteStream(headerAsBytes);var fileSizeInBytesAsStringOctal=(this.fileSizeInBytes.toString(8)+"\0").padLeft(12,"0")
|
||||
var checksumAsStringOctal=(this.checksum.toString(8)+"\0 ").padLeft(8,"0");writer.writeString(this.fileName,100);writer.writeString(this.fileMode,8);writer.writeString(this.userIDOfOwner,8);writer.writeString(this.userIDOfGroup,8);writer.writeString(fileSizeInBytesAsStringOctal,12);writer.writeBytes(this.timeModifiedInUnixFormat);writer.writeString(checksumAsStringOctal,8);writer.writeString(this.typeFlag.value,1);writer.writeString(this.nameOfLinkedFile,100);writer.writeString(this.uStarIndicator,6);writer.writeString(this.uStarVersion,2);writer.writeString(this.userNameOfOwner,32);writer.writeString(this.groupNameOfOwner,32);writer.writeString(this.deviceNumberMajor,8);writer.writeString(this.deviceNumberMinor,8);writer.writeString(this.filenamePrefix,155);writer.writeString("".padRight(12,"\0"));return headerAsBytes;};toString()
|
||||
{var newline="\n";var returnValue="[TarFileEntryHeader "
|
||||
+"fileName='"+this.fileName+"' "
|
||||
+"typeFlag='"+(this.typeFlag==null?"err":this.typeFlag.name)+"' "
|
||||
+"fileSizeInBytes='"+this.fileSizeInBytes+"' "
|
||||
+"]"
|
||||
+newline;return returnValue;};}
|
||||
class TarFileEntry
|
||||
{constructor(header,dataAsBytes)
|
||||
{this.header=header;this.dataAsBytes=dataAsBytes;}
|
||||
static directoryNew(directoryName)
|
||||
{var header=TarFileEntryHeader.directoryNew(directoryName);var entry=new TarFileEntry(header,[]);return entry;};static fileNew(fileName,fileContentsAsBytes)
|
||||
{var header=TarFileEntryHeader.fileNew(fileName,fileContentsAsBytes);var entry=new TarFileEntry(header,fileContentsAsBytes);return entry;};static fromBytes(chunkAsBytes,reader)
|
||||
{var chunkSize=TarFile.ChunkSize;var header=TarFileEntryHeader.fromBytes
|
||||
(chunkAsBytes);var sizeOfDataEntryInBytesUnpadded=header.fileSizeInBytes;var numberOfChunksOccupiedByDataEntry=Math.ceil
|
||||
(sizeOfDataEntryInBytesUnpadded/chunkSize)
|
||||
var sizeOfDataEntryInBytesPadded=numberOfChunksOccupiedByDataEntry*chunkSize;var dataAsBytes=reader.readBytes
|
||||
(sizeOfDataEntryInBytesPadded).slice
|
||||
(0,sizeOfDataEntryInBytesUnpadded);var entry=new TarFileEntry(header,dataAsBytes);return entry;};static manyFromByteArrays
|
||||
(fileNamePrefix,fileNameSuffix,entriesAsByteArrays)
|
||||
{var returnValues=[];for(var i=0;i<entriesAsByteArrays.length;i++)
|
||||
{var entryAsBytes=entriesAsByteArrays[i];var entry=TarFileEntry.fileNew
|
||||
(fileNamePrefix+i+fileNameSuffix,entryAsBytes);returnValues.push(entry);}
|
||||
return returnValues;};download(event)
|
||||
{FileHelper.saveBytesAsFile
|
||||
(this.dataAsBytes,this.header.fileName);};remove(event)
|
||||
{alert("Not yet implemented!");};toBytes()
|
||||
{var entryAsBytes=[];var chunkSize=TarFile.ChunkSize;var headerAsBytes=this.header.toBytes();entryAsBytes=entryAsBytes.concat(headerAsBytes);entryAsBytes=entryAsBytes.concat(this.dataAsBytes);var sizeOfDataEntryInBytesUnpadded=this.header.fileSizeInBytes;var numberOfChunksOccupiedByDataEntry=Math.ceil
|
||||
(sizeOfDataEntryInBytesUnpadded/chunkSize)
|
||||
var sizeOfDataEntryInBytesPadded=numberOfChunksOccupiedByDataEntry*chunkSize;var numberOfBytesOfPadding=sizeOfDataEntryInBytesPadded-sizeOfDataEntryInBytesUnpadded;for(var i=0;i<numberOfBytesOfPadding;i++)
|
||||
{entryAsBytes.push(0);}
|
||||
return entryAsBytes;};toString()
|
||||
{var newline="\n";headerAsString=this.header.toString();var dataAsHexadecimalString=ByteHelper.bytesToStringHexadecimal
|
||||
(this.dataAsBytes);var returnValue="[TarFileEntry]"+newline
|
||||
+headerAsString
|
||||
+"[Data]"
|
||||
+dataAsHexadecimalString
|
||||
+"[/Data]"+newline
|
||||
+"[/TarFileEntry]"
|
||||
+newline;return returnValue}}
|
||||
class TarFile
|
||||
{constructor(fileName,entries)
|
||||
{this.fileName=fileName;this.entries=entries;}
|
||||
static ChunkSize=512;static fromBytes(fileName,bytes)
|
||||
{var reader=new ByteStream(bytes);var entries=[];var chunkSize=TarFile.ChunkSize;var numberOfConsecutiveZeroChunks=0;while(reader.hasMoreBytes()==true)
|
||||
{var chunkAsBytes=reader.readBytes(chunkSize);var areAllBytesInChunkZeroes=true;for(var b=0;b<chunkAsBytes.length;b++)
|
||||
{if(chunkAsBytes[b]!=0)
|
||||
{areAllBytesInChunkZeroes=false;break;}}
|
||||
if(areAllBytesInChunkZeroes==true)
|
||||
{numberOfConsecutiveZeroChunks++;if(numberOfConsecutiveZeroChunks==2)
|
||||
{break;}}
|
||||
else
|
||||
{numberOfConsecutiveZeroChunks=0;var entry=TarFileEntry.fromBytes(chunkAsBytes,reader);entries.push(entry);}}
|
||||
var returnValue=new TarFile(fileName,entries);returnValue.consolidateLongPathEntries();return returnValue;}
|
||||
static create(fileName)
|
||||
{return new TarFile
|
||||
(fileName,[]);}
|
||||
consolidateLongPathEntries()
|
||||
{var typeFlagLongPathName=TarFileTypeFlag.Instances().LongFilePath.name;var entries=this.entries;for(var i=0;i<entries.length;i++)
|
||||
{var entry=entries[i];if(entry.header.typeFlag.name==typeFlagLongPathName)
|
||||
{var entryNext=entries[i+1];entryNext.header.fileName=entry.dataAsBytes.reduce
|
||||
((a,b)=>a+=String.fromCharCode(b),"");entryNext.header.fileName=entryNext.header.fileName.replace(/\0/g,"");entries.splice(i,1);i--;}}}
|
||||
downloadAs(fileNameToSaveAs)
|
||||
{return FileHelper.saveBytesAsFile
|
||||
(this.toBytes(),fileNameToSaveAs)}
|
||||
entriesForDirectories()
|
||||
{return this.entries.filter(x=>x.header.typeFlag.name==TarFileTypeFlag.Instances().Directory);}
|
||||
toBytes()
|
||||
{this.toBytes_PrependLongPathEntriesAsNeeded();var fileAsBytes=[];var entriesAsByteArrays=this.entries.map(x=>x.toBytes());this.consolidateLongPathEntries();for(var i=0;i<entriesAsByteArrays.length;i++)
|
||||
{var entryAsBytes=entriesAsByteArrays[i];fileAsBytes=fileAsBytes.concat(entryAsBytes);}
var chunkSize=TarFile.ChunkSize;var numberOfZeroChunksToWrite=2;for(var i=0;i<numberOfZeroChunksToWrite;i++)
{for(var b=0;b<chunkSize;b++)
{fileAsBytes.push(0);}}
return fileAsBytes;}

toBytes_PrependLongPathEntriesAsNeeded()
{var typeFlagLongPath=TarFileTypeFlag.Instances().LongFilePath;var maxLength=TarFileEntryHeader.FileNameMaxLength;var entries=this.entries;for(var i=0;i<entries.length;i++)
{var entry=entries[i];var entryHeader=entry.header;var entryFileName=entryHeader.fileName;if(entryFileName.length>maxLength)
{var entryFileNameAsBytes=entryFileName.split("").map(x=>x.charCodeAt(0));var entryContainingLongPathToPrepend=TarFileEntry.fileNew(typeFlagLongPath.name,entryFileNameAsBytes);entryContainingLongPathToPrepend.header.typeFlag=typeFlagLongPath;entryContainingLongPathToPrepend.header.timeModifiedInUnixFormat=entryHeader.timeModifiedInUnixFormat;entryContainingLongPathToPrepend.header.checksumCalculate();entryHeader.fileName=entryFileName.substr(0,maxLength)+String.fromCharCode(0);entries.splice(i,0,entryContainingLongPathToPrepend);i++;}}}

toString()
{var newline="\n";var returnValue="[TarFile]"+newline;for(var i=0;i<this.entries.length;i++)
{var entry=this.entries[i];var entryAsString=entry.toString();returnValue+=entryAsString;}
returnValue+="[/TarFile]"+newline;return returnValue;}}

function StringExtensions()
{}

{String.prototype.padLeft=function(lengthToPadTo,charToPadWith)
{var returnValue=this;while(returnValue.length<lengthToPadTo)
{returnValue=charToPadWith+returnValue;}
return returnValue;}

String.prototype.padRight=function(lengthToPadTo,charToPadWith)
{var returnValue=this;while(returnValue.length<lengthToPadTo)
{returnValue+=charToPadWith;}
return returnValue;}}

class Globals
{static Instance=new Globals();}

class FileHelper
{static loadFileAsBytes(fileToLoad,callback)
{var fileReader=new FileReader();fileReader.onload=(fileLoadedEvent)=>{var fileLoadedAsBinaryString=fileLoadedEvent.target.result;var fileLoadedAsBytes=ByteHelper.stringUTF8ToBytes(fileLoadedAsBinaryString);callback(fileToLoad.name,fileLoadedAsBytes);};
fileReader.readAsBinaryString(fileToLoad);}

static loadFileAsText(fileToLoad,callback)
{var fileReader=new FileReader();fileReader.onload=(fileLoadedEvent)=>{var textFromFileLoaded=fileLoadedEvent.target.result;callback(fileToLoad.name,textFromFileLoaded);};fileReader.readAsText(fileToLoad);}

static saveBytesAsFile(bytesToWrite,fileNameToSaveAs)
{var bytesToWriteAsArrayBuffer=new ArrayBuffer(bytesToWrite.length);var bytesToWriteAsUIntArray=new Uint8Array(bytesToWriteAsArrayBuffer);for(var i=0;i<bytesToWrite.length;i++)
{bytesToWriteAsUIntArray[i]=bytesToWrite[i];}
var bytesToWriteAsBlob=new Blob([bytesToWriteAsArrayBuffer],{type:"application/type"});
return bytesToWriteAsBlob;
// var downloadLink=document.createElement("a");downloadLink.download=fileNameToSaveAs;downloadLink.href=window.URL.createObjectURL(bytesToWriteAsBlob);downloadLink.click();
}

static saveTextAsFile(textToSave,fileNameToSaveAs)
{var textToSaveAsBlob=new Blob([textToSave],{type:"text/plain"});var textToSaveAsURL=window.URL.createObjectURL(textToSaveAsBlob);var downloadLink=document.createElement("a");downloadLink.download=fileNameToSaveAs;downloadLink.href=textToSaveAsURL;downloadLink.click();}}

class ByteStream
{constructor(bytes)
{this.bytes=bytes;this.byteIndexCurrent=0;}

static BitsPerByte=8;static BitsPerByteTimesTwo=ByteStream.BitsPerByte*2;static BitsPerByteTimesThree=ByteStream.BitsPerByte*3;

hasMoreBytes()
{return(this.byteIndexCurrent<this.bytes.length);}

readBytes(numberOfBytesToRead)
{var returnValue=new Array(numberOfBytesToRead);for(var b=0;b<numberOfBytesToRead;b++)
{returnValue[b]=this.readByte();}
return returnValue;}

readByte()
{var returnValue=this.bytes[this.byteIndexCurrent];this.byteIndexCurrent++;return returnValue;}

readString(lengthOfString)
{var returnValue="";for(var i=0;i<lengthOfString;i++)
{var byte=this.readByte();if(byte!=0)
{var byteAsChar=String.fromCharCode(byte);returnValue+=byteAsChar;}}
return returnValue;}

writeBytes(bytesToWrite)
{for(var b=0;b<bytesToWrite.length;b++)
{this.bytes.push(bytesToWrite[b]);}
this.byteIndexCurrent=this.bytes.length;}

writeByte(byteToWrite)
{this.bytes.push(byteToWrite);this.byteIndexCurrent++;}

writeString(stringToWrite,lengthPadded)
{for(var i=0;i<stringToWrite.length;i++)
{var charAsByte=stringToWrite.charCodeAt(i);this.writeByte(charAsByte);}
var numberOfPaddingChars=lengthPadded-stringToWrite.length;for(var i=0;i<numberOfPaddingChars;i++)
{this.writeByte(0);}}}

class ByteHelper
{static stringUTF8ToBytes(stringToConvert)
{var bytes=[];for(var i=0;i<stringToConvert.length;i++)
{var byte=stringToConvert.charCodeAt(i);bytes.push(byte);}
return bytes;}

static bytesToStringUTF8(bytesToConvert)
{var returnValue="";for(var i=0;i<bytesToConvert.length;i++)
{var byte=bytesToConvert[i];var byteAsChar=String.fromCharCode(byte);returnValue+=byteAsChar;}
return returnValue;}}

function ArrayExtensions()
{}

{Array.prototype.remove=function(elementToRemove)
{this.splice(this.indexOf(elementToRemove),1);}}
@ -1,614 +0,0 @@
--[[
|
||||
LuCI - Lua Configuration Interface
|
||||
Copyright 2019 lisaac <https://github.com/lisaac/luci-app-dockerman>
|
||||
]]--
|
||||
|
||||
local docker = require "luci.model.docker"
|
||||
-- local uci = (require "luci.model.uci").cursor()
|
||||
|
||||
module("luci.controller.dockerman",package.seeall)
|
||||
|
||||
function index()
|
||||
entry({"admin", "docker"},
|
||||
alias("admin", "docker", "config"),
|
||||
_("Docker"),
|
||||
40).acl_depends = { "luci-app-dockerman" }
|
||||
|
||||
entry({"admin", "docker", "config"},cbi("dockerman/configuration"),_("Configuration"), 8).leaf=true
|
||||
|
||||
-- local uci = (require "luci.model.uci").cursor()
|
||||
-- if uci:get_bool("dockerd", "dockerman", "remote_endpoint") then
|
||||
-- local host = uci:get("dockerd", "dockerman", "remote_host")
|
||||
-- local port = uci:get("dockerd", "dockerman", "remote_port")
|
||||
-- if not host or not port then
|
||||
-- return
|
||||
-- end
|
||||
-- else
|
||||
-- local socket = uci:get("dockerd", "dockerman", "socket_path") or "/var/run/docker.sock"
|
||||
-- if socket and not nixio.fs.access(socket) then
|
||||
-- return
|
||||
-- end
|
||||
-- end
|
||||
|
||||
-- if (require "luci.model.docker").new():_ping().code ~= 200 then
|
||||
-- return
|
||||
-- end
|
||||
|
||||
entry({"admin", "docker", "overview"}, form("dockerman/overview"),_("Overview"), 2).leaf=true
|
||||
entry({"admin", "docker", "containers"}, form("dockerman/containers"), _("Containers"), 3).leaf=true
|
||||
entry({"admin", "docker", "images"}, form("dockerman/images"), _("Images"), 4).leaf=true
|
||||
entry({"admin", "docker", "networks"}, form("dockerman/networks"), _("Networks"), 5).leaf=true
|
||||
entry({"admin", "docker", "volumes"}, form("dockerman/volumes"), _("Volumes"), 6).leaf=true
|
||||
entry({"admin", "docker", "events"}, call("action_events"), _("Events"), 7)
|
||||
|
||||
entry({"admin", "docker", "newcontainer"}, form("dockerman/newcontainer")).leaf=true
|
||||
entry({"admin", "docker", "newnetwork"}, form("dockerman/newnetwork")).leaf=true
|
||||
entry({"admin", "docker", "container"}, form("dockerman/container")).leaf=true
|
||||
|
||||
entry({"admin", "docker", "container_stats"}, call("action_get_container_stats")).leaf=true
|
||||
entry({"admin", "docker", "containers_stats"}, call("action_get_containers_stats")).leaf=true
|
||||
entry({"admin", "docker", "get_system_df"}, call("action_get_system_df")).leaf=true
|
||||
entry({"admin", "docker", "container_get_archive"}, call("download_archive")).leaf=true
|
||||
entry({"admin", "docker", "container_put_archive"}, call("upload_archive")).leaf=true
|
||||
entry({"admin", "docker", "container_list_file"}, call("list_file")).leaf=true
|
||||
entry({"admin", "docker", "container_remove_file"}, call("remove_file")).leaf=true
|
||||
entry({"admin", "docker", "container_rename_file"}, call("rename_file")).leaf=true
|
||||
entry({"admin", "docker", "container_export"}, call("export_container")).leaf=true
|
||||
entry({"admin", "docker", "images_save"}, call("save_images")).leaf=true
|
||||
entry({"admin", "docker", "images_load"}, call("load_images")).leaf=true
|
||||
entry({"admin", "docker", "images_import"}, call("import_images")).leaf=true
|
||||
entry({"admin", "docker", "images_get_tags"}, call("get_image_tags")).leaf=true
|
||||
entry({"admin", "docker", "images_tag"}, call("tag_image")).leaf=true
|
||||
entry({"admin", "docker", "images_untag"}, call("untag_image")).leaf=true
|
||||
entry({"admin", "docker", "confirm"}, call("action_confirm")).leaf=true
|
||||
end
|
||||
|
||||
function action_get_system_df()
|
||||
local res = docker.new():df()
|
||||
luci.http.status(res.code, res.message)
|
||||
luci.http.prepare_content("application/json")
|
||||
luci.http.write_json(res.body)
|
||||
end
|
||||
|
||||
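-- List the contents of a directory inside a running container by invoking
-- `ls -lh` through `docker exec`, using either the local socket or the
-- configured remote TCP endpoint. Returns an array of raw `ls` output lines.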
function scandir(id, directory)
|
||||
local cmd_docker = luci.util.exec("command -v docker"):match("^.+docker") or nil
|
||||
if not cmd_docker or cmd_docker:match("^%s+$") then
|
||||
return
|
||||
end
|
||||
local i, t, popen = 0, {}, io.popen
|
||||
local uci = (require "luci.model.uci").cursor()
|
||||
local remote = uci:get_bool("dockerd", "dockerman", "remote_endpoint")
|
||||
local socket_path = not remote and uci:get("dockerd", "dockerman", "socket_path") or nil
|
||||
local host = remote and uci:get("dockerd", "dockerman", "remote_host") or nil
|
||||
local port = remote and uci:get("dockerd", "dockerman", "remote_port") or nil
|
||||
if remote and host and port then
|
||||
hosts = "tcp://" .. host .. ':'.. port
|
||||
elseif socket_path then
|
||||
hosts = "unix://" .. socket_path
|
||||
else
|
||||
return
|
||||
end
|
||||
local pfile = popen(cmd_docker .. ' -H "'.. hosts ..'" exec ' ..id .." ls -lh \""..directory.."\" | egrep -v '^total'")
|
||||
for fileinfo in pfile:lines() do
|
||||
i = i + 1
|
||||
t[i] = fileinfo
|
||||
end
|
||||
pfile:close()
|
||||
return t
|
||||
end
|
||||
|
||||
function list_response(id, path, success)
|
||||
luci.http.prepare_content("application/json")
|
||||
local result
|
||||
if success then
|
||||
local rv = scandir(id, path)
|
||||
result = {
|
||||
ec = 0,
|
||||
data = rv
|
||||
}
|
||||
else
|
||||
result = {
|
||||
ec = 1
|
||||
}
|
||||
end
|
||||
luci.http.write_json(result)
|
||||
end
|
||||
|
||||
function list_file(id)
|
||||
local path = luci.http.formvalue("path")
|
||||
list_response(id, path, true)
|
||||
end
|
||||
|
||||
function rename_file(id)
|
||||
local filepath = luci.http.formvalue("filepath")
|
||||
local newpath = luci.http.formvalue("newpath")
|
||||
local cmd_docker = luci.util.exec("command -v docker"):match("^.+docker") or nil
|
||||
if not cmd_docker or cmd_docker:match("^%s+$") then
|
||||
return
|
||||
end
|
||||
local uci = (require "luci.model.uci").cursor()
|
||||
local remote = uci:get_bool("dockerd", "dockerman", "remote_endpoint")
|
||||
local socket_path = not remote and uci:get("dockerd", "dockerman", "socket_path") or nil
|
||||
local host = remote and uci:get("dockerd", "dockerman", "remote_host") or nil
|
||||
local port = remote and uci:get("dockerd", "dockerman", "remote_port") or nil
|
||||
if remote and host and port then
|
||||
hosts = "tcp://" .. host .. ':'.. port
|
||||
elseif socket_path then
|
||||
hosts = "unix://" .. socket_path
|
||||
else
|
||||
return
|
||||
end
|
||||
local success = os.execute(cmd_docker .. ' -H "'.. hosts ..'" exec '.. id ..' mv "'..filepath..'" "'..newpath..'"')
|
||||
list_response(id, nixio.fs.dirname(filepath), success)
|
||||
end
|
||||
|
||||
function remove_file(id)
|
||||
local path = luci.http.formvalue("path")
|
||||
local isdir = luci.http.formvalue("isdir")
|
||||
local cmd_docker = luci.util.exec("command -v docker"):match("^.+docker") or nil
|
||||
if not cmd_docker or cmd_docker:match("^%s+$") then
|
||||
return
|
||||
end
|
||||
local uci = (require "luci.model.uci").cursor()
|
||||
local remote = uci:get_bool("dockerd", "dockerman", "remote_endpoint")
|
||||
local socket_path = not remote and uci:get("dockerd", "dockerman", "socket_path") or nil
|
||||
local host = remote and uci:get("dockerd", "dockerman", "remote_host") or nil
|
||||
local port = remote and uci:get("dockerd", "dockerman", "remote_port") or nil
|
||||
if remote and host and port then
|
||||
hosts = "tcp://" .. host .. ':'.. port
|
||||
elseif socket_path then
|
||||
hosts = "unix://" .. socket_path
|
||||
else
|
||||
return
|
||||
end
|
||||
path = path:gsub("<>", "/")
|
||||
path = path:gsub(" ", "\ ")
|
||||
local success
|
||||
if isdir then
|
||||
success = os.execute(cmd_docker .. ' -H "'.. hosts ..'" exec '.. id ..' rm -r "'..path..'"')
|
||||
else
|
||||
success = os.execute(cmd_docker .. ' -H "'.. hosts ..'" exec '.. id ..' rm "'..path..'"')
|
||||
end
|
||||
list_response(id, nixio.fs.dirname(path), success)
|
||||
end
|
||||
|
||||
function action_events()
|
||||
local logs = ""
|
||||
local query ={}
|
||||
|
||||
local dk = docker.new()
|
||||
query["until"] = os.time()
|
||||
local events = dk:events({query = query})
|
||||
|
||||
if events.code == 200 then
|
||||
for _, v in ipairs(events.body) do
|
||||
local date = "unknown"
|
||||
if v and v.time then
|
||||
date = os.date("%Y-%m-%d %H:%M:%S", v.time)
|
||||
end
|
||||
|
||||
local name = v.Actor.Attributes.name or "unknown"
|
||||
local action = v.Action or "unknown"
|
||||
|
||||
if v and v.Type == "container" then
|
||||
local id = v.Actor.ID or "unknown"
|
||||
logs = logs .. string.format("[%s] %s %s Container ID: %s Container Name: %s\n", date, v.Type, action, id, name)
|
||||
elseif v.Type == "network" then
|
||||
local container = v.Actor.Attributes.container or "unknown"
|
||||
local network = v.Actor.Attributes.type or "unknown"
|
||||
logs = logs .. string.format("[%s] %s %s Container ID: %s Network Name: %s Network type: %s\n", date, v.Type, action, container, name, network)
|
||||
elseif v.Type == "image" then
|
||||
local id = v.Actor.ID or "unknown"
|
||||
logs = logs .. string.format("[%s] %s %s Image: %s Image name: %s\n", date, v.Type, action, id, name)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
luci.template.render("dockerman/logs", {self={syslog = logs, title="Events"}})
|
||||
end
|
||||
|
||||
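-- Compute the CPU percentage the same way `docker stats` does: the container's
-- CPU-time delta divided by the system CPU-time delta since the previous sample,
-- scaled by the number of online CPUs and expressed as a percentage.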
local calculate_cpu_percent = function(d)
|
||||
if type(d) ~= "table" then
|
||||
return
|
||||
end
|
||||
|
||||
local cpu_count = tonumber(d["cpu_stats"]["online_cpus"])
|
||||
local cpu_percent = 0.0
|
||||
local cpu_delta = tonumber(d["cpu_stats"]["cpu_usage"]["total_usage"]) - tonumber(d["precpu_stats"]["cpu_usage"]["total_usage"])
|
||||
local system_delta = tonumber(d["cpu_stats"]["system_cpu_usage"]) -- tonumber(d["precpu_stats"]["system_cpu_usage"])
|
||||
if system_delta > 0.0 then
|
||||
cpu_percent = string.format("%.2f", cpu_delta / system_delta * 100.0 * cpu_count)
|
||||
end
|
||||
|
||||
return cpu_percent
|
||||
end
|
||||
|
||||
local get_memory = function(d)
|
||||
if type(d) ~= "table" then
|
||||
return
|
||||
end
|
||||
|
||||
-- local limit = string.format("%.2f", tonumber(d["memory_stats"]["limit"]) / 1024 / 1024)
|
||||
-- local usage = string.format("%.2f", (tonumber(d["memory_stats"]["usage"]) - tonumber(d["memory_stats"]["stats"]["total_cache"])) / 1024 / 1024)
|
||||
-- return usage .. "MB / " .. limit.. "MB"
|
||||
|
||||
local limit =tonumber(d["memory_stats"]["limit"])
|
||||
local usage = tonumber(d["memory_stats"]["usage"])
|
||||
-- - tonumber(d["memory_stats"]["stats"]["total_cache"])
|
||||
|
||||
return usage, limit
|
||||
end
|
||||
|
||||
local get_rx_tx = function(d)
|
||||
if type(d) ~="table" then
|
||||
return
|
||||
end
|
||||
|
||||
local data = {}
|
||||
if type(d["networks"]) == "table" then
|
||||
for e, v in pairs(d["networks"]) do
|
||||
data[e] = {
|
||||
bw_tx = tonumber(v.tx_bytes),
|
||||
bw_rx = tonumber(v.rx_bytes)
|
||||
}
|
||||
end
|
||||
end
|
||||
|
||||
return data
|
||||
end
|
||||
|
||||
local function get_stat(container_id)
|
||||
if container_id then
|
||||
local dk = docker.new()
|
||||
local response = dk.containers:inspect({id = container_id})
|
||||
if response.code == 200 and response.body.State.Running then
|
||||
response = dk.containers:stats({id = container_id, query = {stream = false, ["one-shot"] = true}})
|
||||
if response.code == 200 then
|
||||
local container_stats = response.body
|
||||
local cpu_percent = calculate_cpu_percent(container_stats)
|
||||
local mem_useage, mem_limit = get_memory(container_stats)
|
||||
local bw_rxtx = get_rx_tx(container_stats)
|
||||
return response.code, response.body.message, {
|
||||
cpu_percent = cpu_percent,
|
||||
memory = {
|
||||
mem_useage = mem_useage,
|
||||
mem_limit = mem_limit
|
||||
},
|
||||
bw_rxtx = bw_rxtx
|
||||
}
|
||||
else
|
||||
return response.code, response.body.message
|
||||
end
|
||||
else
|
||||
if response.code == 200 then
|
||||
return 500, "container "..container_id.." not running"
|
||||
else
|
||||
return response.code, response.body.message
|
||||
end
|
||||
end
|
||||
else
|
||||
return 404, "No container name or id"
|
||||
end
|
||||
end
|
||||
function action_get_container_stats(container_id)
|
||||
local code, msg, res = get_stat(container_id)
|
||||
luci.http.status(code, msg)
|
||||
luci.http.prepare_content("application/json")
|
||||
luci.http.write_json(res)
|
||||
end
|
||||
|
||||
function action_get_containers_stats()
|
||||
local res = luci.http.formvalue("containers") or ""
|
||||
local stats = {}
|
||||
res = luci.jsonc.parse(res)
|
||||
if res and type(res) == "table" then
|
||||
for i, v in ipairs(res) do
|
||||
_,_,stats[v] = get_stat(v)
|
||||
end
|
||||
end
|
||||
luci.http.status(200, "OK")
|
||||
luci.http.prepare_content("application/json")
|
||||
luci.http.write_json(stats)
|
||||
end
|
||||
|
||||
function action_confirm()
|
||||
local data = docker:read_status()
|
||||
if data then
|
||||
data = data:gsub("\n","<br>"):gsub(" "," ")
|
||||
code = 202
|
||||
msg = data
|
||||
else
|
||||
code = 200
|
||||
msg = "finish"
|
||||
data = "finish"
|
||||
end
|
||||
|
||||
luci.http.status(code, msg)
|
||||
luci.http.prepare_content("application/json")
|
||||
luci.http.write_json({info = data})
|
||||
end
|
||||
|
||||
function export_container(id)
|
||||
local dk = docker.new()
|
||||
local first
|
||||
|
||||
local cb = function(res, chunk)
|
||||
if res.code == 200 then
|
||||
if not first then
|
||||
first = true
|
||||
luci.http.header('Content-Disposition', 'inline; filename="'.. id ..'.tar"')
|
||||
luci.http.header('Content-Type', 'application\/x-tar')
|
||||
end
|
||||
luci.ltn12.pump.all(chunk, luci.http.write)
|
||||
else
|
||||
if not first then
|
||||
first = true
|
||||
luci.http.prepare_content("text/plain")
|
||||
end
|
||||
luci.ltn12.pump.all(chunk, luci.http.write)
|
||||
end
|
||||
end
|
||||
|
||||
local res = dk.containers:export({id = id}, cb)
|
||||
end
|
||||
|
||||
function download_archive()
|
||||
local id = luci.http.formvalue("id")
|
||||
local path = luci.http.formvalue("path")
|
||||
local filename = luci.http.formvalue("filename") or "archive"
|
||||
local dk = docker.new()
|
||||
local first
|
||||
|
||||
local cb = function(res, chunk)
|
||||
if res and res.code and res.code == 200 then
|
||||
if not first then
|
||||
first = true
|
||||
luci.http.header('Content-Disposition', 'inline; filename="'.. filename .. '.tar"')
|
||||
luci.http.header('Content-Type', 'application\/x-tar')
|
||||
end
|
||||
luci.ltn12.pump.all(chunk, luci.http.write)
|
||||
else
|
||||
if not first then
|
||||
first = true
|
||||
luci.http.status(res and res.code or 500, msg or "unknown")
|
||||
luci.http.prepare_content("text/plain")
|
||||
end
|
||||
luci.ltn12.pump.all(chunk, luci.http.write)
|
||||
end
|
||||
end
|
||||
|
||||
local res = dk.containers:get_archive({
|
||||
id = id,
|
||||
query = {
|
||||
path = luci.http.urlencode(path)
|
||||
}
|
||||
}, cb)
|
||||
end
|
||||
|
||||
function upload_archive(container_id)
|
||||
local path = luci.http.formvalue("upload-path")
|
||||
local dk = docker.new()
|
||||
local ltn12 = require "luci.ltn12"
|
||||
|
||||
local rec_send = function(sinkout)
|
||||
luci.http.setfilehandler(function (meta, chunk, eof)
|
||||
if chunk then
|
||||
ltn12.pump.step(ltn12.source.string(chunk), sinkout)
|
||||
end
|
||||
end)
|
||||
end
|
||||
|
||||
local res = dk.containers:put_archive({
|
||||
id = container_id,
|
||||
query = {
|
||||
path = luci.http.urlencode(path)
|
||||
},
|
||||
body = rec_send
|
||||
})
|
||||
|
||||
local msg = res and res.message or res.body and res.body.message or nil
|
||||
luci.http.status(res and res.code or 500, msg or "unknown")
|
||||
luci.http.prepare_content("application/json")
|
||||
luci.http.write_json({message = msg or "unknown"})
|
||||
end
|
||||
|
||||
-- function save_images()
|
||||
-- local names = luci.http.formvalue("names")
|
||||
-- local dk = docker.new()
|
||||
-- local first
|
||||
|
||||
-- local cb = function(res, chunk)
|
||||
-- if res.code == 200 then
|
||||
-- if not first then
|
||||
-- first = true
|
||||
-- luci.http.status(res.code, res.message)
|
||||
-- luci.http.header('Content-Disposition', 'inline; filename="'.. "images" ..'.tar"')
|
||||
-- luci.http.header('Content-Type', 'application\/x-tar')
|
||||
-- end
|
||||
-- luci.ltn12.pump.all(chunk, luci.http.write)
|
||||
-- else
|
||||
-- if not first then
|
||||
-- first = true
|
||||
-- luci.http.prepare_content("text/plain")
|
||||
-- end
|
||||
-- luci.ltn12.pump.all(chunk, luci.http.write)
|
||||
-- end
|
||||
-- end
|
||||
|
||||
-- docker:write_status("Images: saving" .. " " .. names .. "...")
|
||||
-- local res = dk.images:get({
|
||||
-- query = {
|
||||
-- names = luci.http.urlencode(names)
|
||||
-- }
|
||||
-- }, cb)
|
||||
-- docker:clear_status()
|
||||
|
||||
-- local msg = res and res.body and res.body.message or nil
|
||||
-- luci.http.status(res.code, msg)
|
||||
-- luci.http.prepare_content("application/json")
|
||||
-- luci.http.write_json({message = msg})
|
||||
-- end
|
||||
|
||||
function load_images()
|
||||
local archive = luci.http.formvalue("upload-archive")
|
||||
local dk = docker.new()
|
||||
local ltn12 = require "luci.ltn12"
|
||||
|
||||
local rec_send = function(sinkout)
|
||||
luci.http.setfilehandler(function (meta, chunk, eof)
|
||||
if chunk then
|
||||
ltn12.pump.step(ltn12.source.string(chunk), sinkout)
|
||||
end
|
||||
end)
|
||||
end
|
||||
|
||||
docker:write_status("Images: loading...")
|
||||
local res = dk.images:load({body = rec_send})
|
||||
local msg = res and res.body and ( res.body.message or res.body.stream or res.body.error ) or nil
|
||||
if res and res.code == 200 and msg and msg:match("Loaded image ID") then
|
||||
docker:clear_status()
|
||||
else
|
||||
docker:append_status("code:" .. (res and res.code or "500") .." ".. (msg or "unknow"))
|
||||
end
|
||||
|
||||
luci.http.status(res and res.code or 500, msg or "unknown")
|
||||
luci.http.prepare_content("application/json")
|
||||
luci.http.write_json({message = msg or "unknown"})
|
||||
end
|
||||
|
||||
function import_images()
|
||||
local src = luci.http.formvalue("src")
|
||||
local itag = luci.http.formvalue("tag")
|
||||
local dk = docker.new()
|
||||
local ltn12 = require "luci.ltn12"
|
||||
|
||||
local rec_send = function(sinkout)
|
||||
luci.http.setfilehandler(function (meta, chunk, eof)
|
||||
if chunk then
|
||||
ltn12.pump.step(ltn12.source.string(chunk), sinkout)
|
||||
end
|
||||
end)
|
||||
end
|
||||
|
||||
docker:write_status("Images: importing".. " ".. itag .."...\n")
|
||||
local repo = itag and itag:match("^([^:]+)")
|
||||
local tag = itag and itag:match("^[^:]-:([^:]+)")
|
||||
local res = dk.images:create({
|
||||
query = {
|
||||
fromSrc = luci.http.urlencode(src or "-"),
|
||||
repo = repo or nil,
|
||||
tag = tag or nil
|
||||
},
|
||||
body = not src and rec_send or nil
|
||||
}, docker.import_image_show_status_cb)
|
||||
|
||||
local msg = res and res.body and res.body.message or nil
|
||||
if not msg and #res.body == 0 then
|
||||
msg = res.body.status or res.body.error
|
||||
elseif not msg and #res.body >= 1 then
|
||||
msg = res.body[#res.body].status or res.body[#res.body].error
|
||||
end
|
||||
|
||||
if res.code == 200 and msg and msg:match("sha256:") then
|
||||
docker:clear_status()
|
||||
else
|
||||
docker:append_status("code:" .. (res and res.code or "500") .." ".. (msg or "unknow"))
|
||||
end
|
||||
|
||||
luci.http.status(res and res.code or 500, msg or "unknown")
|
||||
luci.http.prepare_content("application/json")
|
||||
luci.http.write_json({message = msg or "unknown"})
|
||||
end
|
||||
|
||||
function get_image_tags(image_id)
|
||||
if not image_id then
|
||||
luci.http.status(400, "no image id")
|
||||
luci.http.prepare_content("application/json")
|
||||
luci.http.write_json({message = "no image id"})
|
||||
return
|
||||
end
|
||||
|
||||
local dk = docker.new()
|
||||
local res = dk.images:inspect({
|
||||
id = image_id
|
||||
})
|
||||
local msg = res and res.body and res.body.message or nil
|
||||
luci.http.status(res and res.code or 500, msg or "unknown")
|
||||
luci.http.prepare_content("application/json")
|
||||
|
||||
if res.code == 200 then
|
||||
local tags = res.body.RepoTags
|
||||
luci.http.write_json({tags = tags})
|
||||
else
|
||||
local msg = res and res.body and res.body.message or nil
|
||||
luci.http.write_json({message = msg or "unknown"})
|
||||
end
|
||||
end
|
||||
|
||||
function tag_image(image_id)
|
||||
local src = luci.http.formvalue("tag")
|
||||
local image_id = image_id or luci.http.formvalue("id")
|
||||
|
||||
if type(src) ~= "string" or not image_id then
|
||||
luci.http.status(400, "no image id or tag")
|
||||
luci.http.prepare_content("application/json")
|
||||
luci.http.write_json({message = "no image id or tag"})
|
||||
return
|
||||
end
|
||||
|
||||
local repo = src:match("^([^:]+)")
|
||||
local tag = src:match("^[^:]-:([^:]+)")
|
||||
local dk = docker.new()
|
||||
local res = dk.images:tag({
|
||||
id = image_id,
|
||||
query={
|
||||
repo=repo,
|
||||
tag=tag
|
||||
}
|
||||
})
|
||||
local msg = res and res.body and res.body.message or nil
|
||||
luci.http.status(res and res.code or 500, msg or "unknown")
|
||||
luci.http.prepare_content("application/json")
|
||||
|
||||
if res.code == 201 then
|
||||
local tags = res.body.RepoTags
|
||||
luci.http.write_json({tags = tags})
|
||||
else
|
||||
local msg = res and res.body and res.body.message or nil
|
||||
luci.http.write_json({message = msg or "unknown"})
|
||||
end
|
||||
end
|
||||
|
||||
function untag_image(tag)
|
||||
local tag = tag or luci.http.formvalue("tag")
|
||||
|
||||
if not tag then
|
||||
luci.http.status(400, "no tag name")
|
||||
luci.http.prepare_content("application/json")
|
||||
luci.http.write_json({message = "no tag name"})
|
||||
return
|
||||
end
|
||||
|
||||
local dk = docker.new()
|
||||
local res = dk.images:inspect({name = tag})
|
||||
|
||||
if res.code == 200 then
|
||||
local tags = res.body.RepoTags
|
||||
if #tags > 1 then
|
||||
local r = dk.images:remove({name = tag})
|
||||
local msg = r and r.body and r.body.message or nil
|
||||
luci.http.status(r.code, msg)
|
||||
luci.http.prepare_content("application/json")
|
||||
luci.http.write_json({message = msg})
|
||||
else
|
||||
luci.http.status(500, "Cannot remove the last tag")
|
||||
luci.http.prepare_content("application/json")
|
||||
luci.http.write_json({message = "Cannot remove the last tag"})
|
||||
end
|
||||
else
|
||||
local msg = res and res.body and res.body.message or nil
|
||||
luci.http.status(res and res.code or 500, msg or "unknown")
|
||||
luci.http.prepare_content("application/json")
|
||||
luci.http.write_json({message = msg or "unknown"})
|
||||
end
|
||||
end
|
@ -1,152 +0,0 @@
|
||||
--[[
|
||||
LuCI - Lua Configuration Interface
|
||||
Copyright 2021 Florian Eckert <fe@dev.tdt.de>
|
||||
Copyright 2021 lisaac <lisaac.cn@gmail.com>
|
||||
]]--
|
||||
|
||||
local uci = (require "luci.model.uci").cursor()
|
||||
|
||||
local m, s, o
|
||||
|
||||
m = Map("dockerd",
|
||||
translate("Docker - Configuration"),
|
||||
translate("DockerMan is a simple docker manager client for LuCI"))
|
||||
|
||||
if nixio.fs.access("/usr/bin/dockerd") and not m.uci:get_bool("dockerd", "dockerman", "remote_endpoint") then
|
||||
s = m:section(NamedSection, "globals", "section", translate("Docker Daemon settings"))
|
||||
|
||||
o = s:option(Flag, "auto_start", translate("Auto start"))
|
||||
o.rmempty = false
|
||||
o.write = function(self, section, value)
|
||||
if value == "1" then
|
||||
luci.util.exec("/etc/init.d/dockerd enable")
|
||||
else
|
||||
luci.util.exec("/etc/init.d/dockerd disable")
|
||||
end
|
||||
m.uci:set("dockerd", "globals", "auto_start", value)
|
||||
end
|
||||
|
||||
o = s:option(Value, "data_root",
|
||||
translate("Docker Root Dir"))
|
||||
o.placeholder = "/opt/docker/"
|
||||
o:depends("remote_endpoint", 0)
|
||||
|
||||
o = s:option(Value, "bip",
|
||||
translate("Default bridge"),
|
||||
translate("Configure the default bridge network"))
|
||||
o.placeholder = "172.17.0.1/16"
|
||||
o.datatype = "ipaddr"
|
||||
o:depends("remote_endpoint", 0)
|
||||
|
||||
o = s:option(DynamicList, "registry_mirrors",
|
||||
translate("Registry Mirrors"),
|
||||
translate("It replaces the daemon registry mirrors with a new set of registry mirrors"))
|
||||
o:value("https://hub-mirror.c.163.com", "https://hub-mirror.c.163.com")
|
||||
o:depends("remote_endpoint", 0)
|
||||
o.forcewrite = true
|
||||
|
||||
o = s:option(ListValue, "log_level",
|
||||
translate("Log Level"),
|
||||
translate('Set the logging level'))
|
||||
o:value("debug", translate("Debug"))
|
||||
o:value("", translate("Info")) -- This is the default debug level from the deamon is optin is not set
|
||||
o:value("warn", translate("Warning"))
|
||||
o:value("error", translate("Error"))
|
||||
o:value("fatal", translate("Fatal"))
|
||||
o.rmempty = true
|
||||
o:depends("remote_endpoint", 0)
|
||||
|
||||
o = s:option(DynamicList, "hosts",
|
||||
translate("Client connection"),
|
||||
translate('Specifies where the Docker daemon will listen for client connections (default: unix:///var/run/docker.sock)'))
|
||||
o:value("unix:///var/run/docker.sock", "unix:///var/run/docker.sock")
|
||||
o:value("tcp://0.0.0.0:2375", "tcp://0.0.0.0:2375")
|
||||
o.rmempty = true
|
||||
o:depends("remote_endpoint", 0)
|
||||
end
|
||||
|
||||
s = m:section(NamedSection, "dockerman", "section", translate("DockerMan settings"))
|
||||
s:tab("ac", translate("Access Control"))
|
||||
s:tab("dockerman", translate("DockerMan"))
|
||||
|
||||
o = s:taboption("dockerman", Flag, "remote_endpoint",
|
||||
translate("Remote Endpoint"),
|
||||
translate("Connect to remote docker endpoint"))
|
||||
o.rmempty = false
|
||||
o.validate = function(self, value, sid)
|
||||
local res = luci.http.formvaluetable("cbid.dockerd")
|
||||
if res["dockerman.remote_endpoint"] == "1" then
|
||||
if res["dockerman.remote_port"] and res["dockerman.remote_port"] ~= "" and res["dockerman.remote_host"] and res["dockerman.remote_host"] ~= "" then
|
||||
return 1
|
||||
else
|
||||
return nil, translate("Please input the PORT or HOST IP of remote docker instance!")
|
||||
end
|
||||
else
|
||||
if not res["dockerman.socket_path"] then
|
||||
return nil, translate("Please input the SOCKET PATH of docker daemon!")
|
||||
end
|
||||
end
|
||||
return 0
|
||||
end
|
||||
|
||||
o = s:taboption("dockerman", Value, "socket_path",
|
||||
translate("Docker Socket Path"))
|
||||
o.default = "/var/run/docker.sock"
|
||||
o.placeholder = "/var/run/docker.sock"
|
||||
o:depends("remote_endpoint", 0)
|
||||
|
||||
o = s:taboption("dockerman", Value, "remote_host",
|
||||
translate("Remote Host"),
|
||||
translate("Host or IP Address for the connection to a remote docker instance"))
|
||||
o.datatype = "host"
|
||||
o.placeholder = "10.1.1.2"
|
||||
o:depends("remote_endpoint", 1)
|
||||
|
||||
o = s:taboption("dockerman", Value, "remote_port",
|
||||
translate("Remote Port"))
|
||||
o.placeholder = "2375"
|
||||
o.datatype = "port"
|
||||
o:depends("remote_endpoint", 1)
|
||||
|
||||
-- o = s:taboption("dockerman", Value, "status_path", translate("Action Status Tempfile Path"), translate("Where you want to save the docker status file"))
|
||||
-- o = s:taboption("dockerman", Flag, "debug", translate("Enable Debug"), translate("For debug, It shows all docker API actions of luci-app-dockerman in Debug Tempfile Path"))
|
||||
-- o.enabled="true"
|
||||
-- o.disabled="false"
|
||||
-- o = s:taboption("dockerman", Value, "debug_path", translate("Debug Tempfile Path"), translate("Where you want to save the debug tempfile"))
|
||||
|
||||
if nixio.fs.access("/usr/bin/dockerd") and not m.uci:get_bool("dockerd", "dockerman", "remote_endpoint") then
|
||||
o = s:taboption("ac", DynamicList, "ac_allowed_interface", translate("Allowed access interfaces"), translate("Which interface(s) can access containers under the bridge network, fill-in Interface Name"))
|
||||
local interfaces = luci.sys and luci.sys.net and luci.sys.net.devices() or {}
|
||||
for i, v in ipairs(interfaces) do
|
||||
o:value(v, v)
|
||||
end
|
||||
o = s:taboption("ac", DynamicList, "ac_allowed_ports", translate("Ports allowed to be accessed"), translate("Which Port(s) can be accessed, it's not restricted by the Allowed Access interfaces configuration. Use this configuration with caution!"))
|
||||
o.placeholder = "8080/tcp"
|
||||
local docker = require "luci.model.docker"
|
||||
local containers, res, lost_state
|
||||
local dk = docker.new()
|
||||
if dk:_ping().code ~= 200 then
|
||||
lost_state = true
|
||||
else
|
||||
lost_state = false
|
||||
res = dk.containers:list()
|
||||
if res and res.code and res.code < 300 then
|
||||
containers = res.body
|
||||
end
|
||||
end
|
||||
|
||||
-- allowed_container.placeholder = "container name_or_id"
|
||||
if containers then
|
||||
for i, v in ipairs(containers) do
|
||||
if v.State == "running" and v.Ports then
|
||||
for _, port in ipairs(v.Ports) do
|
||||
if port.PublicPort and port.IP and not string.find(port.IP,":") then
|
||||
o:value(port.PublicPort.."/"..port.Type, v.Names[1]:sub(2) .. " | " .. port.PublicPort .. " | " .. port.Type)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
return m
|
@ -1,810 +0,0 @@
|
||||
--[[
|
||||
LuCI - Lua Configuration Interface
|
||||
Copyright 2019 lisaac <https://github.com/lisaac/luci-app-dockerman>
|
||||
]]--
|
||||
|
||||
require "luci.util"
|
||||
|
||||
local docker = require "luci.model.docker"
|
||||
local dk = docker.new()
|
||||
|
||||
container_id = arg[1]
|
||||
local action = arg[2] or "info"
|
||||
|
||||
local m, s, o
|
||||
local images, networks, container_info, res
|
||||
|
||||
if not container_id then
|
||||
return
|
||||
end
|
||||
|
||||
res = dk.containers:inspect({id = container_id})
|
||||
if res.code < 300 then
|
||||
container_info = res.body
|
||||
else
|
||||
return
|
||||
end
|
||||
|
||||
local get_ports = function(d)
|
||||
local data
|
||||
|
||||
if d.HostConfig and d.HostConfig.PortBindings then
|
||||
for inter, out in pairs(d.HostConfig.PortBindings) do
|
||||
data = (data and (data .. "<br>") or "") .. out[1]["HostPort"] .. ":" .. inter
|
||||
end
|
||||
end
|
||||
|
||||
return data
|
||||
end
|
||||
|
||||
local get_env = function(d)
|
||||
local data
|
||||
|
||||
if d.Config and d.Config.Env then
|
||||
for _,v in ipairs(d.Config.Env) do
|
||||
data = (data and (data .. "<br>") or "") .. v
|
||||
end
|
||||
end
|
||||
|
||||
return data
|
||||
end
|
||||
|
||||
local get_command = function(d)
|
||||
local data
|
||||
|
||||
if d.Config and d.Config.Cmd then
|
||||
for _,v in ipairs(d.Config.Cmd) do
|
||||
data = (data and (data .. " ") or "") .. v
|
||||
end
|
||||
end
|
||||
|
||||
return data
|
||||
end
|
||||
|
||||
local get_mounts = function(d)
|
||||
local data
|
||||
|
||||
if d.Mounts then
|
||||
for _,v in ipairs(d.Mounts) do
|
||||
local v_sorce_d, v_dest_d
|
||||
local v_sorce = ""
|
||||
local v_dest = ""
|
||||
for v_sorce_d in v["Source"]:gmatch('[^/]+') do
|
||||
if v_sorce_d and #v_sorce_d > 12 then
|
||||
v_sorce = v_sorce .. "/" .. v_sorce_d:sub(1,12) .. "..."
|
||||
else
|
||||
v_sorce = v_sorce .."/".. v_sorce_d
|
||||
end
|
||||
end
|
||||
for v_dest_d in v["Destination"]:gmatch('[^/]+') do
|
||||
if v_dest_d and #v_dest_d > 12 then
|
||||
v_dest = v_dest .. "/" .. v_dest_d:sub(1,12) .. "..."
|
||||
else
|
||||
v_dest = v_dest .."/".. v_dest_d
|
||||
end
|
||||
end
|
||||
data = (data and (data .. "<br>") or "") .. v_sorce .. ":" .. v["Destination"] .. (v["Mode"] ~= "" and (":" .. v["Mode"]) or "")
|
||||
end
|
||||
end
|
||||
|
||||
return data
|
||||
end
|
||||
|
||||
local get_device = function(d)
|
||||
local data
|
||||
|
||||
if d.HostConfig and d.HostConfig.Devices then
|
||||
for _,v in ipairs(d.HostConfig.Devices) do
|
||||
data = (data and (data .. "<br>") or "") .. v["PathOnHost"] .. ":" .. v["PathInContainer"] .. (v["CgroupPermissions"] ~= "" and (":" .. v["CgroupPermissions"]) or "")
|
||||
end
|
||||
end
|
||||
|
||||
return data
|
||||
end
|
||||
|
||||
local get_links = function(d)
|
||||
local data
|
||||
|
||||
if d.HostConfig and d.HostConfig.Links then
|
||||
for _,v in ipairs(d.HostConfig.Links) do
|
||||
data = (data and (data .. "<br>") or "") .. v
|
||||
end
|
||||
end
|
||||
|
||||
return data
|
||||
end
|
||||
|
||||
local get_tmpfs = function(d)
|
||||
local data
|
||||
|
||||
if d.HostConfig and d.HostConfig.Tmpfs then
|
||||
for k, v in pairs(d.HostConfig.Tmpfs) do
|
||||
data = (data and (data .. "<br>") or "") .. k .. (v~="" and ":" or "")..v
|
||||
end
|
||||
end
|
||||
|
||||
return data
|
||||
end
|
||||
|
||||
local get_dns = function(d)
|
||||
local data
|
||||
|
||||
if d.HostConfig and d.HostConfig.Dns then
|
||||
for _, v in ipairs(d.HostConfig.Dns) do
|
||||
data = (data and (data .. "<br>") or "") .. v
|
||||
end
|
||||
end
|
||||
|
||||
return data
|
||||
end
|
||||
|
||||
local get_sysctl = function(d)
|
||||
local data
|
||||
|
||||
if d.HostConfig and d.HostConfig.Sysctls then
|
||||
for k, v in pairs(d.HostConfig.Sysctls) do
|
||||
data = (data and (data .. "<br>") or "") .. k..":"..v
|
||||
end
|
||||
end
|
||||
|
||||
return data
|
||||
end
|
||||
|
||||
local get_networks = function(d)
|
||||
local data={}
|
||||
|
||||
if d.NetworkSettings and d.NetworkSettings.Networks and type(d.NetworkSettings.Networks) == "table" then
|
||||
for k,v in pairs(d.NetworkSettings.Networks) do
|
||||
data[k] = v.IPAddress or ""
|
||||
end
|
||||
end
|
||||
|
||||
return data
|
||||
end
|
||||
|
||||
|
||||
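-- Run a container lifecycle action (start/stop/restart/kill/remove/upgrade)
-- against the Docker API, record progress in the status file, and redirect
-- back to the container page (or to the container list after remove/upgrade).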
local start_stop_remove = function(m, cmd)
|
||||
local res
|
||||
|
||||
docker:clear_status()
|
||||
docker:append_status("Containers: " .. cmd .. " " .. container_id .. "...")
|
||||
|
||||
if cmd ~= "upgrade" then
|
||||
res = dk.containers[cmd](dk, {id = container_id})
|
||||
else
|
||||
res = dk.containers_upgrade(dk, {id = container_id})
|
||||
end
|
||||
|
||||
if res and res.code >= 300 then
|
||||
docker:append_status("code:" .. res.code.." ".. (res.body.message and res.body.message or res.message))
|
||||
luci.http.redirect(luci.dispatcher.build_url("admin/docker/container/"..container_id))
|
||||
else
|
||||
docker:clear_status()
|
||||
if cmd ~= "remove" and cmd ~= "upgrade" then
|
||||
luci.http.redirect(luci.dispatcher.build_url("admin/docker/container/"..container_id))
|
||||
else
|
||||
luci.http.redirect(luci.dispatcher.build_url("admin/docker/containers"))
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
m=SimpleForm("docker",
|
||||
translatef("Docker - Container (%s)", container_info.Name:sub(2)),
|
||||
translate("On this page, the selected container can be managed."))
|
||||
m.redirect = luci.dispatcher.build_url("admin/docker/containers")
|
||||
|
||||
s = m:section(SimpleSection)
|
||||
s.template = "dockerman/apply_widget"
|
||||
s.err=docker:read_status()
|
||||
s.err=s.err and s.err:gsub("\n","<br>"):gsub(" ","&nbsp;")
|
||||
if s.err then
|
||||
docker:clear_status()
|
||||
end
|
||||
|
||||
s = m:section(Table,{{}})
|
||||
s.notitle=true
|
||||
s.rowcolors=false
|
||||
s.template = "cbi/nullsection"
|
||||
|
||||
o = s:option(Button, "_start")
|
||||
o.template = "dockerman/cbi/inlinebutton"
|
||||
o.inputtitle=translate("Start")
|
||||
o.inputstyle = "apply"
|
||||
o.forcewrite = true
|
||||
o.write = function(self, section)
|
||||
start_stop_remove(m,"start")
|
||||
end
|
||||
|
||||
o = s:option(Button, "_restart")
|
||||
o.template = "dockerman/cbi/inlinebutton"
|
||||
o.inputtitle=translate("Restart")
|
||||
o.inputstyle = "reload"
|
||||
o.forcewrite = true
|
||||
o.write = function(self, section)
|
||||
start_stop_remove(m,"restart")
|
||||
end
|
||||
|
||||
o = s:option(Button, "_stop")
|
||||
o.template = "dockerman/cbi/inlinebutton"
|
||||
o.inputtitle=translate("Stop")
|
||||
o.inputstyle = "reset"
|
||||
o.forcewrite = true
|
||||
o.write = function(self, section)
|
||||
start_stop_remove(m,"stop")
|
||||
end
|
||||
|
||||
o = s:option(Button, "_kill")
|
||||
o.template = "dockerman/cbi/inlinebutton"
|
||||
o.inputtitle=translate("Kill")
|
||||
o.inputstyle = "reset"
|
||||
o.forcewrite = true
|
||||
o.write = function(self, section)
|
||||
start_stop_remove(m,"kill")
|
||||
end
|
||||
|
||||
o = s:option(Button, "_export")
|
||||
o.template = "dockerman/cbi/inlinebutton"
|
||||
o.inputtitle=translate("Export")
|
||||
o.inputstyle = "apply"
|
||||
o.forcewrite = true
|
||||
o.write = function(self, section)
|
||||
luci.http.redirect(luci.dispatcher.build_url("admin/docker/container_export/"..container_id))
|
||||
end
|
||||
|
||||
o = s:option(Button, "_upgrade")
|
||||
o.template = "dockerman/cbi/inlinebutton"
|
||||
o.inputtitle=translate("Upgrade")
|
||||
o.inputstyle = "reload"
|
||||
o.forcewrite = true
|
||||
o.write = function(self, section)
|
||||
start_stop_remove(m,"upgrade")
|
||||
end
|
||||
|
||||
o = s:option(Button, "_duplicate")
|
||||
o.template = "dockerman/cbi/inlinebutton"
|
||||
o.inputtitle=translate("Duplicate/Edit")
|
||||
o.inputstyle = "add"
|
||||
o.forcewrite = true
|
||||
o.write = function(self, section)
|
||||
luci.http.redirect(luci.dispatcher.build_url("admin/docker/newcontainer/duplicate/"..container_id))
|
||||
end
|
||||
|
||||
o = s:option(Button, "_remove")
|
||||
o.template = "dockerman/cbi/inlinebutton"
|
||||
o.inputtitle=translate("Remove")
|
||||
o.inputstyle = "remove"
|
||||
o.forcewrite = true
|
||||
o.write = function(self, section)
|
||||
start_stop_remove(m,"remove")
|
||||
end
|
||||
|
||||
s = m:section(SimpleSection)
|
||||
s.template = "dockerman/container"
|
||||
|
||||
if action == "info" then
|
||||
res = dk.networks:list()
|
||||
if res.code < 300 then
|
||||
networks = res.body
|
||||
else
|
||||
return
|
||||
end
|
||||
m.submit = false
|
||||
m.reset = false
|
||||
table_info = {
|
||||
["01name"] = {
|
||||
_key = translate("Name"),
|
||||
_value = container_info.Name:sub(2) or "-",
|
||||
_button=translate("Update")
|
||||
},
|
||||
["02id"] = {
|
||||
_key = translate("ID"),
|
||||
_value = container_info.Id or "-"
|
||||
},
|
||||
["03image"] = {
|
||||
_key = translate("Image"),
|
||||
_value = container_info.Config.Image .. "<br>" .. container_info.Image
|
||||
},
|
||||
["04status"] = {
|
||||
_key = translate("Status"),
|
||||
_value = container_info.State and container_info.State.Status or "-"
|
||||
},
|
||||
["05created"] = {
|
||||
_key = translate("Created"),
|
||||
_value = container_info.Created or "-"
|
||||
},
|
||||
}
|
||||
|
||||
if container_info.State.Status == "running" then
|
||||
table_info["06start"] = {
|
||||
_key = translate("Start Time"),
|
||||
_value = container_info.State and container_info.State.StartedAt or "-"
|
||||
}
|
||||
else
|
||||
table_info["06start"] = {
|
||||
_key = translate("Finish Time"),
|
||||
_value = container_info.State and container_info.State.FinishedAt or "-"
|
||||
}
|
||||
end
|
||||
|
||||
table_info["07healthy"] = {
|
||||
_key = translate("Healthy"),
|
||||
_value = container_info.State and container_info.State.Health and container_info.State.Health.Status or "-"
|
||||
}
|
||||
table_info["08restart"] = {
|
||||
_key = translate("Restart Policy"),
|
||||
_value = container_info.HostConfig and container_info.HostConfig.RestartPolicy and container_info.HostConfig.RestartPolicy.Name or "-",
|
||||
_button=translate("Update")
|
||||
}
|
||||
table_info["081user"] = {
|
||||
_key = translate("User"),
|
||||
_value = container_info.Config and (container_info.Config.User ~="" and container_info.Config.User or "-") or "-"
|
||||
}
|
||||
table_info["09mount"] = {
|
||||
_key = translate("Mount/Volume"),
|
||||
_value = get_mounts(container_info) or "-"
|
||||
}
|
||||
table_info["10cmd"] = {
|
||||
_key = translate("Command"),
|
||||
_value = get_command(container_info) or "-"
|
||||
}
|
||||
table_info["11env"] = {
|
||||
_key = translate("Env"),
|
||||
_value = get_env(container_info) or "-"
|
||||
}
|
||||
table_info["12ports"] = {
|
||||
_key = translate("Ports"),
|
||||
_value = get_ports(container_info) or "-"
|
||||
}
|
||||
table_info["13links"] = {
|
||||
_key = translate("Links"),
|
||||
_value = get_links(container_info) or "-"
|
||||
}
|
||||
table_info["14device"] = {
|
||||
_key = translate("Device"),
|
||||
_value = get_device(container_info) or "-"
|
||||
}
|
||||
table_info["15tmpfs"] = {
|
||||
_key = translate("Tmpfs"),
|
||||
_value = get_tmpfs(container_info) or "-"
|
||||
}
|
||||
table_info["16dns"] = {
|
||||
_key = translate("DNS"),
|
||||
_value = get_dns(container_info) or "-"
|
||||
}
|
||||
table_info["17sysctl"] = {
|
||||
_key = translate("Sysctl"),
|
||||
_value = get_sysctl(container_info) or "-"
|
||||
}
|
||||
|
||||
info_networks = get_networks(container_info)
|
||||
list_networks = {}
|
||||
for _, v in ipairs (networks) do
|
||||
if v and v.Name then
|
||||
local parent = v.Options and v.Options.parent or nil
|
||||
local ip = v.IPAM and v.IPAM.Config and v.IPAM.Config[1] and v.IPAM.Config[1].Subnet or nil
|
||||
ipv6 = v.IPAM and v.IPAM.Config and v.IPAM.Config[2] and v.IPAM.Config[2].Subnet or nil
|
||||
local network_name = v.Name .. " | " .. v.Driver .. (parent and (" | " .. parent) or "") .. (ip and (" | " .. ip) or "").. (ipv6 and (" | " .. ipv6) or "")
|
||||
list_networks[v.Name] = network_name
|
||||
end
|
||||
end
|
||||
|
||||
if type(info_networks)== "table" then
|
||||
for k,v in pairs(info_networks) do
|
||||
table_info["14network"..k] = {
|
||||
_key = translate("Network"),
|
||||
_value = k.. (v~="" and (" | ".. v) or ""),
|
||||
_button=translate("Disconnect")
|
||||
}
|
||||
list_networks[k]=nil
|
||||
end
|
||||
end
|
||||
|
||||
table_info["15connect"] = {
|
||||
_key = translate("Connect Network"),
|
||||
_value = list_networks ,_opts = "",
|
||||
_button=translate("Connect")
|
||||
}
|
||||
|
||||
s = m:section(Table,table_info)
|
||||
s.nodescr=true
|
||||
s.formvalue=function(self, section)
|
||||
return table_info
|
||||
end
|
||||
|
||||
o = s:option(DummyValue, "_key", translate("Info"))
|
||||
o.width = "20%"
|
||||
|
||||
o = s:option(ListValue, "_value")
|
||||
o.render = function(self, section, scope)
|
||||
if table_info[section]._key == translate("Name") then
|
||||
self:reset_values()
|
||||
self.template = "cbi/value"
|
||||
self.size = 30
|
||||
self.keylist = {}
|
||||
self.vallist = {}
|
||||
self.default=table_info[section]._value
|
||||
Value.render(self, section, scope)
|
||||
elseif table_info[section]._key == translate("Restart Policy") then
|
||||
self.template = "cbi/lvalue"
|
||||
self:reset_values()
|
||||
self.size = nil
|
||||
self:value("no", "No")
|
||||
self:value("unless-stopped", "Unless stopped")
|
||||
self:value("always", "Always")
|
||||
self:value("on-failure", "On failure")
|
||||
self.default=table_info[section]._value
|
||||
ListValue.render(self, section, scope)
|
||||
elseif table_info[section]._key == translate("Connect Network") then
|
||||
self.template = "cbi/lvalue"
|
||||
self:reset_values()
|
||||
self.size = nil
|
||||
for k,v in pairs(list_networks) do
|
||||
if k ~= "host" then
|
||||
self:value(k,v)
|
||||
end
|
||||
end
|
||||
self.default=table_info[section]._value
|
||||
ListValue.render(self, section, scope)
|
||||
else
|
||||
self:reset_values()
|
||||
self.rawhtml=true
|
||||
self.template = "cbi/dvalue"
|
||||
self.default=table_info[section]._value
|
||||
DummyValue.render(self, section, scope)
|
||||
end
|
||||
end
|
||||
o.forcewrite = true
|
||||
o.write = function(self, section, value)
|
||||
table_info[section]._value=value
|
||||
end
|
||||
o.validate = function(self, value)
|
||||
return value
|
||||
end
|
||||
|
||||
o = s:option(Value, "_opts")
|
||||
o.forcewrite = true
|
||||
o.write = function(self, section, value)
|
||||
table_info[section]._opts=value
|
||||
end
|
||||
o.validate = function(self, value)
|
||||
return value
|
||||
end
|
||||
o.render = function(self, section, scope)
|
||||
if table_info[section]._key==translate("Connect Network") then
|
||||
self.template = "cbi/value"
|
||||
self.keylist = {}
|
||||
self.vallist = {}
|
||||
self.placeholder = "10.1.1.254"
|
||||
self.datatype = "ip4addr"
|
||||
self.default=table_info[section]._opts
|
||||
Value.render(self, section, scope)
|
||||
else
|
||||
self.rawhtml=true
|
||||
self.template = "cbi/dvalue"
|
||||
self.default=table_info[section]._opts
|
||||
DummyValue.render(self, section, scope)
|
||||
end
|
||||
end
|
||||
|
||||
o = s:option(Button, "_button")
|
||||
o.forcewrite = true
|
||||
o.render = function(self, section, scope)
|
||||
if table_info[section]._button and table_info[section]._value ~= nil then
|
||||
self.inputtitle=table_info[section]._button
|
||||
self.template = "cbi/button"
|
||||
self.inputstyle = "edit"
|
||||
Button.render(self, section, scope)
|
||||
else
|
||||
self.template = "cbi/dvalue"
|
||||
self.default=""
|
||||
DummyValue.render(self, section, scope)
|
||||
end
|
||||
end
|
||||
o.write = function(self, section, value)
|
||||
local res
|
||||
|
||||
docker:clear_status()
|
||||
|
||||
if section == "01name" then
|
||||
docker:append_status("Containers: rename " .. container_id .. "...")
|
||||
local new_name = table_info[section]._value
|
||||
res = dk.containers:rename({
|
||||
id = container_id,
|
||||
query = {
|
||||
name=new_name
|
||||
}
|
||||
})
|
||||
elseif section == "08restart" then
|
||||
docker:append_status("Containers: update " .. container_id .. "...")
|
||||
local new_restart = table_info[section]._value
|
||||
res = dk.containers:update({
|
||||
id = container_id,
|
||||
body = {
|
||||
RestartPolicy = {
|
||||
Name = new_restart
|
||||
}
|
||||
}
|
||||
})
|
||||
elseif table_info[section]._key == translate("Network") then
|
||||
local _,_,leave_network
|
||||
|
||||
_, _, leave_network = table_info[section]._value:find("(.-) | .+")
|
||||
leave_network = leave_network or table_info[section]._value
|
||||
docker:append_status("Network: disconnect " .. leave_network .. container_id .. "...")
|
||||
res = dk.networks:disconnect({
|
||||
name = leave_network,
|
||||
body = {
|
||||
Container = container_id
|
||||
}
|
||||
})
|
||||
elseif section == "15connect" then
|
||||
local connect_network = table_info[section]._value
|
||||
local network_opiton
|
||||
if connect_network ~= "none"
|
||||
and connect_network ~= "bridge"
|
||||
and connect_network ~= "host" then
|
||||
|
||||
network_opiton = table_info[section]._opts ~= "" and {
|
||||
IPAMConfig={
|
||||
IPv4Address=table_info[section]._opts
|
||||
}
|
||||
} or nil
|
||||
end
|
||||
docker:append_status("Network: connect " .. connect_network .. container_id .. "...")
|
||||
res = dk.networks:connect({
|
||||
name = connect_network,
|
||||
body = {
|
||||
Container = container_id,
|
||||
EndpointConfig= network_opiton
|
||||
}
|
||||
})
|
||||
end
|
||||
|
||||
if res and res.code > 300 then
|
||||
docker:append_status("code:" .. res.code.." ".. (res.body.message and res.body.message or res.message))
|
||||
else
|
||||
docker:clear_status()
|
||||
end
|
||||
luci.http.redirect(luci.dispatcher.build_url("admin/docker/container/"..container_id.."/info"))
|
||||
end
|
||||
elseif action == "resources" then
|
||||
s = m:section(SimpleSection)
|
||||
o = s:option( Value, "cpus",
|
||||
translate("CPUs"),
|
||||
translate("Number of CPUs. Number is a fractional number. 0.000 means no limit."))
|
||||
o.placeholder = "1.5"
|
||||
o.rmempty = true
|
||||
o.datatype="ufloat"
|
||||
o.default = container_info.HostConfig.NanoCpus / (10^9)
|
||||
|
||||
o = s:option(Value, "cpushares",
|
||||
translate("CPU Shares Weight"),
|
||||
translate("CPU shares relative weight, if 0 is set, the system will ignore the value and use the default of 1024."))
|
||||
o.placeholder = "1024"
|
||||
o.rmempty = true
|
||||
o.datatype="uinteger"
|
||||
o.default = container_info.HostConfig.CpuShares
|
||||
|
||||
o = s:option(Value, "memory",
|
||||
translate("Memory"),
|
||||
translate("Memory limit (format: <number>[<unit>]). Number is a positive integer. Unit can be one of b, k, m, or g. Minimum is 4M."))
|
||||
o.placeholder = "128m"
|
||||
o.rmempty = true
|
||||
o.default = container_info.HostConfig.Memory ~=0 and ((container_info.HostConfig.Memory / 1024 /1024) .. "M") or 0
|
||||
|
||||
o = s:option(Value, "blkioweight",
|
||||
translate("Block IO Weight"),
|
||||
translate("Block IO weight (relative weight) accepts a weight value between 10 and 1000."))
|
||||
o.placeholder = "500"
|
||||
o.rmempty = true
|
||||
o.datatype="uinteger"
|
||||
o.default = container_info.HostConfig.BlkioWeight
|
||||
|
||||
m.handle = function(self, state, data)
|
||||
if state == FORM_VALID then
|
||||
local memory = data.memory
|
||||
if memory and memory ~= 0 then
|
||||
_,_,n,unit = memory:find("([%d%.]+)([%l%u]+)")
|
||||
if n then
|
||||
unit = unit and unit:sub(1,1):upper() or "B"
|
||||
if unit == "M" then
|
||||
memory = tonumber(n) * 1024 * 1024
|
||||
elseif unit == "G" then
|
||||
memory = tonumber(n) * 1024 * 1024 * 1024
|
||||
elseif unit == "K" then
|
||||
memory = tonumber(n) * 1024
|
||||
else
|
||||
memory = tonumber(n)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
request_body = {
|
||||
BlkioWeight = tonumber(data.blkioweight),
|
||||
NanoCPUs = tonumber(data.cpus)*10^9,
|
||||
Memory = tonumber(memory),
|
||||
CpuShares = tonumber(data.cpushares)
|
||||
}
|
||||
|
||||
docker:write_status("Containers: update " .. container_id .. "...")
|
||||
local res = dk.containers:update({id = container_id, body = request_body})
|
||||
if res and res.code >= 300 then
|
||||
docker:append_status("code:" .. res.code.." ".. (res.body.message and res.body.message or res.message))
|
||||
else
|
||||
docker:clear_status()
|
||||
end
|
||||
luci.http.redirect(luci.dispatcher.build_url("admin/docker/container/"..container_id.."/resources"))
|
||||
end
|
||||
end
|
||||
|
||||
elseif action == "file" then
|
||||
m.submit = false
|
||||
m.reset = false
|
||||
s= m:section(SimpleSection)
|
||||
s.template = "dockerman/container_file_manager"
|
||||
s.container = container_id
|
||||
m.redirect = nil
|
||||
elseif action == "inspect" then
|
||||
s = m:section(SimpleSection)
|
||||
s.syslog = luci.jsonc.stringify(container_info, true)
|
||||
s.title = translate("Container Inspect")
|
||||
s.template = "dockerman/logs"
|
||||
m.submit = false
|
||||
m.reset = false
|
||||
elseif action == "logs" then
|
||||
local logs = ""
|
||||
local query ={
|
||||
stdout = 1,
|
||||
stderr = 1,
|
||||
tail = 1000
|
||||
}
|
||||
|
||||
s = m:section(SimpleSection)
|
||||
|
||||
logs = dk.containers:logs({id = container_id, query = query})
|
||||
if logs.code == 200 then
|
||||
s.syslog=logs.body
|
||||
else
|
||||
s.syslog="Get Logs ERROR\n"..logs.code..": "..logs.body
|
||||
end
|
||||
|
||||
s.title=translate("Container Logs")
|
||||
s.template = "dockerman/logs"
|
||||
m.submit = false
|
||||
m.reset = false
|
||||
elseif action == "console" then
|
||||
m.submit = false
|
||||
m.reset = false
|
||||
local cmd_docker = luci.util.exec("command -v docker"):match("^.+docker") or nil
|
||||
local cmd_ttyd = luci.util.exec("command -v ttyd"):match("^.+ttyd") or nil
|
||||
|
||||
if cmd_docker and cmd_ttyd and container_info.State.Status == "running" then
|
||||
local cmd = "/bin/sh"
|
||||
local uid
|
||||
|
||||
s = m:section(SimpleSection)
|
||||
|
||||
o = s:option(Value, "command", translate("Command"))
|
||||
o:value("/bin/sh", "/bin/sh")
|
||||
o:value("/bin/ash", "/bin/ash")
|
||||
o:value("/bin/bash", "/bin/bash")
|
||||
o.default = "/bin/sh"
|
||||
o.forcewrite = true
|
||||
o.write = function(self, section, value)
|
||||
cmd = value
|
||||
end
|
||||
|
||||
o = s:option(Value, "uid", translate("UID"))
|
||||
o.forcewrite = true
|
||||
o.write = function(self, section, value)
|
||||
uid = value
|
||||
end
|
||||
|
||||
o = s:option(Button, "connect")
|
||||
o.render = function(self, section, scope)
|
||||
self.inputstyle = "add"
|
||||
self.title = " "
|
||||
self.inputtitle = translate("Connect")
|
||||
Button.render(self, section, scope)
|
||||
end
|
||||
o.write = function(self, section)
|
||||
local cmd_docker = luci.util.exec("command -v docker"):match("^.+docker") or nil
|
||||
local cmd_ttyd = luci.util.exec("command -v ttyd"):match("^.+ttyd") or nil
|
||||
|
||||
if not cmd_docker or not cmd_ttyd or cmd_docker:match("^%s+$") or cmd_ttyd:match("^%s+$") then
|
||||
return
|
||||
end
|
||||
|
||||
local ttyd_ssl = uci.get("ttyd", "@ttyd[0]", "ssl")
|
||||
local ttyd_ssl_key = uci.get("ttyd", "@ttyd[0]", "ssl_key")
|
||||
local ttyd_ssl_cert = uci.get("ttyd", "@ttyd[0]", "ssl_cert")
|
||||
|
||||
if ttyd_ssl=="1" and ttyd_ssl_cert and ttyd_ssl_key then
|
||||
cmd_ttyd=string.format('%s -S -C %s -K %s',cmd_ttyd,ttyd_ssl_cert,ttyd_ssl_key)
|
||||
end
|
||||
|
||||
local pid = luci.util.trim(luci.util.exec("netstat -lnpt | grep :7682 | grep ttyd | tr -s ' ' | cut -d ' ' -f7 | cut -d'/' -f1"))
|
||||
if pid and pid ~= "" then
|
||||
luci.util.exec("kill -9 " .. pid)
|
||||
end
|
||||
|
||||
local hosts
|
||||
local uci = (require "luci.model.uci").cursor()
|
||||
local remote = uci:get_bool("dockerd", "dockerman", "remote_endpoint") or false
|
||||
local host = nil
|
||||
local port = nil
|
||||
local socket = nil
|
||||
|
||||
if remote then
|
||||
host = uci:get("dockerd", "dockerman", "remote_host") or nil
|
||||
port = uci:get("dockerd", "dockerman", "remote_port") or nil
|
||||
else
|
||||
socket = uci:get("dockerd", "dockerman", "socket_path") or "/var/run/docker.sock"
|
||||
end
|
||||
|
||||
if remote and host and port then
|
||||
hosts = "tcp://" .. host .. ':'.. port
|
||||
elseif socket then
|
||||
hosts = "unix://" .. socket
|
||||
else
|
||||
return
|
||||
end
|
||||
|
||||
if uid and uid ~= "" then
|
||||
uid = "-u " .. uid
|
||||
else
|
||||
uid = ""
|
||||
end
|
||||
|
||||
local start_cmd = string.format('%s -d 2 --once -p 7682 %s -H "%s" exec -it %s %s %s&', cmd_ttyd, cmd_docker, hosts, uid, container_id, cmd)
|
||||
|
||||
os.execute(start_cmd)
|
||||
|
||||
o = s:option(DummyValue, "console")
|
||||
o.container_id = container_id
|
||||
o.template = "dockerman/container_console"
|
||||
end
|
||||
end
|
||||
elseif action == "stats" then
|
||||
local response = dk.containers:top({id = container_id, query = {ps_args="-aux"}})
|
||||
local container_top
|
||||
|
||||
if response.code == 200 then
|
||||
container_top=response.body
|
||||
else
|
||||
response = dk.containers:top({id = container_id})
|
||||
if response.code == 200 then
|
||||
container_top=response.body
|
||||
end
|
||||
end
|
||||
|
||||
if type(container_top) == "table" then
|
||||
s = m:section(SimpleSection)
|
||||
s.container_id = container_id
|
||||
s.template = "dockerman/container_stats"
|
||||
table_stats = {
cpu={
key=translate("CPU Usage"),
value='-'
},
memory={
key=translate("Memory Usage"),
value='-'
}
}
|
||||
|
||||
container_top = response.body
|
||||
s = m:section(Table, table_stats, translate("Stats"))
|
||||
s:option(DummyValue, "key", translate("Stats")).width="33%"
|
||||
s:option(DummyValue, "value")
|
||||
top_section = m:section(Table, container_top.Processes, translate("TOP"))
|
||||
for i, v in ipairs(container_top.Titles) do
|
||||
top_section:option(DummyValue, i, translate(v))
|
||||
end
|
||||
end
|
||||
|
||||
m.submit = false
|
||||
m.reset = false
|
||||
end
|
||||
|
||||
return m
|
@ -1,284 +0,0 @@
|
||||
--[[
|
||||
LuCI - Lua Configuration Interface
|
||||
Copyright 2019 lisaac <https://github.com/lisaac/luci-app-dockerman>
|
||||
]]--
|
||||
|
||||
local http = require "luci.http"
|
||||
local docker = require "luci.model.docker"
|
||||
|
||||
local m, s, o
|
||||
local images, networks, containers, res, lost_state
|
||||
local urlencode = luci.http.protocol and luci.http.protocol.urlencode or luci.util.urlencode
|
||||
local dk = docker.new()
|
||||
|
||||
if dk:_ping().code ~= 200 then
|
||||
lost_state = true
|
||||
else
|
||||
res = dk.images:list()
|
||||
if res and res.code and res.code < 300 then
|
||||
images = res.body
|
||||
end
|
||||
|
||||
res = dk.networks:list()
|
||||
if res and res.code and res.code < 300 then
|
||||
networks = res.body
|
||||
end
|
||||
|
||||
res = dk.containers:list({
|
||||
query = {
|
||||
all = true
|
||||
}
|
||||
})
|
||||
if res and res.code and res.code < 300 then
|
||||
containers = res.body
|
||||
end
|
||||
end
|
||||
|
||||
function get_containers()
|
||||
local data = {}
|
||||
if type(containers) ~= "table" then
|
||||
return nil
|
||||
end
|
||||
|
||||
for i, v in ipairs(containers) do
|
||||
local index = (10^12 - v.Created) .. "_id_" .. v.Id
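-- (10^12 - Created) builds a fixed-width, descending-by-age prefix, so that when the
-- keys are sorted ascending the newest containers come first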
|
||||
|
||||
data[index]={}
|
||||
data[index]["_selected"] = 0
|
||||
data[index]["_id"] = v.Id:sub(1,12)
|
||||
-- data[index]["name"] = v.Names[1]:sub(2)
|
||||
data[index]["_status"] = v.Status
|
||||
|
||||
if v.Status:find("^Up") then
|
||||
data[index]["_name"] = "<font color='green'>"..v.Names[1]:sub(2).."</font>"
|
||||
data[index]["_status"] = "<a href='"..luci.dispatcher.build_url("admin/docker/container/"..v.Id).."/stats'><font color='green'>".. data[index]["_status"] .. "</font>" .. "<br><font color='#9f9f9f' class='container_cpu_status'></font><br><font color='#9f9f9f' class='container_mem_status'></font><br><font color='#9f9f9f' class='container_network_status'></font></a>"
|
||||
else
|
||||
data[index]["_name"] = "<font color='red'>"..v.Names[1]:sub(2).."</font>"
|
||||
data[index]["_status"] = '<font class="container_not_running" color="red">'.. data[index]["_status"] .. "</font>"
|
||||
end
|
||||
|
||||
if (type(v.NetworkSettings) == "table" and type(v.NetworkSettings.Networks) == "table") then
|
||||
for networkname, netconfig in pairs(v.NetworkSettings.Networks) do
|
||||
data[index]["_network"] = (data[index]["_network"] ~= nil and (data[index]["_network"] .." | ") or "").. networkname .. (netconfig.IPAddress ~= "" and (": " .. netconfig.IPAddress) or "")
|
||||
end
|
||||
end
|
||||
|
||||
-- networkmode = v.HostConfig.NetworkMode ~= "default" and v.HostConfig.NetworkMode or "bridge"
|
||||
-- data[index]["_network"] = v.NetworkSettings.Networks[networkmode].IPAddress or nil
|
||||
-- local _, _, image = v.Image:find("^sha256:(.+)")
|
||||
-- if image ~= nil then
|
||||
-- image=image:sub(1,12)
|
||||
-- end
|
||||
|
||||
if v.Ports and next(v.Ports) ~= nil then
|
||||
data[index]["_ports"] = nil
|
||||
local ip = require "luci.ip"
|
||||
for _,v2 in ipairs(v.Ports) do
|
||||
-- display ipv4 only
|
||||
if ip.new(v2.IP or "0.0.0.0"):is4() then
|
||||
data[index]["_ports"] = (data[index]["_ports"] and (data[index]["_ports"] .. ", ") or "")
|
||||
.. ((v2.PublicPort and v2.Type and v2.Type == "tcp") and ('<a href="javascript:void(0);" onclick="window.open((window.location.origin.match(/^(.+):\\d+$/) && window.location.origin.match(/^(.+):\\d+$/)[1] || window.location.origin) + \':\' + '.. v2.PublicPort ..', \'_blank\');">') or "")
|
||||
.. (v2.PublicPort and (v2.PublicPort .. ":") or "") .. (v2.PrivatePort and (v2.PrivatePort .."/") or "") .. (v2.Type and v2.Type or "")
|
||||
.. ((v2.PublicPort and v2.Type and v2.Type == "tcp")and "</a>" or "")
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
for ii,iv in ipairs(images) do
|
||||
if iv.Id == v.ImageID then
|
||||
data[index]["_image"] = iv.RepoTags and iv.RepoTags[1] or (iv.RepoDigests[1]:gsub("(.-)@.+", "%1") .. ":<none>")
|
||||
end
|
||||
end
|
||||
data[index]["_id_name"] = '<a href='..luci.dispatcher.build_url("admin/docker/container/"..v.Id)..' class="dockerman_link" title="'..translate("Container detail")..'">'.. data[index]["_name"] .. "<br><font color='#9f9f9f'>ID: " .. data[index]["_id"]
|
||||
.. "</font></a><br>Image: " .. (data[index]["_image"] or "<none>")
|
||||
.. "<br><font color='#9f9f9f' class='container_size_".. v.Id .."'></font>"
|
||||
|
||||
if type(v.Mounts) == "table" and next(v.Mounts) then
|
||||
for _, v2 in pairs(v.Mounts) do
|
||||
if v2.Type ~= "volume" then
|
||||
local v_sorce_d, v_dest_d
|
||||
local v_sorce = ""
|
||||
local v_dest = ""
|
||||
for v_sorce_d in v2["Source"]:gmatch('[^/]+') do
|
||||
if v_sorce_d and #v_sorce_d > 12 then
|
||||
v_sorce = v_sorce .. "/" .. v_sorce_d:sub(1,8) .. ".."
|
||||
else
|
||||
v_sorce = v_sorce .."/".. v_sorce_d
|
||||
end
|
||||
end
|
||||
for v_dest_d in v2["Destination"]:gmatch('[^/]+') do
|
||||
if v_dest_d and #v_dest_d > 12 then
|
||||
v_dest = v_dest .. "/" .. v_dest_d:sub(1,8) .. ".."
|
||||
else
|
||||
v_dest = v_dest .."/".. v_dest_d
|
||||
end
|
||||
end
|
||||
data[index]["_mounts"] = (data[index]["_mounts"] and (data[index]["_mounts"] .. "<br>") or "") .. '<span title="'.. v2.Source.. "→" .. v2.Destination .. '" ><a href="'..luci.dispatcher.build_url("admin/docker/container/"..v.Id)..'/file?path='..v2["Destination"]..'">' .. v_sorce .. "→" .. v_dest..'</a></span>'
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
data[index]["_image_id"] = v.ImageID:sub(8,20)
|
||||
data[index]["_command"] = v.Command
|
||||
end
|
||||
return data
|
||||
end
|
||||
|
||||
local container_list = not lost_state and get_containers() or {}
|
||||
|
||||
m = SimpleForm("docker",
|
||||
translate("Docker - Containers"),
|
||||
translate("This page displays all containers that have been created on the connected docker host."))
|
||||
m.submit=false
|
||||
m.reset=false
|
||||
m:append(Template("dockerman/containers_running_stats"))
|
||||
|
||||
s = m:section(SimpleSection)
|
||||
s.template = "dockerman/apply_widget"
|
||||
s.err=docker:read_status()
|
||||
s.err=s.err and s.err:gsub("\n","<br>"):gsub(" ","&nbsp;")
|
||||
if s.err then
|
||||
docker:clear_status()
|
||||
end
|
||||
|
||||
s = m:section(Table, container_list, translate("Containers"))
|
||||
s.nodescr=true
|
||||
s.config="containers"
|
||||
|
||||
o = s:option(Flag, "_selected","")
|
||||
o.disabled = 0
|
||||
o.enabled = 1
|
||||
o.default = 0
|
||||
o.width = "1%"
|
||||
o.write=function(self, section, value)
|
||||
container_list[section]._selected = value
|
||||
end
|
||||
|
||||
-- o = s:option(DummyValue, "_id", translate("ID"))
|
||||
-- o.width="10%"
|
||||
|
||||
-- o = s:option(DummyValue, "_name", translate("Container Name"))
|
||||
-- o.rawhtml = true
|
||||
|
||||
o = s:option(DummyValue, "_id_name", translate("Container Info"))
|
||||
o.rawhtml = true
|
||||
o.width="15%"
|
||||
|
||||
o = s:option(DummyValue, "_status", translate("Status"))
|
||||
o.width="15%"
|
||||
o.rawhtml=true
|
||||
|
||||
o = s:option(DummyValue, "_network", translate("Network"))
|
||||
o.width="10%"
|
||||
|
||||
o = s:option(DummyValue, "_ports", translate("Ports"))
|
||||
o.width="5%"
|
||||
o.rawhtml = true
|
||||
o = s:option(DummyValue, "_mounts", translate("Mounts"))
|
||||
o.width="25%"
|
||||
o.rawhtml = true
|
||||
|
||||
-- o = s:option(DummyValue, "_image", translate("Image"))
|
||||
-- o.width="8%"
|
||||
|
||||
o = s:option(DummyValue, "_command", translate("Command"))
|
||||
o.width="15%"
|
||||
|
||||
local start_stop_remove = function(m, cmd)
local container_selected = {}
-- walk the section ids of the table
for k in pairs(container_list) do
-- collect the ids of the selected entries
if container_list[k]._selected == 1 then
container_selected[#container_selected + 1] = container_list[k]["_id"]
end
end
|
||||
if #container_selected > 0 then
|
||||
local success = true
|
||||
|
||||
docker:clear_status()
|
||||
for _, cont in ipairs(container_selected) do
|
||||
docker:append_status("Containers: " .. cmd .. " " .. cont .. "...")
|
||||
local res = dk.containers[cmd](dk, {id = cont})
|
||||
if res and res.code and res.code >= 300 then
|
||||
success = false
|
||||
docker:append_status("code:" .. res.code.." ".. (res.body.message and res.body.message or res.message).. "\n")
|
||||
else
|
||||
docker:append_status("done\n")
|
||||
end
|
||||
end
|
||||
|
||||
if success then
|
||||
docker:clear_status()
|
||||
end
|
||||
|
||||
luci.http.redirect(luci.dispatcher.build_url("admin/docker/containers"))
|
||||
end
|
||||
end
|
||||
|
||||
s = m:section(Table,{{}})
|
||||
s.notitle=true
|
||||
s.rowcolors=false
|
||||
s.template="cbi/nullsection"
|
||||
|
||||
o = s:option(Button, "_new")
|
||||
o.inputtitle = translate("Add")
|
||||
o.template = "dockerman/cbi/inlinebutton"
|
||||
o.inputstyle = "add"
|
||||
o.forcewrite = true
|
||||
o.write = function(self, section)
|
||||
luci.http.redirect(luci.dispatcher.build_url("admin/docker/newcontainer"))
|
||||
end
|
||||
o.disable = lost_state
|
||||
|
||||
o = s:option(Button, "_start")
|
||||
o.template = "dockerman/cbi/inlinebutton"
|
||||
o.inputtitle = translate("Start")
|
||||
o.inputstyle = "apply"
|
||||
o.forcewrite = true
|
||||
o.write = function(self, section)
|
||||
start_stop_remove(m,"start")
|
||||
end
|
||||
o.disable = lost_state
|
||||
|
||||
o = s:option(Button, "_restart")
|
||||
o.template = "dockerman/cbi/inlinebutton"
|
||||
o.inputtitle = translate("Restart")
|
||||
o.inputstyle = "reload"
|
||||
o.forcewrite = true
|
||||
o.write = function(self, section)
|
||||
start_stop_remove(m,"restart")
|
||||
end
|
||||
o.disable = lost_state
|
||||
|
||||
o = s:option(Button, "_stop")
|
||||
o.template = "dockerman/cbi/inlinebutton"
|
||||
o.inputtitle = translate("Stop")
|
||||
o.inputstyle = "reset"
|
||||
o.forcewrite = true
|
||||
o.write = function(self, section)
|
||||
start_stop_remove(m,"stop")
|
||||
end
|
||||
o.disable = lost_state
|
||||
|
||||
o = s:option(Button, "_kill")
|
||||
o.template = "dockerman/cbi/inlinebutton"
|
||||
o.inputtitle = translate("Kill")
|
||||
o.inputstyle = "reset"
|
||||
o.forcewrite = true
|
||||
o.write = function(self, section)
|
||||
start_stop_remove(m,"kill")
|
||||
end
|
||||
o.disable = lost_state
|
||||
|
||||
o = s:option(Button, "_remove")
|
||||
o.template = "dockerman/cbi/inlinebutton"
|
||||
o.inputtitle = translate("Remove")
|
||||
o.inputstyle = "remove"
|
||||
o.forcewrite = true
|
||||
o.write = function(self, section)
|
||||
start_stop_remove(m, "remove")
|
||||
end
|
||||
o.disable = lost_state
|
||||
|
||||
return m
|
@ -1,284 +0,0 @@
|
||||
--[[
|
||||
LuCI - Lua Configuration Interface
|
||||
Copyright 2019 lisaac <https://github.com/lisaac/luci-app-dockerman>
|
||||
]]--
|
||||
|
||||
local docker = require "luci.model.docker"
|
||||
local dk = docker.new()
|
||||
|
||||
local containers, images, res, lost_state
|
||||
local m, s, o
|
||||
|
||||
if dk:_ping().code ~= 200 then
|
||||
lost_state = true
|
||||
else
|
||||
res = dk.images:list()
|
||||
if res and res.code and res.code < 300 then
|
||||
images = res.body
|
||||
end
|
||||
|
||||
res = dk.containers:list({ query = { all = true } })
|
||||
if res and res.code and res.code < 300 then
|
||||
containers = res.body
|
||||
end
|
||||
end
|
||||
|
||||
function get_images()
|
||||
local data = {}
|
||||
|
||||
for i, v in ipairs(images) do
|
||||
local index = v.Created .. v.Id
|
||||
|
||||
data[index]={}
|
||||
data[index]["_selected"] = 0
|
||||
data[index]["id"] = v.Id:sub(8)
|
||||
data[index]["_id"] = '<a href="javascript:new_tag(\''..v.Id:sub(8,20)..'\')" class="dockerman-link" title="'..translate("New tag")..'">' .. v.Id:sub(8,20) .. '</a>'
|
||||
|
||||
if v.RepoTags and next(v.RepoTags)~=nil then
|
||||
for i, v1 in ipairs(v.RepoTags) do
|
||||
data[index]["_tags"] =(data[index]["_tags"] and ( data[index]["_tags"] .. "<br>" )or "") .. ((v1:match("<none>") or (#v.RepoTags == 1)) and v1 or ('<a href="javascript:un_tag(\''..v1..'\')" class="dockerman_link" title="'..translate("Remove tag")..'" >' .. v1 .. '</a>'))
|
||||
|
||||
if not data[index]["tag"] then
|
||||
data[index]["tag"] = v1
|
||||
end
|
||||
end
|
||||
else
data[index]["_tags"] = v.RepoDigests[1] and v.RepoDigests[1]:match("^(.-)@.+")
data[index]["_tags"] = (data[index]["_tags"] and data[index]["_tags"] or "<none>" ).. ":<none>"
end

data[index]["_tags"] = data[index]["_tags"]:gsub("<none>","&lt;none&gt;")
|
||||
for ci,cv in ipairs(containers) do
|
||||
if v.Id == cv.ImageID then
|
||||
data[index]["_containers"] = (data[index]["_containers"] and (data[index]["_containers"] .. " | ") or "")..
|
||||
'<a href='..luci.dispatcher.build_url("admin/docker/container/"..cv.Id)..' class="dockerman_link" title="'..translate("Container detail")..'">'.. cv.Names[1]:sub(2).."</a>"
|
||||
end
|
||||
end
|
||||
|
||||
data[index]["_size"] = string.format("%.2f", tostring(v.Size/1024/1024)).."MB"
|
||||
data[index]["_created"] = os.date("%Y/%m/%d %H:%M:%S",v.Created)
|
||||
end
|
||||
|
||||
return data
|
||||
end
|
||||
|
||||
local image_list = not lost_state and get_images() or {}
|
||||
|
||||
m = SimpleForm("docker",
|
||||
translate("Docker - Images"),
|
||||
translate("On this page all images are displayed that are available on the system and with which a container can be created."))
|
||||
m.submit=false
|
||||
m.reset=false
|
||||
|
||||
local pull_value={
|
||||
_image_tag_name="",
|
||||
_registry="index.docker.io"
|
||||
}
|
||||
|
||||
s = m:section(SimpleSection,
|
||||
translate("Pull Image"),
|
||||
translate("By entering a valid image name with the corresponding version, the docker image can be downloaded from the configured registry."))
|
||||
s.template="cbi/nullsection"
|
||||
|
||||
o = s:option(Value, "_image_tag_name")
|
||||
o.template = "dockerman/cbi/inlinevalue"
|
||||
o.placeholder="lisaac/luci:latest"
|
||||
o.write = function(self, section, value)
|
||||
local hastag = value:find(":")
|
||||
|
||||
if not hastag then
|
||||
value = value .. ":latest"
|
||||
end
|
||||
pull_value["_image_tag_name"] = value
|
||||
end
|
||||
|
||||
o = s:option(Button, "_pull")
|
||||
o.inputtitle= translate("Pull")
|
||||
o.template = "dockerman/cbi/inlinebutton"
|
||||
o.inputstyle = "add"
|
||||
o.disable = lost_state
|
||||
o.write = function(self, section)
|
||||
local tag = pull_value["_image_tag_name"]
|
||||
local json_stringify = luci.jsonc and luci.jsonc.stringify
|
||||
|
||||
if tag and tag ~= "" then
|
||||
docker:write_status("Images: " .. "pulling" .. " " .. tag .. "...\n")
|
||||
local res = dk.images:create({query = {fromImage=tag}}, docker.pull_image_show_status_cb)
|
||||
|
||||
if res and res.code and res.code == 200 and (res.body[#res.body] and not res.body[#res.body].error and res.body[#res.body].status and (res.body[#res.body].status == "Status: Downloaded newer image for ".. tag)) then
|
||||
docker:clear_status()
|
||||
else
|
||||
docker:append_status("code:" .. res.code.." ".. (res.body[#res.body] and res.body[#res.body].error or (res.body.message or res.message)).. "\n")
|
||||
end
|
||||
else
|
||||
docker:append_status("code: 400 please input the name of image name!")
|
||||
end
|
||||
|
||||
luci.http.redirect(luci.dispatcher.build_url("admin/docker/images"))
|
||||
end
|
||||
|
||||
s = m:section(SimpleSection,
|
||||
translate("Import Image"),
|
||||
translate("When pressing the Import button, both a local image can be loaded onto the system and a valid image tar can be downloaded from remote."))
|
||||
|
||||
o = s:option(DummyValue, "_image_import")
|
||||
o.template = "dockerman/images_import"
|
||||
o.disable = lost_state
|
||||
|
||||
s = m:section(Table, image_list, translate("Images overview"))
|
||||
|
||||
o = s:option(Flag, "_selected","")
|
||||
o.disabled = 0
|
||||
o.enabled = 1
|
||||
o.default = 0
|
||||
o.write = function(self, section, value)
|
||||
image_list[section]._selected = value
|
||||
end
|
||||
|
||||
o = s:option(DummyValue, "_id", translate("ID"))
|
||||
o.rawhtml = true
|
||||
|
||||
o = s:option(DummyValue, "_tags", translate("RepoTags"))
|
||||
o.rawhtml = true
|
||||
|
||||
o = s:option(DummyValue, "_containers", translate("Containers"))
|
||||
o.rawhtml = true
|
||||
|
||||
o = s:option(DummyValue, "_size", translate("Size"))
|
||||
|
||||
o = s:option(DummyValue, "_created", translate("Created"))
|
||||
|
||||
local remove_action = function(force)
|
||||
local image_selected = {}
|
||||
|
||||
for k in pairs(image_list) do
|
||||
if image_list[k]._selected == 1 then
|
||||
image_selected[#image_selected+1] = (image_list[k]["_tags"]:match("<br>") or image_list[k]["_tags"]:match("<none>")) and image_list[k].id or image_list[k].tag
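-- images with several tags (the cell contains <br>) or no usable tag are removed by id;
-- a single-tagged image is removed via that tag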
|
||||
end
|
||||
end
|
||||
|
||||
if next(image_selected) ~= nil then
|
||||
local success = true
|
||||
|
||||
docker:clear_status()
|
||||
for _, img in ipairs(image_selected) do
|
||||
local query
|
||||
docker:append_status("Images: " .. "remove" .. " " .. img .. "...")
|
||||
|
||||
if force then
|
||||
query = {force = true}
|
||||
end
|
||||
|
||||
local msg = dk.images:remove({
|
||||
id = img,
|
||||
query = query
|
||||
})
|
||||
if msg and msg.code ~= 200 then
|
||||
docker:append_status("code:" .. msg.code.." ".. (msg.body.message and msg.body.message or msg.message).. "\n")
|
||||
success = false
|
||||
else
|
||||
docker:append_status("done\n")
|
||||
end
|
||||
end
|
||||
|
||||
if success then
|
||||
docker:clear_status()
|
||||
end
|
||||
|
||||
luci.http.redirect(luci.dispatcher.build_url("admin/docker/images"))
|
||||
end
|
||||
end
|
||||
|
||||
s = m:section(SimpleSection)
|
||||
s.template = "dockerman/apply_widget"
|
||||
s.err = docker:read_status()
|
||||
s.err = s.err and s.err:gsub("\n","<br>"):gsub(" ","&nbsp;")
|
||||
if s.err then
|
||||
docker:clear_status()
|
||||
end
|
||||
|
||||
s = m:section(Table,{{}})
|
||||
s.notitle=true
|
||||
s.rowcolors=false
|
||||
s.template="cbi/nullsection"
|
||||
|
||||
o = s:option(Button, "remove")
|
||||
o.inputtitle= translate("Remove")
|
||||
o.template = "dockerman/cbi/inlinebutton"
|
||||
o.inputstyle = "remove"
|
||||
o.forcewrite = true
|
||||
o.write = function(self, section)
|
||||
remove_action()
|
||||
end
|
||||
o.disable = lost_state
|
||||
|
||||
o = s:option(Button, "forceremove")
|
||||
o.inputtitle= translate("Force Remove")
|
||||
o.template = "dockerman/cbi/inlinebutton"
|
||||
o.inputstyle = "remove"
|
||||
o.forcewrite = true
|
||||
o.write = function(self, section)
|
||||
remove_action(true)
|
||||
end
|
||||
o.disable = lost_state
|
||||
|
||||
o = s:option(Button, "save")
|
||||
o.inputtitle= translate("Save")
|
||||
o.template = "dockerman/cbi/inlinebutton"
|
||||
o.inputstyle = "edit"
|
||||
o.disable = lost_state
|
||||
o.forcewrite = true
|
||||
o.write = function (self, section)
|
||||
local image_selected = {}
|
||||
|
||||
for k in pairs(image_list) do
|
||||
if image_list[k]._selected == 1 then
|
||||
image_selected[#image_selected + 1] = image_list[k].id
|
||||
end
|
||||
end
|
||||
|
||||
if next(image_selected) ~= nil then
|
||||
local names, first, show_name
|
||||
|
||||
for _, img in ipairs(image_selected) do
|
||||
names = names and (names .. "&names=".. img) or img
|
||||
end
|
||||
if #image_selected > 1 then
|
||||
show_name = "images"
|
||||
else
|
||||
show_name = image_selected[1]
|
||||
end
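-- stream the export straight to the browser: on HTTP 200 the chunks are sent as a tar
-- download, otherwise the error body is returned as plain text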
|
||||
local cb = function(res, chunk)
|
||||
if res and res.code and res.code == 200 then
|
||||
if not first then
|
||||
first = true
|
||||
luci.http.header('Content-Disposition', 'inline; filename="'.. show_name .. '.tar"')
|
||||
luci.http.header('Content-Type', 'application\/x-tar')
|
||||
end
|
||||
luci.ltn12.pump.all(chunk, luci.http.write)
|
||||
else
|
||||
if not first then
|
||||
first = true
|
||||
luci.http.prepare_content("text/plain")
|
||||
end
|
||||
luci.ltn12.pump.all(chunk, luci.http.write)
|
||||
end
|
||||
end
|
||||
|
||||
docker:write_status("Images: " .. "save" .. " " .. table.concat(image_selected, "\n") .. "...")
|
||||
local msg = dk.images:get({query = {names = names}}, cb)
|
||||
if msg and msg.code and msg.code ~= 200 then
|
||||
docker:append_status("code:" .. msg.code.." ".. (msg.body.message and msg.body.message or msg.message).. "\n")
|
||||
else
|
||||
docker:clear_status()
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
o = s:option(Button, "load")
|
||||
o.inputtitle= translate("Load")
|
||||
o.template = "dockerman/images_load"
|
||||
o.inputstyle = "add"
|
||||
o.disable = lost_state
|
||||
|
||||
return m
|
@ -1,159 +0,0 @@
|
||||
--[[
|
||||
LuCI - Lua Configuration Interface
|
||||
Copyright 2019 lisaac <https://github.com/lisaac/luci-app-dockerman>
|
||||
]]--
|
||||
|
||||
local docker = require "luci.model.docker"
|
||||
|
||||
local m, s, o
|
||||
local networks, dk, res, lost_state
|
||||
|
||||
dk = docker.new()
|
||||
|
||||
if dk:_ping().code ~= 200 then
|
||||
lost_state = true
|
||||
else
|
||||
res = dk.networks:list()
|
||||
if res and res.code and res.code < 300 then
|
||||
networks = res.body
|
||||
end
|
||||
end
|
||||
|
||||
local get_networks = function ()
|
||||
local data = {}
|
||||
|
||||
if type(networks) ~= "table" then
|
||||
return nil
|
||||
end
|
||||
|
||||
for i, v in ipairs(networks) do
|
||||
local index = v.Created .. v.Id
|
||||
|
||||
data[index]={}
|
||||
data[index]["_selected"] = 0
|
||||
data[index]["_id"] = v.Id:sub(1,12)
|
||||
data[index]["_name"] = v.Name
|
||||
data[index]["_driver"] = v.Driver
|
||||
|
||||
if v.Driver == "bridge" then
|
||||
data[index]["_interface"] = v.Options["com.docker.network.bridge.name"]
|
||||
elseif v.Driver == "macvlan" then
|
||||
data[index]["_interface"] = v.Options.parent
|
||||
end
|
||||
|
||||
data[index]["_subnet"] = v.IPAM and v.IPAM.Config[1] and v.IPAM.Config[1].Subnet or nil
|
||||
data[index]["_gateway"] = v.IPAM and v.IPAM.Config[1] and v.IPAM.Config[1].Gateway or nil
|
||||
end
|
||||
|
||||
return data
|
||||
end
|
||||
|
||||
local network_list = not lost_state and get_networks() or {}
|
||||
|
||||
m = SimpleForm("docker",
|
||||
translate("Docker - Networks"),
|
||||
translate("This page displays all docker networks that have been created on the connected docker host."))
|
||||
m.submit=false
|
||||
m.reset=false
|
||||
|
||||
s = m:section(Table, network_list, translate("Networks overview"))
|
||||
s.nodescr=true
|
||||
|
||||
o = s:option(Flag, "_selected","")
|
||||
o.template = "dockerman/cbi/xfvalue"
|
||||
o.disabled = 0
|
||||
o.enabled = 1
|
||||
o.default = 0
|
||||
o.render = function(self, section, scope)
|
||||
self.disable = 0
|
||||
if network_list[section]["_name"] == "bridge" or network_list[section]["_name"] == "none" or network_list[section]["_name"] == "host" then
|
||||
self.disable = 1
|
||||
end
|
||||
Flag.render(self, section, scope)
|
||||
end
|
||||
o.write = function(self, section, value)
|
||||
network_list[section]._selected = value
|
||||
end
|
||||
|
||||
o = s:option(DummyValue, "_id", translate("ID"))
|
||||
|
||||
o = s:option(DummyValue, "_name", translate("Network Name"))
|
||||
|
||||
o = s:option(DummyValue, "_driver", translate("Driver"))
|
||||
|
||||
o = s:option(DummyValue, "_interface", translate("Parent Interface"))
|
||||
|
||||
o = s:option(DummyValue, "_subnet", translate("Subnet"))
|
||||
|
||||
o = s:option(DummyValue, "_gateway", translate("Gateway"))
|
||||
|
||||
s = m:section(SimpleSection)
|
||||
s.template = "dockerman/apply_widget"
|
||||
s.err = docker:read_status()
|
||||
s.err = s.err and s.err:gsub("\n","<br>"):gsub(" ","&nbsp;")
|
||||
if s.err then
|
||||
docker:clear_status()
|
||||
end
|
||||
|
||||
s = m:section(Table,{{}})
|
||||
s.notitle=true
|
||||
s.rowcolors=false
|
||||
s.template="cbi/nullsection"
|
||||
|
||||
o = s:option(Button, "_new")
|
||||
o.inputtitle= translate("New")
|
||||
o.template = "dockerman/cbi/inlinebutton"
|
||||
o.notitle=true
|
||||
o.inputstyle = "add"
|
||||
o.forcewrite = true
|
||||
o.disable = lost_state
|
||||
o.write = function(self, section)
|
||||
luci.http.redirect(luci.dispatcher.build_url("admin/docker/newnetwork"))
|
||||
end
|
||||
|
||||
o = s:option(Button, "_remove")
|
||||
o.inputtitle= translate("Remove")
|
||||
o.template = "dockerman/cbi/inlinebutton"
|
||||
o.inputstyle = "remove"
|
||||
o.forcewrite = true
|
||||
o.disable = lost_state
|
||||
o.write = function(self, section)
|
||||
local network_selected = {}
|
||||
local network_name_selected = {}
|
||||
local network_driver_selected = {}
|
||||
|
||||
for k in pairs(network_list) do
|
||||
if network_list[k]._selected == 1 then
|
||||
network_selected[#network_selected + 1] = network_list[k]._id
|
||||
network_name_selected[#network_name_selected + 1] = network_list[k]._name
|
||||
network_driver_selected[#network_driver_selected + 1] = network_list[k]._driver
|
||||
end
|
||||
end
|
||||
|
||||
if next(network_selected) ~= nil then
|
||||
local success = true
|
||||
docker:clear_status()
|
||||
|
||||
for ii, net in ipairs(network_selected) do
|
||||
docker:append_status("Networks: " .. "remove" .. " " .. net .. "...")
|
||||
local res = dk.networks["remove"](dk, {id = net})
|
||||
|
||||
if res and res.code and res.code >= 300 then
|
||||
docker:append_status("code:" .. res.code.." ".. (res.body.message and res.body.message or res.message).. "\n")
|
||||
success = false
|
||||
else
|
||||
docker:append_status("done\n")
|
||||
if network_driver_selected[ii] == "macvlan" then
|
||||
docker.remove_macvlan_interface(network_name_selected[ii])
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
if success then
|
||||
docker:clear_status()
|
||||
end
|
||||
luci.http.redirect(luci.dispatcher.build_url("admin/docker/networks"))
|
||||
end
|
||||
end
|
||||
|
||||
return m
|
@ -1,923 +0,0 @@
|
||||
--[[
|
||||
LuCI - Lua Configuration Interface
|
||||
Copyright 2019 lisaac <https://github.com/lisaac/luci-app-dockerman>
|
||||
]]--
|
||||
|
||||
local docker = require "luci.model.docker"
|
||||
|
||||
local m, s, o
|
||||
|
||||
local dk = docker.new()
|
||||
|
||||
local cmd_line = table.concat(arg, '/')
|
||||
local images, networks
|
||||
local create_body = {}
|
||||
|
||||
if dk:_ping().code ~= 200 then
|
||||
lost_state = true
|
||||
images = {}
|
||||
networks = {}
|
||||
else
|
||||
images = dk.images:list().body
|
||||
networks = dk.networks:list().body
|
||||
end
|
||||
|
||||
local is_quot_complete = function(str)
|
||||
local num, w = 0
|
||||
require "math"
|
||||
|
||||
if not str then
|
||||
return true
|
||||
end
|
||||
|
||||
num = 0
|
||||
for w in str:gmatch("\"") do
|
||||
num = num + 1
|
||||
end
|
||||
|
||||
if math.fmod(num, 2) ~= 0 then
|
||||
return false
|
||||
end
|
||||
|
||||
num = 0
|
||||
for w in str:gmatch("\'") do
|
||||
num = num + 1
|
||||
end
|
||||
|
||||
if math.fmod(num, 2) ~= 0 then
|
||||
return false
|
||||
end
|
||||
|
||||
return true
|
||||
end
|
||||
|
||||
function contains(list, x)
|
||||
for _, v in pairs(list) do
|
||||
if v == x then
|
||||
return true
|
||||
end
|
||||
end
|
||||
return false
|
||||
end
|
||||
|
||||
local resolve_cli = function(cmd_line)
|
||||
local config = {
|
||||
advance = 1
|
||||
}
|
||||
|
||||
local key_no_val = {
|
||||
't',
|
||||
'd',
|
||||
'i',
|
||||
'tty',
|
||||
'rm',
|
||||
'read_only',
|
||||
'interactive',
|
||||
'init',
|
||||
'help',
|
||||
'detach',
|
||||
'privileged',
|
||||
'P',
|
||||
'publish_all',
|
||||
}
|
||||
|
||||
local key_with_val = {
|
||||
'sysctl',
|
||||
'add_host',
|
||||
'a',
|
||||
'attach',
|
||||
'blkio_weight_device',
|
||||
'cap_add',
|
||||
'cap_drop',
|
||||
'device',
|
||||
'device_cgroup_rule',
|
||||
'device_read_bps',
|
||||
'device_read_iops',
|
||||
'device_write_bps',
|
||||
'device_write_iops',
|
||||
'dns',
|
||||
'dns_option',
|
||||
'dns_search',
|
||||
'e',
|
||||
'env',
|
||||
'env_file',
|
||||
'expose',
|
||||
'group_add',
|
||||
'l',
|
||||
'label',
|
||||
'label_file',
|
||||
'link',
|
||||
'link_local_ip',
|
||||
'log_driver',
|
||||
'log_opt',
|
||||
'network_alias',
|
||||
'p',
|
||||
'publish',
|
||||
'security_opt',
|
||||
'storage_opt',
|
||||
'tmpfs',
|
||||
'v',
|
||||
'volume',
|
||||
'volumes_from',
|
||||
'blkio_weight',
|
||||
'cgroup_parent',
|
||||
'cidfile',
|
||||
'cpu_period',
|
||||
'cpu_quota',
|
||||
'cpu_rt_period',
|
||||
'cpu_rt_runtime',
|
||||
'c',
|
||||
'cpu_shares',
|
||||
'cpus',
|
||||
'cpuset_cpus',
|
||||
'cpuset_mems',
|
||||
'detach_keys',
|
||||
'disable_content_trust',
|
||||
'domainname',
|
||||
'entrypoint',
|
||||
'gpus',
|
||||
'health_cmd',
|
||||
'health_interval',
|
||||
'health_retries',
|
||||
'health_start_period',
|
||||
'health_timeout',
|
||||
'h',
|
||||
'hostname',
|
||||
'ip',
|
||||
'ip6',
|
||||
'ipc',
|
||||
'isolation',
|
||||
'kernel_memory',
|
||||
'mac_address',
|
||||
'm',
|
||||
'memory',
|
||||
'memory_reservation',
|
||||
'memory_swap',
|
||||
'memory_swappiness',
|
||||
'mount',
|
||||
'name',
|
||||
'network',
|
||||
'no_healthcheck',
|
||||
'oom_kill_disable',
|
||||
'oom_score_adj',
|
||||
'pid',
|
||||
'pids_limit',
|
||||
'restart',
|
||||
'runtime',
|
||||
'shm_size',
|
||||
'sig_proxy',
|
||||
'stop_signal',
|
||||
'stop_timeout',
|
||||
'ulimit',
|
||||
'u',
|
||||
'user',
|
||||
'userns',
|
||||
'uts',
|
||||
'volume_driver',
|
||||
'w',
|
||||
'workdir'
|
||||
}
|
||||
|
||||
local key_abb = {
|
||||
net='network',
|
||||
a='attach',
|
||||
c='cpu-shares',
|
||||
d='detach',
|
||||
e='env',
|
||||
h='hostname',
|
||||
i='interactive',
|
||||
l='label',
|
||||
m='memory',
|
||||
p='publish',
|
||||
P='publish_all',
|
||||
t='tty',
|
||||
u='user',
|
||||
v='volume',
|
||||
w='workdir'
|
||||
}
|
||||
|
||||
local key_with_list = {
|
||||
'sysctl',
|
||||
'add_host',
|
||||
'a',
|
||||
'attach',
|
||||
'blkio_weight_device',
|
||||
'cap_add',
|
||||
'cap_drop',
|
||||
'device',
|
||||
'device_cgroup_rule',
|
||||
'device_read_bps',
|
||||
'device_read_iops',
|
||||
'device_write_bps',
|
||||
'device_write_iops',
|
||||
'dns',
|
||||
'dns_option',
'dns_search',
|
||||
'e',
|
||||
'env',
|
||||
'env_file',
|
||||
'expose',
|
||||
'group_add',
|
||||
'l',
|
||||
'label',
|
||||
'label_file',
|
||||
'link',
|
||||
'link_local_ip',
|
||||
'log_opt',
|
||||
'network_alias',
|
||||
'p',
|
||||
'publish',
|
||||
'security_opt',
|
||||
'storage_opt',
|
||||
'tmpfs',
|
||||
'v',
|
||||
'volume',
|
||||
'volumes_from',
|
||||
}
|
||||
|
||||
local key = nil
|
||||
local _key = nil
|
||||
local val = nil
|
||||
local is_cmd = false
|
||||
|
||||
cmd_line = cmd_line:match("^DOCKERCLI%s+(.+)")
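-- resolve_cli walks a "docker run"-style command line token by token, mapping flags to
-- config keys via key_no_val / key_with_val / key_abb; the first non-flag token is taken
-- as the image and everything after it as the command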
|
||||
for w in cmd_line:gmatch("[^%s]+") do
|
||||
if w =='\\' then
|
||||
elseif not key and not _key and not is_cmd then
|
||||
--key=val
|
||||
key, val = w:match("^%-%-([%lP%-]-)=(.+)")
|
||||
if not key then
|
||||
--key val
|
||||
key = w:match("^%-%-([%lP%-]+)")
|
||||
if not key then
|
||||
-- -v val
|
||||
key = w:match("^%-([%lP%-]+)")
|
||||
if key then
|
||||
-- for -dit
|
||||
if key:match("i") or key:match("t") or key:match("d") then
|
||||
if key:match("i") then
|
||||
config[key_abb["i"]] = true
|
||||
key:gsub("i", "")
|
||||
end
|
||||
if key:match("t") then
|
||||
config[key_abb["t"]] = true
|
||||
key:gsub("t", "")
|
||||
end
|
||||
if key:match("d") then
|
||||
config[key_abb["d"]] = true
|
||||
key:gsub("d", "")
|
||||
end
|
||||
if key:match("P") then
|
||||
config[key_abb["P"]] = true
|
||||
key:gsub("P", "")
|
||||
end
|
||||
if key == "" then
|
||||
key = nil
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
if key then
|
||||
key = key:gsub("-","_")
|
||||
key = key_abb[key] or key
|
||||
if contains(key_no_val, key) then
|
||||
config[key] = true
|
||||
val = nil
|
||||
key = nil
|
||||
elseif contains(key_with_val, key) then
|
||||
-- if key == "cap_add" then config.privileged = true end
|
||||
else
|
||||
key = nil
|
||||
val = nil
|
||||
end
|
||||
else
|
||||
config.image = w
|
||||
key = nil
|
||||
val = nil
|
||||
is_cmd = true
|
||||
end
|
||||
elseif (key or _key) and not is_cmd then
|
||||
if key == "mount" then
|
||||
-- we need resolve mount options here
|
||||
-- type=bind,source=/source,target=/app
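-- --mount options are normalized here: tmpfs mounts are collected under config.tmpfs,
-- bind/volume mounts are collapsed into a "source:target[:options]" string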
|
||||
local _type = w:match("^type=([^,]+),") or "bind"
|
||||
local source = (_type ~= "tmpfs") and (w:match("source=([^,]+),") or w:match("src=([^,]+),")) or ""
|
||||
local target = w:match(",target=([^,]+)") or w:match(",dst=([^,]+)") or w:match(",destination=([^,]+)") or ""
|
||||
local ro = w:match(",readonly") and "ro" or nil
|
||||
|
||||
if source and target then
|
||||
if _type ~= "tmpfs" then
|
||||
local bind_propagation = (_type == "bind") and w:match(",bind%-propagation=([^,]+)") or nil
|
||||
val = source..":"..target .. ((ro or bind_propagation) and (":" .. (ro and ro or "") .. (((ro and bind_propagation) and "," or "") .. (bind_propagation and bind_propagation or ""))or ""))
|
||||
else
|
||||
local tmpfs_mode = w:match(",tmpfs%-mode=([^,]+)") or nil
|
||||
local tmpfs_size = w:match(",tmpfs%-size=([^,]+)") or nil
|
||||
key = "tmpfs"
|
||||
val = target .. ((tmpfs_mode or tmpfs_size) and (":" .. (tmpfs_mode and ("mode=" .. tmpfs_mode) or "") .. ((tmpfs_mode and tmpfs_size) and "," or "") .. (tmpfs_size and ("size=".. tmpfs_size) or "")) or "")
|
||||
if not config[key] then
|
||||
config[key] = {}
|
||||
end
|
||||
table.insert( config[key], val )
|
||||
key = nil
|
||||
val = nil
|
||||
end
|
||||
end
|
||||
else
|
||||
val = w
|
||||
end
|
||||
elseif is_cmd then
|
||||
config["command"] = (config["command"] and (config["command"] .. " " )or "") .. w
|
||||
end
|
||||
if (key or _key) and val then
|
||||
key = _key or key
|
||||
if contains(key_with_list, key) then
|
||||
if not config[key] then
|
||||
config[key] = {}
|
||||
end
|
||||
if _key then
|
||||
config[key][#config[key]] = config[key][#config[key]] .. " " .. w
|
||||
else
|
||||
table.insert( config[key], val )
|
||||
end
|
||||
if is_quot_complete(config[key][#config[key]]) then
|
||||
config[key][#config[key]] = config[key][#config[key]]:gsub("[\"\']", "")
|
||||
_key = nil
|
||||
else
|
||||
_key = key
|
||||
end
|
||||
else
|
||||
config[key] = (config[key] and (config[key] .. " ") or "") .. val
|
||||
if is_quot_complete(config[key]) then
|
||||
config[key] = config[key]:gsub("[\"\']", "")
|
||||
_key = nil
|
||||
else
|
||||
_key = key
|
||||
end
|
||||
end
|
||||
key = nil
|
||||
val = nil
|
||||
end
|
||||
end
|
||||
|
||||
return config
|
||||
end
|
||||
|
||||
local default_config = {}
|
||||
|
||||
if cmd_line and cmd_line:match("^DOCKERCLI.+") then
|
||||
default_config = resolve_cli(cmd_line)
|
||||
elseif cmd_line and cmd_line:match("^duplicate/[^/]+$") then
|
||||
local container_id = cmd_line:match("^duplicate/(.+)")
|
||||
create_body = dk:containers_duplicate_config({id = container_id}) or {}
|
||||
if not create_body.HostConfig then
|
||||
create_body.HostConfig = {}
|
||||
end
|
||||
|
||||
if next(create_body) ~= nil then
|
||||
default_config.name = nil
|
||||
default_config.image = create_body.Image
|
||||
default_config.hostname = create_body.Hostname
|
||||
default_config.tty = create_body.Tty and true or false
|
||||
default_config.interactive = create_body.OpenStdin and true or false
|
||||
default_config.privileged = create_body.HostConfig.Privileged and true or false
|
||||
default_config.restart = create_body.HostConfig.RestartPolicy and create_body.HostConfig.RestartPolicy.name or nil
|
||||
-- default_config.network = create_body.HostConfig.NetworkMode == "default" and "bridge" or create_body.HostConfig.NetworkMode
|
||||
-- if container has leave original network, and add new network, .HostConfig.NetworkMode is INcorrect, so using first child of .NetworkingConfig.EndpointsConfig
|
||||
default_config.network = create_body.NetworkingConfig and create_body.NetworkingConfig.EndpointsConfig and next(create_body.NetworkingConfig.EndpointsConfig) or nil
|
||||
default_config.ip = default_config.network and default_config.network ~= "bridge" and default_config.network ~= "host" and default_config.network ~= "null" and create_body.NetworkingConfig.EndpointsConfig[default_config.network].IPAMConfig and create_body.NetworkingConfig.EndpointsConfig[default_config.network].IPAMConfig.IPv4Address or nil
|
||||
default_config.link = create_body.HostConfig.Links
|
||||
default_config.env = create_body.Env
|
||||
default_config.dns = create_body.HostConfig.Dns
|
||||
default_config.volume = create_body.HostConfig.Binds
|
||||
default_config.cap_add = create_body.HostConfig.CapAdd
|
||||
default_config.publish_all = create_body.HostConfig.PublishAllPorts
|
||||
|
||||
if create_body.HostConfig.Sysctls and type(create_body.HostConfig.Sysctls) == "table" then
|
||||
default_config.sysctl = {}
|
||||
for k, v in pairs(create_body.HostConfig.Sysctls) do
|
||||
table.insert( default_config.sysctl, k.."="..v )
|
||||
end
|
||||
end
|
||||
if create_body.HostConfig.LogConfig then
|
||||
if create_body.HostConfig.LogConfig.Config and type(create_body.HostConfig.LogConfig.Config) == "table" then
|
||||
default_config.log_opt = {}
|
||||
for k, v in pairs(create_body.HostConfig.LogConfig.Config) do
|
||||
table.insert( default_config.log_opt, k.."="..v )
|
||||
end
|
||||
end
|
||||
default_config.log_driver = create_body.HostConfig.LogConfig.Type or nil
|
||||
end
|
||||
|
||||
if create_body.HostConfig.PortBindings and type(create_body.HostConfig.PortBindings) == "table" then
|
||||
default_config.publish = {}
|
||||
for k, v in pairs(create_body.HostConfig.PortBindings) do
|
||||
for x, y in ipairs(v) do
|
||||
table.insert( default_config.publish, y.HostPort..":"..k:match("^(%d+)/.+").."/"..k:match("^%d+/(.+)") )
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
default_config.user = create_body.User or nil
|
||||
default_config.command = create_body.Cmd and type(create_body.Cmd) == "table" and table.concat(create_body.Cmd, " ") or nil
|
||||
default_config.advance = 1
|
||||
default_config.cpus = create_body.HostConfig.NanoCPUs
|
||||
default_config.cpu_shares = create_body.HostConfig.CpuShares
|
||||
default_config.memory = create_body.HostConfig.Memory
|
||||
default_config.blkio_weight = create_body.HostConfig.BlkioWeight
|
||||
|
||||
if create_body.HostConfig.Devices and type(create_body.HostConfig.Devices) == "table" then
|
||||
default_config.device = {}
|
||||
for _, v in ipairs(create_body.HostConfig.Devices) do
|
||||
table.insert( default_config.device, v.PathOnHost..":"..v.PathInContainer..(v.CgroupPermissions ~= "" and (":" .. v.CgroupPermissions) or "") )
|
||||
end
|
||||
end
|
||||
|
||||
if create_body.HostConfig.Tmpfs and type(create_body.HostConfig.Tmpfs) == "table" then
|
||||
default_config.tmpfs = {}
|
||||
for k, v in pairs(create_body.HostConfig.Tmpfs) do
|
||||
table.insert( default_config.tmpfs, k .. (v~="" and ":" or "")..v )
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
m = SimpleForm("docker", translate("Docker - Containers"))
|
||||
m.redirect = luci.dispatcher.build_url("admin", "docker", "containers")
|
||||
if lost_state then
|
||||
m.submit=false
|
||||
m.reset=false
|
||||
end
|
||||
|
||||
s = m:section(SimpleSection)
|
||||
s.template = "dockerman/apply_widget"
|
||||
s.err=docker:read_status()
|
||||
s.err=s.err and s.err:gsub("\n","<br>"):gsub(" ","&nbsp;")
|
||||
if s.err then
|
||||
docker:clear_status()
|
||||
end
|
||||
|
||||
s = m:section(SimpleSection, translate("Create new docker container"))
|
||||
s.addremove = true
|
||||
s.anonymous = true
|
||||
|
||||
o = s:option(DummyValue,"cmd_line", translate("Resolve CLI"))
|
||||
o.rawhtml = true
|
||||
o.template = "dockerman/newcontainer_resolve"
|
||||
|
||||
o = s:option(Value, "name", translate("Container Name"))
|
||||
o.rmempty = true
|
||||
o.default = default_config.name or nil
|
||||
|
||||
o = s:option(Flag, "interactive", translate("Interactive (-i)"))
|
||||
o.rmempty = true
|
||||
o.disabled = 0
|
||||
o.enabled = 1
|
||||
o.default = default_config.interactive and 1 or 0
|
||||
|
||||
o = s:option(Flag, "tty", translate("TTY (-t)"))
|
||||
o.rmempty = true
|
||||
o.disabled = 0
|
||||
o.enabled = 1
|
||||
o.default = default_config.tty and 1 or 0
|
||||
|
||||
o = s:option(Value, "image", translate("Docker Image"))
|
||||
o.rmempty = true
|
||||
o.default = default_config.image or nil
|
||||
for _, v in ipairs (images) do
|
||||
if v.RepoTags then
|
||||
o:value(v.RepoTags[1], v.RepoTags[1])
|
||||
end
|
||||
end
|
||||
|
||||
o = s:option(Flag, "_force_pull", translate("Always pull image first"))
|
||||
o.rmempty = true
|
||||
o.disabled = 0
|
||||
o.enabled = 1
|
||||
o.default = 0
|
||||
|
||||
o = s:option(Flag, "privileged", translate("Privileged"))
|
||||
o.rmempty = true
|
||||
o.disabled = 0
|
||||
o.enabled = 1
|
||||
o.default = default_config.privileged and 1 or 0
|
||||
|
||||
o = s:option(ListValue, "restart", translate("Restart Policy"))
|
||||
o.rmempty = true
|
||||
o:value("no", "No")
|
||||
o:value("unless-stopped", "Unless stopped")
|
||||
o:value("always", "Always")
|
||||
o:value("on-failure", "On failure")
|
||||
o.default = default_config.restart or "unless-stopped"
|
||||
|
||||
local d_network = s:option(ListValue, "network", translate("Networks"))
|
||||
d_network.rmempty = true
|
||||
d_network.default = default_config.network or "bridge"
|
||||
|
||||
local d_ip = s:option(Value, "ip", translate("IPv4 Address"))
|
||||
d_ip.datatype="ip4addr"
|
||||
d_ip:depends("network", "nil")
|
||||
d_ip.default = default_config.ip or nil
|
||||
|
||||
o = s:option(DynamicList, "link", translate("Links with other containers"))
|
||||
o.placeholder = "container_name:alias"
|
||||
o.rmempty = true
|
||||
o:depends("network", "bridge")
|
||||
o.default = default_config.link or nil
|
||||
|
||||
o = s:option(DynamicList, "dns", translate("Set custom DNS servers"))
|
||||
o.placeholder = "8.8.8.8"
|
||||
o.rmempty = true
|
||||
o.default = default_config.dns or nil
|
||||
|
||||
o = s:option(Value, "user",
|
||||
translate("User(-u)"),
|
||||
translate("The user that commands are run as inside the container.(format: name|uid[:group|gid])"))
|
||||
o.placeholder = "1000:1000"
|
||||
o.rmempty = true
|
||||
o.default = default_config.user or nil
|
||||
|
||||
o = s:option(DynamicList, "env",
|
||||
translate("Environmental Variable(-e)"),
|
||||
translate("Set environment variables to inside the container"))
|
||||
o.placeholder = "TZ=Asia/Shanghai"
|
||||
o.rmempty = true
|
||||
o.default = default_config.env or nil
|
||||
|
||||
o = s:option(DynamicList, "volume",
|
||||
translate("Bind Mount(-v)"),
|
||||
translate("Bind mount a volume"))
|
||||
o.placeholder = "/media:/media:slave"
|
||||
o.rmempty = true
|
||||
o.default = default_config.volume or nil
|
||||
|
||||
local d_publish = s:option(DynamicList, "publish",
|
||||
translate("Exposed Ports(-p)"),
|
||||
translate("Publish container's port(s) to the host"))
|
||||
d_publish.placeholder = "2200:22/tcp"
|
||||
d_publish.rmempty = true
|
||||
d_publish.default = default_config.publish or nil
|
||||
|
||||
o = s:option(Value, "command", translate("Run command"))
|
||||
o.placeholder = "/bin/sh init.sh"
|
||||
o.rmempty = true
|
||||
o.default = default_config.command or nil
|
||||
|
||||
o = s:option(Flag, "advance", translate("Advance"))
|
||||
o.rmempty = true
|
||||
o.disabled = 0
|
||||
o.enabled = 1
|
||||
o.default = default_config.advance or 0
|
||||
|
||||
o = s:option(Value, "hostname",
|
||||
translate("Host Name"),
|
||||
translate("The hostname to use for the container"))
|
||||
o.rmempty = true
|
||||
o.default = default_config.hostname or nil
|
||||
o:depends("advance", 1)
|
||||
|
||||
o = s:option(Flag, "publish_all",
|
||||
translate("Exposed All Ports(-P)"),
|
||||
translate("Allocates an ephemeral host port for all of a container's exposed ports"))
|
||||
o.rmempty = true
|
||||
o.disabled = 0
|
||||
o.enabled = 1
|
||||
o.default = default_config.publish_all and 1 or 0
|
||||
o:depends("advance", 1)
|
||||
|
||||
o = s:option(DynamicList, "device",
|
||||
translate("Device(--device)"),
|
||||
translate("Add host device to the container"))
|
||||
o.placeholder = "/dev/sda:/dev/xvdc:rwm"
|
||||
o.rmempty = true
|
||||
o:depends("advance", 1)
|
||||
o.default = default_config.device or nil
|
||||
|
||||
o = s:option(DynamicList, "tmpfs",
|
||||
translate("Tmpfs(--tmpfs)"),
|
||||
translate("Mount tmpfs directory"))
|
||||
o.placeholder = "/run:rw,noexec,nosuid,size=65536k"
|
||||
o.rmempty = true
|
||||
o:depends("advance", 1)
|
||||
o.default = default_config.tmpfs or nil
|
||||
|
||||
o = s:option(DynamicList, "sysctl",
|
||||
translate("Sysctl(--sysctl)"),
|
||||
translate("Sysctls (kernel parameters) options"))
|
||||
o.placeholder = "net.ipv4.ip_forward=1"
|
||||
o.rmempty = true
|
||||
o:depends("advance", 1)
|
||||
o.default = default_config.sysctl or nil
|
||||
|
||||
o = s:option(DynamicList, "cap_add",
|
||||
translate("CAP-ADD(--cap-add)"),
|
||||
translate("A list of kernel capabilities to add to the container"))
|
||||
o.placeholder = "NET_ADMIN"
|
||||
o.rmempty = true
|
||||
o:depends("advance", 1)
|
||||
o.default = default_config.cap_add or nil
|
||||
|
||||
o = s:option(Value, "cpus",
|
||||
translate("CPUs"),
|
||||
translate("Number of CPUs. Number is a fractional number. 0.000 means no limit"))
|
||||
o.placeholder = "1.5"
|
||||
o.rmempty = true
|
||||
o:depends("advance", 1)
|
||||
o.datatype="ufloat"
|
||||
o.default = default_config.cpus or nil
|
||||
|
||||
o = s:option(Value, "cpu_shares",
|
||||
translate("CPU Shares Weight"),
|
||||
translate("CPU shares relative weight, if 0 is set, the system will ignore the value and use the default of 1024"))
|
||||
o.placeholder = "1024"
|
||||
o.rmempty = true
|
||||
o:depends("advance", 1)
|
||||
o.datatype="uinteger"
|
||||
o.default = default_config.cpu_shares or nil
|
||||
|
||||
o = s:option(Value, "memory",
|
||||
translate("Memory"),
|
||||
translate("Memory limit (format: <number>[<unit>]). Number is a positive integer. Unit can be one of b, k, m, or g. Minimum is 4M"))
|
||||
o.placeholder = "128m"
|
||||
o.rmempty = true
|
||||
o:depends("advance", 1)
|
||||
o.default = default_config.memory or nil
|
||||
|
||||
o = s:option(Value, "blkio_weight",
|
||||
translate("Block IO Weight"),
|
||||
translate("Block IO weight (relative weight) accepts a weight value between 10 and 1000"))
|
||||
o.placeholder = "500"
|
||||
o.rmempty = true
|
||||
o:depends("advance", 1)
|
||||
o.datatype="uinteger"
|
||||
o.default = default_config.blkio_weight or nil
|
||||
|
||||
o = s:option(Value, "log_driver",
|
||||
translate("Logging driver"),
|
||||
translate("The logging driver for the container"))
|
||||
o.placeholder = "json-file"
|
||||
o.rmempty = true
|
||||
o:depends("advance", 1)
|
||||
o.default = default_config.log_driver or nil
|
||||
|
||||
o = s:option(DynamicList, "log_opt",
|
||||
translate("Log driver options"),
|
||||
translate("The logging configuration for this container"))
|
||||
o.placeholder = "max-size=1m"
|
||||
o.rmempty = true
|
||||
o:depends("advance", 1)
|
||||
o.default = default_config.log_opt or nil
|
||||
|
||||
for _, v in ipairs (networks) do
|
||||
if v.Name then
|
||||
local parent = v.Options and v.Options.parent or nil
|
||||
local ip = v.IPAM and v.IPAM.Config and v.IPAM.Config[1] and v.IPAM.Config[1].Subnet or nil
|
||||
ipv6 = v.IPAM and v.IPAM.Config and v.IPAM.Config[2] and v.IPAM.Config[2].Subnet or nil
|
||||
local network_name = v.Name .. " | " .. v.Driver .. (parent and (" | " .. parent) or "") .. (ip and (" | " .. ip) or "").. (ipv6 and (" | " .. ipv6) or "")
|
||||
d_network:value(v.Name, network_name)
|
||||
|
||||
if v.Name ~= "none" and v.Name ~= "bridge" and v.Name ~= "host" then
|
||||
d_ip:depends("network", v.Name)
|
||||
end
|
||||
|
||||
if v.Driver == "bridge" then
|
||||
d_publish:depends("network", v.Name)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
m.handle = function(self, state, data)
|
||||
if state ~= FORM_VALID then
|
||||
return
|
||||
end
|
||||
|
||||
local tmp
|
||||
local name = data.name or ("luci_" .. os.date("%Y%m%d%H%M%S"))
|
||||
local hostname = data.hostname
|
||||
local tty = type(data.tty) == "number" and (data.tty == 1 and true or false) or default_config.tty or false
|
||||
local publish_all = type(data.publish_all) == "number" and (data.publish_all == 1 and true or false) or default_config.publish_all or false
|
||||
local interactive = type(data.interactive) == "number" and (data.interactive == 1 and true or false) or default_config.interactive or false
|
||||
local image = data.image
|
||||
local user = data.user
|
||||
|
||||
if image and not image:match(".-:.+") then
|
||||
image = image .. ":latest"
|
||||
end
|
||||
|
||||
local privileged = type(data.privileged) == "number" and (data.privileged == 1 and true or false) or default_config.privileged or false
|
||||
local restart = data.restart
|
||||
local env = data.env
|
||||
local dns = data.dns
|
||||
local cap_add = data.cap_add
|
||||
local sysctl = {}
|
||||
local log_driver = data.log_driver
|
||||
|
||||
tmp = data.sysctl
|
||||
if type(tmp) == "table" then
|
||||
for i, v in ipairs(tmp) do
|
||||
local k,v1 = v:match("(.-)=(.+)")
|
||||
if k and v1 then
|
||||
sysctl[k]=v1
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
local log_opt = {}
|
||||
tmp = data.log_opt
|
||||
if type(tmp) == "table" then
|
||||
for i, v in ipairs(tmp) do
|
||||
local k,v1 = v:match("(.-)=(.+)")
|
||||
if k and v1 then
|
||||
log_opt[k]=v1
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
local network = data.network
|
||||
local ip = (network ~= "bridge" and network ~= "host" and network ~= "none") and data.ip or nil
|
||||
local volume = data.volume
|
||||
local memory = data.memory or nil
|
||||
local cpu_shares = data.cpu_shares or nil
|
||||
local cpus = data.cpus or nil
|
||||
local blkio_weight = data.blkio_weight or nil
|
||||
|
||||
local portbindings = {}
|
||||
local exposedports = {}
|
||||
|
||||
local tmpfs = {}
|
||||
tmp = data.tmpfs
|
||||
if type(tmp) == "table" then
|
||||
for i, v in ipairs(tmp)do
|
||||
local k= v:match("([^:]+)")
|
||||
local v1 = v:match(".-:([^:]+)") or ""
|
||||
if k then
|
||||
tmpfs[k]=v1
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
local device = {}
|
||||
tmp = data.device
|
||||
if type(tmp) == "table" then
|
||||
for i, v in ipairs(tmp) do
|
||||
local t = {}
|
||||
local _,_, h, c, p = v:find("(.-):(.-):(.+)")
|
||||
if h and c then
|
||||
t['PathOnHost'] = h
|
||||
t['PathInContainer'] = c
|
||||
t['CgroupPermissions'] = p or "rwm"
|
||||
else
|
||||
local _,_, h, c = v:find("(.-):(.+)")
|
||||
if h and c then
|
||||
t['PathOnHost'] = h
|
||||
t['PathInContainer'] = c
|
||||
t['CgroupPermissions'] = "rwm"
|
||||
else
|
||||
t['PathOnHost'] = v
|
||||
t['PathInContainer'] = v
|
||||
t['CgroupPermissions'] = "rwm"
|
||||
end
|
||||
end
|
||||
|
||||
if next(t) ~= nil then
|
||||
table.insert( device, t )
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
tmp = data.publish or {}
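-- every "hostport:containerport[/proto]" entry becomes a PortBindings entry plus an
-- ExposedPorts key; tcp is assumed when no protocol is given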
|
||||
for i, v in ipairs(tmp) do
|
||||
for v1 ,v2 in string.gmatch(v, "(%d+):([^%s]+)") do
|
||||
local _,_,p= v2:find("^%d+/(%w+)")
|
||||
if p == nil then
|
||||
v2=v2..'/tcp'
|
||||
end
|
||||
portbindings[v2] = {{HostPort=v1}}
|
||||
exposedports[v2] = {HostPort=v1}
|
||||
end
|
||||
end
|
||||
|
||||
local link = data.link
|
||||
tmp = data.command
|
||||
local command = {}
|
||||
if tmp ~= nil then
|
||||
for v in string.gmatch(tmp, "[^%s]+") do
|
||||
command[#command+1] = v
|
||||
end
|
||||
end
|
||||
|
||||
if memory and memory ~= 0 then
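-- convert the human readable limit ("128m", "1g", ...) into bytes, which is what the
-- Docker API expects in HostConfig.Memory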
|
||||
_,_,n,unit = memory:find("([%d%.]+)([%l%u]+)")
|
||||
if n then
|
||||
unit = unit and unit:sub(1,1):upper() or "B"
|
||||
if unit == "M" then
|
||||
memory = tonumber(n) * 1024 * 1024
|
||||
elseif unit == "G" then
|
||||
memory = tonumber(n) * 1024 * 1024 * 1024
|
||||
elseif unit == "K" then
|
||||
memory = tonumber(n) * 1024
|
||||
else
|
||||
memory = tonumber(n)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
create_body.Hostname = network ~= "host" and (hostname or name) or nil
|
||||
create_body.Tty = tty and true or false
|
||||
create_body.OpenStdin = interactive and true or false
|
||||
create_body.User = user
|
||||
create_body.Cmd = command
|
||||
create_body.Env = env
|
||||
create_body.Image = image
|
||||
create_body.ExposedPorts = exposedports
|
||||
create_body.HostConfig = create_body.HostConfig or {}
|
||||
create_body.HostConfig.Dns = dns
|
||||
create_body.HostConfig.Binds = volume
|
||||
create_body.HostConfig.RestartPolicy = { Name = restart, MaximumRetryCount = 0 }
|
||||
create_body.HostConfig.Privileged = privileged and true or false
|
||||
create_body.HostConfig.PortBindings = portbindings
|
||||
create_body.HostConfig.Memory = memory and tonumber(memory)
|
||||
create_body.HostConfig.CpuShares = cpu_shares and tonumber(cpu_shares)
|
||||
create_body.HostConfig.NanoCPUs = cpus and tonumber(cpus) * 10 ^ 9
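-- the Docker API expresses CPU limits in NanoCPUs, i.e. units of 10^-9 CPUs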
|
||||
create_body.HostConfig.BlkioWeight = blkio_weight and tonumber(blkio_weight)
|
||||
create_body.HostConfig.PublishAllPorts = publish_all
|
||||
|
||||
if create_body.HostConfig.NetworkMode ~= network then
|
||||
create_body.NetworkingConfig = nil
|
||||
end
|
||||
|
||||
create_body.HostConfig.NetworkMode = network
|
||||
|
||||
if ip then
|
||||
if create_body.NetworkingConfig and create_body.NetworkingConfig.EndpointsConfig and type(create_body.NetworkingConfig.EndpointsConfig) == "table" then
|
||||
for k, v in pairs (create_body.NetworkingConfig.EndpointsConfig) do
|
||||
if k == network and v.IPAMConfig and v.IPAMConfig.IPv4Address then
|
||||
v.IPAMConfig.IPv4Address = ip
|
||||
else
|
||||
create_body.NetworkingConfig.EndpointsConfig = { [network] = { IPAMConfig = { IPv4Address = ip } } }
|
||||
end
|
||||
break
|
||||
end
|
||||
else
|
||||
create_body.NetworkingConfig = { EndpointsConfig = { [network] = { IPAMConfig = { IPv4Address = ip } } } }
|
||||
end
|
||||
elseif not create_body.NetworkingConfig then
|
||||
create_body.NetworkingConfig = nil
|
||||
end
|
||||
|
||||
create_body["HostConfig"]["Tmpfs"] = tmpfs
|
||||
create_body["HostConfig"]["Devices"] = device
|
||||
create_body["HostConfig"]["Sysctls"] = sysctl
|
||||
create_body["HostConfig"]["CapAdd"] = cap_add
|
||||
create_body["HostConfig"]["LogConfig"] = {
|
||||
Config = log_opt,
|
||||
Type = log_driver
|
||||
}
|
||||
|
||||
if network == "bridge" then
|
||||
create_body["HostConfig"]["Links"] = link
|
||||
end
|
||||
|
||||
local pull_image = function(image)
|
||||
local json_stringify = luci.jsonc and luci.jsonc.stringify
|
||||
docker:append_status("Images: " .. "pulling" .. " " .. image .. "...\n")
|
||||
local res = dk.images:create({query = {fromImage=image}}, docker.pull_image_show_status_cb)
|
||||
if res and res.code and res.code == 200 and (res.body[#res.body] and not res.body[#res.body].error and res.body[#res.body].status and (res.body[#res.body].status == "Status: Downloaded newer image for ".. image or res.body[#res.body].status == "Status: Image is up to date for ".. image)) then
|
||||
docker:append_status("done\n")
|
||||
else
|
||||
res.code = (res.code == 200) and 500 or res.code
|
||||
docker:append_status("code:" .. res.code.." ".. (res.body[#res.body] and res.body[#res.body].error or (res.body.message or res.message)).. "\n")
|
||||
luci.http.redirect(luci.dispatcher.build_url("admin/docker/newcontainer"))
|
||||
end
|
||||
end
|
||||
|
||||
docker:clear_status()
|
||||
local exist_image = false
|
||||
|
||||
if image then
|
||||
for _, v in ipairs (images) do
|
||||
if v.RepoTags and v.RepoTags[1] == image then
|
||||
exist_image = true
|
||||
break
|
||||
end
|
||||
end
|
||||
if not exist_image then
|
||||
pull_image(image)
|
||||
elseif data._force_pull == 1 then
|
||||
pull_image(image)
|
||||
end
|
||||
end
|
||||
|
||||
create_body = docker.clear_empty_tables(create_body)

docker:append_status("Container: " .. "create" .. " " .. name .. "...")
local res = dk.containers:create({name = name, body = create_body})
if res and res.code and res.code == 201 then
	docker:clear_status()
	luci.http.redirect(luci.dispatcher.build_url("admin/docker/containers"))
else
	docker:append_status("code:" .. res.code .. " " .. (res.body.message and res.body.message or res.message))
	luci.http.redirect(luci.dispatcher.build_url("admin/docker/newcontainer"))
end
end

return m
@ -1,258 +0,0 @@
--[[
LuCI - Lua Configuration Interface
Copyright 2019 lisaac <https://github.com/lisaac/luci-app-dockerman>
]]--

local docker = require "luci.model.docker"

local m, s, o
local lost_state = false

local dk = docker.new()
if dk:_ping().code ~= 200 then
	lost_state = true
end

m = SimpleForm("docker", translate("Docker - Network"))
m.redirect = luci.dispatcher.build_url("admin", "docker", "networks")
if lost_state then
	m.submit = false
	m.reset = false
end

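-- Status widget: shows the last operation log written via luci.model.docker and clears it once
-- it has been displayed.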
s = m:section(SimpleSection)
s.template = "dockerman/apply_widget"
s.err = docker:read_status()
s.err = s.err and s.err:gsub("\n", "<br>"):gsub(" ", "&nbsp;")
if s.err then
	docker:clear_status()
end

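-- Everything collected by the options below is turned into a Docker Engine network-create
-- request in m.handle at the bottom of this file.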
s = m:section(SimpleSection, translate("Create new docker network"))
s.addremove = true
s.anonymous = true

o = s:option(Value, "name",
	translate("Network Name"),
	translate("Name of the network that can be selected during container creation"))
o.rmempty = true

o = s:option(ListValue, "driver", translate("Driver"))
o.rmempty = true
o:value("bridge", translate("Bridge device"))
o:value("macvlan", translate("MAC VLAN"))
o:value("ipvlan", translate("IP VLAN"))
o:value("overlay", translate("Overlay network"))

o = s:option(Value, "parent", translate("Base device"))
o.rmempty = true
o:depends("driver", "macvlan")
local interfaces = luci.sys and luci.sys.net and luci.sys.net.devices() or {}
for _, v in ipairs(interfaces) do
	o:value(v, v)
end
o.default = "br-lan"
o.placeholder = "br-lan"

o = s:option(ListValue, "macvlan_mode", translate("Mode"))
|
||||
o.rmempty = true
|
||||
o:depends("driver", "macvlan")
|
||||
o.default="bridge"
|
||||
o:value("bridge", translate("Bridge (Support direct communication between MAC VLANs)"))
|
||||
o:value("private", translate("Private (Prevent communication between MAC VLANs)"))
|
||||
o:value("vepa", translate("VEPA (Virtual Ethernet Port Aggregator)"))
|
||||
o:value("passthru", translate("Pass-through (Mirror physical device to single MAC VLAN)"))
|
||||
|
||||
o = s:option(ListValue, "ipvlan_mode", translate("Ipvlan Mode"))
|
||||
o.rmempty = true
|
||||
o:depends("driver", "ipvlan")
|
||||
o.default="l3"
|
||||
o:value("l2", translate("L2 bridge"))
|
||||
o:value("l3", translate("L3 bridge"))
|
||||
|
||||
o = s:option(Flag, "ingress",
|
||||
translate("Ingress"),
|
||||
translate("Ingress network is the network which provides the routing-mesh in swarm mode"))
|
||||
o.rmempty = true
|
||||
o.disabled = 0
|
||||
o.enabled = 1
|
||||
o.default = 0
|
||||
o:depends("driver", "overlay")
|
||||
|
||||
o = s:option(DynamicList, "options", translate("Options"))
|
||||
o.rmempty = true
|
||||
o.placeholder="com.docker.network.driver.mtu=1500"
|
||||
|
||||
o = s:option(Flag, "internal", translate("Internal"), translate("Restrict external access to the network"))
|
||||
o.rmempty = true
|
||||
o:depends("driver", "overlay")
|
||||
o.disabled = 0
|
||||
o.enabled = 1
|
||||
o.default = 0
|
||||
|
||||
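-- op_macvlan is only offered when UCI network and firewall configs exist, presumably because
-- docker.create_macvlan_interface (called from m.handle) edits those files to add the
-- host-side interface.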
if nixio.fs.access("/etc/config/network") and nixio.fs.access("/etc/config/firewall") then
	o = s:option(Flag, "op_macvlan", translate("Create macvlan interface"), translate("Auto create macvlan interface in Openwrt"))
	o:depends("driver", "macvlan")
	o.disabled = 0
	o.enabled = 1
	o.default = 1
end

o = s:option(Value, "subnet", translate("Subnet"))
|
||||
o.rmempty = true
|
||||
o.placeholder="10.1.0.0/16"
|
||||
o.datatype="ip4addr"
|
||||
|
||||
o = s:option(Value, "gateway", translate("Gateway"))
|
||||
o.rmempty = true
|
||||
o.placeholder="10.1.1.1"
|
||||
o.datatype="ip4addr"
|
||||
|
||||
o = s:option(Value, "ip_range", translate("IP range"))
|
||||
o.rmempty = true
|
||||
o.placeholder="10.1.1.0/24"
|
||||
o.datatype="ip4addr"
|
||||
|
||||
o = s:option(DynamicList, "aux_address", translate("Exclude IPs"))
|
||||
o.rmempty = true
|
||||
o.placeholder="my-route=10.1.1.1"
|
||||
|
||||
o = s:option(Flag, "ipv6", translate("Enable IPv6"))
|
||||
o.rmempty = true
|
||||
o.disabled = 0
|
||||
o.enabled = 1
|
||||
o.default = 0
|
||||
|
||||
o = s:option(Value, "subnet6", translate("IPv6 Subnet"))
|
||||
o.rmempty = true
|
||||
o.placeholder="fe80::/10"
|
||||
o.datatype="ip6addr"
|
||||
o:depends("ipv6", 1)
|
||||
|
||||
o = s:option(Value, "gateway6", translate("IPv6 Gateway"))
|
||||
o.rmempty = true
|
||||
o.placeholder="fe80::1"
|
||||
o.datatype="ip6addr"
|
||||
o:depends("ipv6", 1)
|
||||
|
||||
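-- m.handle runs once the form validates: it maps the flat form fields onto the nested body
-- expected by the Docker Engine /networks/create endpoint, optionally sets up a matching
-- macvlan interface on the OpenWrt side, and redirects back to the network list.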
m.handle = function(self, state, data)
	if state == FORM_VALID then
		local name = data.name
		local driver = data.driver

		local internal = data.internal == 1 and true or false

		local subnet = data.subnet
		local gateway = data.gateway
		local ip_range = data.ip_range

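		-- DynamicList entries arrive as 'key=value' strings; split them into key/value tables
		-- for the request body.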
		local aux_address = {}
		local tmp = data.aux_address or {}
		for i, v in ipairs(tmp) do
			_, _, k1, v1 = v:find("(.-)=(.+)")
			aux_address[k1] = v1
		end

		local options = {}
		tmp = data.options or {}
		for i, v in ipairs(tmp) do
			_, _, k1, v1 = v:find("(.-)=(.+)")
			options[k1] = v1
		end

		local ipv6 = data.ipv6 == 1 and true or false

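		-- Illustrative only: with the placeholder values above, a macvlan network would produce
		-- a body roughly like
		--   { Name = "macnet", Driver = "macvlan", EnableIPv6 = false, Internal = false,
		--     IPAM = { Driver = "default", Config = { { Subnet = "10.1.0.0/16", Gateway = "10.1.1.1" } } },
		--     Options = { macvlan_mode = "bridge", parent = "br-lan" } }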
		local create_body = {
			Name = name,
			Driver = driver,
			EnableIPv6 = ipv6,
			IPAM = {
				Driver = "default"
			},
			Internal = internal
		}

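		-- Note: the Docker Engine API documents AuxiliaryAddresses; AuxAddress is set as well
		-- here, presumably kept for compatibility.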
		if subnet or gateway or ip_range then
			create_body["IPAM"]["Config"] = {
				{
					Subnet = subnet,
					Gateway = gateway,
					IPRange = ip_range,
					AuxAddress = aux_address,
					AuxiliaryAddresses = aux_address
				}
			}
		end

if driver == "macvlan" then
|
||||
create_body["Options"] = {
|
||||
macvlan_mode = data.macvlan_mode,
|
||||
parent = data.parent
|
||||
}
|
||||
elseif driver == "ipvlan" then
|
||||
create_body["Options"] = {
|
||||
ipvlan_mode = data.ipvlan_mode
|
||||
}
|
||||
elseif driver == "overlay" then
|
||||
create_body["Ingress"] = data.ingerss == 1 and true or false
|
||||
end
|
||||
|
||||
		if ipv6 and data.subnet6 and data.gateway6 then
			if type(create_body["IPAM"]["Config"]) ~= "table" then
				create_body["IPAM"]["Config"] = {}
			end
			local index = #create_body["IPAM"]["Config"]
			create_body["IPAM"]["Config"][index + 1] = {
				Subnet = data.subnet6,
				Gateway = data.gateway6
			}
		end

		if next(options) ~= nil then
			create_body["Options"] = create_body["Options"] or {}
			for k, v in pairs(options) do
				create_body["Options"][k] = v
			end
		end

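		-- Strip empty sub-tables, then hand the body to dk.networks:create (the network-create
		-- endpoint wrapped by luci.model.docker).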
		create_body = docker.clear_empty_tables(create_body)
		docker:write_status("Network: " .. "create" .. " " .. create_body.Name .. "...")

		local res = dk.networks:create({
			body = create_body
		})

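		-- 201 Created: for macvlan networks (unless op_macvlan was unchecked) the subnet and
		-- gateway reported by networks:inspect are reused by docker.create_macvlan_interface to
		-- add the corresponding host-side interface.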
		if res and res.code == 201 then
			docker:write_status("Network: " .. "create macvlan interface...")
			res = dk.networks:inspect({
				name = create_body.Name
			})

			if driver == "macvlan" and
				data.op_macvlan ~= 0 and
				res and
				res.code and
				res.code == 200 and
				res.body and
				res.body.IPAM and
				res.body.IPAM.Config and
				res.body.IPAM.Config[1] and
				res.body.IPAM.Config[1].Gateway and
				res.body.IPAM.Config[1].Subnet then

				docker.create_macvlan_interface(data.name,
					data.parent,
					res.body.IPAM.Config[1].Gateway,
					res.body.IPAM.Config[1].Subnet)
			end

			docker:clear_status()
			luci.http.redirect(luci.dispatcher.build_url("admin/docker/networks"))
		else
			docker:append_status("code:" .. res.code .. " " .. (res.body.message and res.body.message or res.message) .. "\n")
			luci.http.redirect(luci.dispatcher.build_url("admin/docker/newnetwork"))
		end
	end
end

return m