first commit
This commit is contained in:
32
utils/brand.sh
Executable file
32
utils/brand.sh
Executable file
@@ -0,0 +1,32 @@
|
||||
#!/usr/bin/env bash
# SPDX-License-Identifier: AGPL-3.0-or-later

# shellcheck source=utils/lib.sh
. /dev/null

build.env.export() {

    # Export the build environment (git origin & SearXNG settings) used by
    # shell scripts and the Makefile.
    #
    # Globals (read):  REPO_ROOT
    # Globals (set):   GIT_URL, GIT_BRANCH, SEARXNG_URL, SEARXNG_PORT,
    #                  SEARXNG_BIND_ADDRESS

    # FIX: 'git branch | grep "\*"' breaks on a detached HEAD (the marked line
    # is "(HEAD detached at ...)") and on localized git output; rev-parse is
    # the robust, plumbing-level way to get the current branch name.
    GIT_BRANCH="$(git rev-parse --abbrev-ref HEAD)"
    GIT_REMOTE="$(git config "branch.${GIT_BRANCH}.remote")"
    GIT_URL="$(git config --get "remote.${GIT_REMOTE}.url")"

    # normalize a SSH remote (git@host:user/repo.git) to a HTTPS URL
    if [[ "${GIT_URL}" == git@* ]]; then
        GIT_URL="${GIT_URL/://}"
        GIT_URL="${GIT_URL/git@/https://}"
    fi
    if [[ "${GIT_URL}" == *.git ]]; then
        GIT_URL="${GIT_URL%.git}"
    fi

    # read SearXNG settings via the minimal YAML reader in get_setting.py
    SEARXNG_URL="$(python "${REPO_ROOT}/utils/get_setting.py" server.base_url)"
    SEARXNG_PORT="$(python "${REPO_ROOT}/utils/get_setting.py" server.port)"
    SEARXNG_BIND_ADDRESS="$(python "${REPO_ROOT}/utils/get_setting.py" server.bind_address)"

    export GIT_URL
    export GIT_BRANCH
    export SEARXNG_URL
    export SEARXNG_PORT
    export SEARXNG_BIND_ADDRESS
}

pushd "${REPO_ROOT}" &> /dev/null
build.env.export
popd &> /dev/null
||||
123
utils/get_setting.py
Normal file
123
utils/get_setting.py
Normal file
@@ -0,0 +1,123 @@
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
"""build environment used by shell scripts
|
||||
"""
|
||||
|
||||
# set path
|
||||
import sys
|
||||
import importlib.util
|
||||
import re
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
# absolute path of the repository root (utils/ is one level below it)
repo_root = Path(__file__).resolve().parent.parent
||||
def main(setting_name):
    """Print the value of *setting_name* from ``searx/settings.yml`` to stdout."""
    settings_file = repo_root / "searx" / "settings.yml"
    with open(settings_file) as stream:
        config = parse_yaml(stream.read())
    print(get_setting_value(config, setting_name))
|
||||
|
||||
|
||||
def get_setting_value(settings, name):
    """Resolve a dotted *name* (e.g. ``server.port``) in the *settings* dict.

    Booleans are mapped to shell-friendly strings: ``True`` -> ``"1"``,
    ``False`` -> ``""`` (empty string); other values pass through unchanged.
    """
    node = settings
    for part in name.split("."):
        node = node[part]
    if node is True:
        return "1"
    if node is False:
        return ""
    return node
|
||||
|
||||
|
||||
def parse_yaml(yaml_str):
    """Minimal YAML parser returning a dict of (nested) scalar values.

    Handles nested mappings indented by 2 spaces, single/double quoted
    strings, trailing comments, booleans and numbers.  Lists and JSON-like
    inline structures are NOT supported -- it is just good enough to read
    the values of server.base_url, server.port and server.bind_address.
    """

    # fmt: off
    bool_true = ("y", "Y", "yes", "Yes", "YES", "true", "True", "TRUE", "on", "On", "ON",)
    bool_false = ("n", "N", "no", "No", "NO", "false", "False", "FALSE", "off", "Off", "OFF",)
    # fmt: on

    def split_value(raw):
        """Strip an optional trailing comment and surrounding quotes.

        Returns a tuple: (str, value) when the value was quoted, otherwise
        (None, value-without-comment, stripped).
        """
        m = re.search(r"\"(.*)\"(\s+#)?|\'(.*)\'(\s+#)?|([^#]*)(\s+#)?", raw)
        if m:
            dquoted, _, squoted, _, plain, _ = m.groups()
            if dquoted is not None:
                return str, dquoted
            if squoted is not None:
                return str, squoted
            if plain is not None:
                return None, plain.strip()
        return None, raw.strip()

    def parse_scalar(line):
        """Extract (key, value) from a ``key: value`` line, or (None, None)."""
        if ": " not in line:
            return None, None
        key, raw = line.split(": ", 1)
        key = key.strip()
        kind, value = split_value(raw)
        # only unquoted values are candidates for bool/number conversion
        if kind is None:
            if value in bool_true:
                return key, True
            if value in bool_false:
                return key, False
            if value.replace(".", "").isdigit():
                for caster in (int, float):
                    try:
                        return key, caster(value)
                    except ValueError:
                        pass
        return key, value

    root = {}
    scopes = [root]  # stack of nested dicts, indexed by indentation depth

    for line in yaml_str.split("\n"):
        if not line.strip():
            continue  # skip empty lines
        # 2 spaces per indentation level (see .yamllint.yml)
        depth = (len(line) - len(line.lstrip())) // 2
        # drop scopes deeper than the current indentation level
        del scopes[depth + 1:]
        if line.endswith(":"):
            # a new (nested) mapping starts
            child = {}
            scopes[-1][line[0:-1].strip()] = child
            scopes.append(child)
        else:
            key, value = parse_scalar(line)
            if key is not None:
                scopes[-1][key] = value

    return root
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # CLI usage: python utils/get_setting.py <dotted.setting.name>
    main(sys.argv[1])
|
||||
1810
utils/lib.sh
Executable file
1810
utils/lib.sh
Executable file
File diff suppressed because it is too large
Load Diff
211
utils/lib_go.sh
Executable file
211
utils/lib_go.sh
Executable file
@@ -0,0 +1,211 @@
|
||||
#!/usr/bin/env bash
|
||||
# -*- coding: utf-8; mode: sh indent-tabs-mode: nil -*-
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
#
|
||||
# Tools to install and maintain golang [1] binaries & packages.
|
||||
#
|
||||
# [1] https://golang.org/doc/devel/release#policy
|
||||
#
|
||||
# A simple *helloworld* test with user 'my_user' :
|
||||
#
|
||||
# sudo -H adduser my_user
|
||||
# ./manage go.golang go1.17.3 my_user
|
||||
# ./manage go.install github.com/go-training/helloworld@latest my_user
|
||||
# ./manage go.bash my_user
|
||||
# $ helloword
|
||||
# Hello World!!
|
||||
#
|
||||
# Don't forget to remove 'my_user': sudo -H deluser --remove-home my_user
|
||||
|
||||
# shellcheck source=utils/lib.sh
|
||||
. /dev/null
|
||||
|
||||
# configure golang environment
|
||||
# ----------------------------
|
||||
|
||||
# golang version to install when GO_VERSION is not set by the caller
[[ -z "${GO_VERSION}" ]] && GO_VERSION="go1.17.3"

# base URL of the official golang download site
GO_DL_URL="https://golang.org/dl"
|
||||
|
||||
# implement go functions
|
||||
# -----------------------
|
||||
|
||||
go.help(){
    # print CLI help of the go.* commands (used by ./manage)
    cat <<EOF
go.:
  ls        : list golang binary archives (stable)
  golang    : (re-) install golang binary in user's \$HOME/local folder
  install   : install go package in user's \$HOME/go-apps folder
  bash      : start bash interpreter with golang environment sourced
EOF
}
|
||||
|
||||
go.ls(){
    # list all stable golang binary archives from ${GO_DL_URL}
    # NOTE: the embedded python script needs the third-party 'requests' package
    python <<EOF
import sys, json, requests
resp = requests.get("${GO_DL_URL}/?mode=json&include=all")
for ver in json.loads(resp.text):
    if not ver['stable']:
        continue
    for f in ver['files']:
        if f['kind'] != 'archive' or not f['size'] or not f['sha256'] or len(f['os']) < 2:
            continue
        print(" %(version)-10s|%(os)-8s|%(arch)-8s|%(filename)-30s|%(size)-10s|%(sha256)s" % f)
EOF
}
|
||||
|
||||
go.ver_info(){

    # print information about a golang distribution. To print filename
    # sha256 and size of the archive that fits to your OS and host:
    #
    #   go.ver_info "${GO_VERSION}" archive "$(go.os)" "$(go.arch)" filename sha256 size
    #
    # usage: go.ver_info <go-vers> <kind> <os> <arch> [filename|sha256|size]
    #
    #   kind: [archive|source|installer]
    #   os:   [darwin|freebsd|linux|windows]
    #   arch: [amd64|arm64|386|armv6l|ppc64le|s390x]
    #
    # Exits with status 42 when no matching file is found.
    # NOTE: the embedded python script needs the third-party 'requests' package

    python - "$@" <<EOF
import sys, json, requests
resp = requests.get("${GO_DL_URL}/?mode=json&include=all")
for ver in json.loads(resp.text):
    if ver['version'] != sys.argv[1]:
        continue
    for f in ver['files']:
        if (f['kind'] != sys.argv[2] or f['os'] != sys.argv[3] or f['arch'] != sys.argv[4]):
            continue
        for x in sys.argv[5:]:
            print(f[x])
        sys.exit(0)
sys.exit(42)
EOF
}
|
||||
|
||||
go.os() {
    # print the go naming of the host OS (linux|darwin|freebsd|windows);
    # dies with 42 on an unknown OS
    local host_os
    case "$(command uname -a)xx" in
        Linux\ *)                 host_os=linux ;;
        Darwin\ *)                host_os=darwin ;;
        FreeBSD\ *)               host_os=freebsd ;;
        CYGWIN* | MSYS* | MINGW*) host_os=windows ;;
        *) die 42 "OS is unknown: $(command uname -a)" ;;
    esac
    printf '%s\n' "${host_os}"
}
|
||||
|
||||
go.arch() {
    # print the go naming of the host architecture
    # (amd64|arm64|armv6l|386|ppc64le); dies with 42 on an unknown arch
    #
    # FIX: the former pattern '.*386.*' was a regex, but 'case' matches glob
    # patterns -- it could never match i386/i686; use the glob '*386*'.
    # FIX: 'uname -m' reports 'armv6l' (not 'armv6') on 32-bit ARMv6 hosts;
    # accept both spellings.
    local ARCH
    case "$(command uname -m)" in
        "x86_64") ARCH=amd64 ;;
        "aarch64") ARCH=arm64 ;;
        "armv6" | "armv6l" | "armv7l") ARCH=armv6l ;;
        "armv8") ARCH=arm64 ;;
        *386*) ARCH=386 ;;
        ppc64*) ARCH=ppc64le ;;
        *) die 42 "ARCH is unknown: $(command uname -m)" ;;
    esac
    echo "${ARCH}"
}
|
||||
|
||||
go.golang() {

    # install golang binary in user's $HOME/local folder:
    #
    #   go.golang ${GO_VERSION} ${SERVICE_USER}
    #
    # usage: go.golang <go-vers> [<username>]
    #
    # Downloads the binary archive (cached), verifies its sha256 checksum,
    # unpacks it to ~/local/go of the given user and writes a ~/.go_env file
    # with the required GOPATH/PATH exports.

    local version fname sha size user userpr
    local buf=()

    version="${1:-${GO_VERSION}}"
    user="${2:-${USERNAME}}"
    userpr=" ${_Yellow}|${user}|${_creset} "

    rst_title "Install Go in ${user}'s HOME" section

    # fetch filename, sha256 and size of the archive matching host OS/arch
    mapfile -t buf < <(
        go.ver_info "${version}" archive "$(go.os)" "$(go.arch)" filename sha256 size
    )

    if [ ${#buf[@]} -eq 0 ]; then
        die 42 "can't find info of golang version: ${version}"
    fi
    fname="${buf[0]}"
    sha="${buf[1]}"
    size="$(numfmt --to=iec "${buf[2]}")"

    info_msg "Download go binary ${fname} (${size}B)"
    cache_download "${GO_DL_URL}/${fname}" "${fname}"

    # verify checksum ('sha256sum -c' expects "<hash>  <file>" lines)
    pushd "${CACHE}" &> /dev/null
    echo "${sha}  ${fname}" > "${fname}.sha256"
    if ! sha256sum -c "${fname}.sha256" >/dev/null; then
        die 42 "downloaded file ${fname} checksum does not match"
    else
        info_msg "${fname} checksum OK"
    fi
    popd &> /dev/null

    # unpack as the target user; \$ escapes keep expansion in the user's shell
    info_msg "install golang"
    tee_stderr 0.1 <<EOF | sudo -i -u "${user}" | prefix_stdout "${userpr}"
mkdir -p \$HOME/local
rm -rf \$HOME/local/go
tar -C \$HOME/local -xzf ${CACHE}/${fname}
echo "export GOPATH=\$HOME/go-apps" > \$HOME/.go_env
echo "export PATH=\$HOME/local/go/bin:\\\$GOPATH/bin:\\\$PATH" >> \$HOME/.go_env
EOF
    info_msg "test golang installation"
    sudo -i -u "${user}" <<EOF
source \$HOME/.go_env
command -v go
go version
EOF
}
|
||||
|
||||
go.install() {

    # install go package in user's $HOME/go-apps folder:
    #
    #   go.install github.com/go-training/helloworld@latest ${SERVICE_USER}
    #
    # usage: go.install <package> [<username>]

    local package user userpr

    package="${1}"
    user="${2:-${USERNAME}}"
    userpr=" ${_Yellow}|${user}|${_creset} "

    if [ -z "${package}" ]; then
        die 42 "${FUNCNAME[0]}() - missing argument: <package>"
    fi
    # run 'go install' in the user's login shell with ~/.go_env sourced
    tee_stderr 0.1 <<EOF | sudo -i -u "${user}" | prefix_stdout "${userpr}"
source \$HOME/.go_env
go install -v ${package}
EOF
}
|
||||
|
||||
go.bash() {

    # start bash interpreter with golang environment sourced
    #
    #   go.bash ${SERVICE_USER}
    #
    # usage: go.bash [<username>]

    local user
    user="${1:-${USERNAME}}"
    # NOTE(review): the tilde in "~${user}/.go_env" is quoted and therefore
    # NOT expanded by this shell -- it relies on the login shell started by
    # 'sudo -i' to expand it; verify this resolves on all target platforms.
    sudo -i -u "${user}" bash --init-file "~${user}/.go_env"
}

go.version(){
    # print the version string of the go toolchain installed in the
    # user's HOME  --  usage: go.version [<username>]
    local user
    user="${1:-${USERNAME}}"
    sudo -i -u "${user}" <<EOF
source \$HOME/.go_env
go version | cut -d' ' -f 3
EOF
}
|
||||
189
utils/lib_nvm.sh
Executable file
189
utils/lib_nvm.sh
Executable file
@@ -0,0 +1,189 @@
|
||||
#!/usr/bin/env bash
|
||||
# -*- coding: utf-8; mode: sh indent-tabs-mode: nil -*-
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
#
|
||||
# Tools to install and maintain NVM versions manager for Node.js
|
||||
#
|
||||
# [1] https://github.com/nvm-sh/nvm
|
||||
|
||||
# https://github.com/koalaman/shellcheck/issues/356#issuecomment-853515285
|
||||
# shellcheck source=utils/lib.sh
|
||||
. /dev/null
|
||||
|
||||
# name of the calling command (e.g. './manage'), set by the sourcing script
declare main_cmd

# configure nvm environment
# -------------------------

# folder (below <repo-root>) of a repo-scoped NVM installation
NVM_LOCAL_FOLDER=.nvm

[[ -z "${NVM_GIT_URL}" ]] && NVM_GIT_URL="https://github.com/nvm-sh/nvm.git"
[[ -z "${NVM_MIN_NODE_VER}" ]] && NVM_MIN_NODE_VER="16.13.0"
|
||||
|
||||
# initialize nvm environment
|
||||
# -------------------------
|
||||
|
||||
nvm.env() {
    # source the NVM environment (nvm.sh and its bash completion) from NVM_DIR
    source "${NVM_DIR}/nvm.sh"
    source "${NVM_DIR}/bash_completion"
    [ "$VERBOSE" = "1" ] && info_msg "sourced NVM environment from ${NVM_DIR}"
    # explicit 'return 0': without it the function would return the (false)
    # status of the '[ ... ]' test above whenever VERBOSE is not "1"
    return 0
}
|
||||
|
||||
nvm.is_installed() {
    # true when a NVM installation exists -- either in $HOME or in
    # <repo-root>/.nvm (NVM_DIR points at whichever is in use)
    test -f "${NVM_DIR}/nvm.sh"
}
|
||||
|
||||
if [[ -z "${NVM_DIR}" ]]; then
    # nvm is not pre-installed in $HOME. Prepare for using nvm from <repo-root>
    NVM_DIR="$(git rev-parse --show-toplevel)/${NVM_LOCAL_FOLDER}"
fi
export NVM_DIR

if nvm.is_installed; then
    nvm.env
else
    # if nvm is not installed, use this function as a wrapper
    # NOTE(review): nvm.ensure -> nvm.install -> nvm.env sources nvm.sh which
    # defines the real 'nvm' function, replacing this wrapper -- confirm the
    # recursive call below then resolves to the real implementation.
    nvm() {
        nvm.ensure
        nvm "$@"
    }
fi
|
||||
|
||||
# implement nvm functions
|
||||
# -----------------------
|
||||
|
||||
nvm.is_local() {
    # is true if NVM is installed in <repo-root>/.nvm (and not in $HOME)
    [ "${NVM_DIR}" = "$(git rev-parse --show-toplevel)/${NVM_LOCAL_FOLDER}" ]
}
|
||||
|
||||
nvm.min_node() {

    # usage: nvm.min_node 16.3.0
    #
    # Returns 0 when the installed Node.js version is at least the given
    # one, 42 when Node.js is missing or too old.

    local wanted installed newest

    if ! command -v node >/dev/null; then
        warn_msg "Node.js is not yet installed"
        return 42
    fi

    wanted="${1}"
    installed="$(node --version)"
    installed="${installed:1}"   # strip leading 'v' (v16.3.0 -> 16.3.0)

    if [ "${wanted}" = "${installed}" ]; then
        return 0
    fi
    # version-sort both numbers; if the wanted one sorts highest, the
    # installed Node.js is too old
    newest="$(echo -e "${wanted}\n${installed}" | sort -Vr | head -1)"
    if [ "${wanted}" = "${newest}" ]; then
        return 42
    fi
}
|
||||
|
||||
# implement nvm command line
|
||||
# --------------------------
|
||||
|
||||
nvm.help() {
    # print CLI help of the nvm.* commands (used by ./manage)
    cat <<EOF
nvm.: use nvm (without dot) to execute nvm commands directly
  install   : install NVM locally at $(git rev-parse --show-toplevel)/${NVM_LOCAL_FOLDER}
  clean     : remove NVM installation
  status    : prompt some status information about nvm & node
  nodejs    : install Node.js latest LTS
  cmd ...   : run command ... in NVM environment
  bash      : start bash interpreter with NVM environment sourced
EOF
}
|
||||
|
||||
nvm.install() {
    # clone (or update) NVM into NVM_DIR and check out the latest release tag
    local NVM_VERSION_TAG
    info_msg "install (update) NVM at ${NVM_DIR}"
    if nvm.is_installed; then
        info_msg "already cloned at: ${NVM_DIR}"
        pushd "${NVM_DIR}" &> /dev/null
        git fetch --all | prefix_stdout " ${_Yellow}||${_creset} "
    else
        # delete any leftovers from previous installations
        if nvm.is_local; then
            rm -rf "${NVM_DIR}"
        fi
        info_msg "clone: ${NVM_GIT_URL}"
        git clone "${NVM_GIT_URL}" "${NVM_DIR}" 2>&1 | prefix_stdout " ${_Yellow}||${_creset} "
        pushd "${NVM_DIR}" &> /dev/null
        git config --local advice.detachedHead false
    fi
    # resolve the most recent release tag (v[0-9]*) and check it out
    NVM_VERSION_TAG="$(git rev-list --tags --max-count=1)"
    NVM_VERSION_TAG="$(git describe --abbrev=0 --tags --match "v[0-9]*" "${NVM_VERSION_TAG}")"
    info_msg "checkout ${NVM_VERSION_TAG}"
    git checkout "${NVM_VERSION_TAG}" 2>&1 | prefix_stdout " ${_Yellow}||${_creset} "
    popd &> /dev/null
    # seed NVM's default-packages from the repo's .nvm_packages (if present)
    if [ -f "${REPO_ROOT}/.nvm_packages" ]; then
        cp "${REPO_ROOT}/.nvm_packages" "${NVM_DIR}/default-packages"
    fi
    nvm.env
}
|
||||
|
||||
nvm.clean() {
    # remove the NVM installation -- but only when it lives in
    # <repo-root>/.nvm; a NVM installation in $HOME is never removed
    if ! nvm.is_installed; then
        build_msg CLEAN "[NVM] not installed"
        return
    fi
    if ! nvm.is_local; then
        build_msg CLEAN "[NVM] can't remove nvm from ${NVM_DIR}"
        return
    fi
    if [ -n "${NVM_DIR}" ]; then
        build_msg CLEAN "[NVM] drop $(realpath --relative-to=. "${NVM_DIR}")/"
        rm -rf "${NVM_DIR}"
    fi
}
|
||||
|
||||
nvm.status() {

    # print status of the Node.js, npm and NVM installations

    if command -v node >/dev/null; then
        info_msg "Node.js is installed at $(command -v node)"
        info_msg "Node.js is version $(node --version)"
        if ! nvm.min_node "${NVM_MIN_NODE_VER}"; then
            warn_msg "minimal Node.js version is ${NVM_MIN_NODE_VER}"
        fi
    else
        # FIX: message previously read "Node.js is mot installed"
        warn_msg "Node.js is not installed"
    fi
    if command -v npm >/dev/null; then
        info_msg "npm is installed at $(command -v npm)"
        info_msg "npm is version $(npm --version)"
    else
        warn_msg "npm is not installed"
    fi
    if nvm.is_installed; then
        info_msg "NVM is installed at ${NVM_DIR}"
    else
        warn_msg "NVM is not installed"
        info_msg "to install NVM and Node.js (LTS) use: ${main_cmd} nvm.nodejs"
    fi
}
|
||||
|
||||
nvm.nodejs() {
    # install Node.js latest LTS via nvm, then show the resulting status
    nvm install
    nvm.status
}

nvm.bash() {
    # start a bash interpreter with the NVM environment sourced
    nvm.ensure
    bash --init-file <(cat "${NVM_DIR}/nvm.sh" "${NVM_DIR}/bash_completion")
}

nvm.cmd() {
    # run command "$@" within the NVM environment (installs NVM on demand)
    nvm.ensure
    "$@"
}
|
||||
|
||||
nvm.ensure() {
    # install NVM on demand -- a no-op when NVM is already installed
    nvm.is_installed || nvm.install
}
|
||||
355
utils/lib_redis.sh
Executable file
355
utils/lib_redis.sh
Executable file
@@ -0,0 +1,355 @@
|
||||
#!/usr/bin/env bash
|
||||
# -*- coding: utf-8; mode: sh indent-tabs-mode: nil -*-
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
#
|
||||
# Tools to build and install redis [1] binaries & packages.
|
||||
#
|
||||
# [1] https://redis.io/download#installation
|
||||
#
|
||||
# 1. redis.devpkg (sudo)
|
||||
# 2. redis.build
|
||||
# 3. redis.install (sudo)
|
||||
#
|
||||
# systemd commands::
|
||||
#
|
||||
# sudo -H systemctl status searxng-redis
|
||||
# sudo -H journalctl -u searxng-redis
|
||||
# sudo -H journalctl --vacuum-size=1M
|
||||
#
|
||||
# Test socket connection from client (local user)::
|
||||
#
|
||||
# $ sudo -H ./manage redis.addgrp "${USER}"
|
||||
# # logout & login to get member of group
|
||||
# $ groups
|
||||
# ... searxng-redis ...
|
||||
# $ source /usr/local/searxng-redis/.redis_env
|
||||
# $ which redis-cli
|
||||
# /usr/local/searxng-redis/.local/bin/redis-cli
|
||||
#
|
||||
# $ redis-cli -s /usr/local/searxng-redis/redis.sock
|
||||
# redis /usr/local/searxng-redis/redis.sock> set foo bar
|
||||
# OK
|
||||
# redis /usr/local/searxng-redis/redis.sock> get foo
|
||||
# "bar"
|
||||
# [CTRL-D]
|
||||
|
||||
|
||||
# shellcheck disable=SC2091
|
||||
# shellcheck source=utils/lib.sh
|
||||
. /dev/null
|
||||
|
||||
# redis sources & version (tag) to build
REDIS_GIT_URL="https://github.com/redis/redis.git"
REDIS_GIT_TAG="${REDIS_GIT_TAG:-6.2.6}"

# service account (user & group) running the redis instance
REDIS_USER="searxng-redis"
REDIS_GROUP="searxng-redis"

REDIS_HOME="/usr/local/${REDIS_USER}"
REDIS_HOME_BIN="${REDIS_HOME}/.local/bin"
# env file sourced from the service user's ~/.profile
REDIS_ENV="${REDIS_HOME}/.redis_env"

REDIS_SERVICE_NAME="searxng-redis"
REDIS_SYSTEMD_UNIT="${SYSTEMD_UNITS}/${REDIS_SERVICE_NAME}.service"

# binaries to compile & install
REDIS_INSTALL_EXE=(redis-server redis-benchmark redis-cli)
# link names of redis-server binary
REDIS_LINK_EXE=(redis-sentinel redis-check-rdb redis-check-aof)

REDIS_CONF="${REDIS_HOME}/redis.conf"
# redis.conf content; ${REDIS_HOME} is expanded here, when this file is sourced
REDIS_CONF_TEMPLATE=$(cat <<EOF
# Note that in order to read the configuration file, Redis must be
# started with the file path as first argument:
#
# ./redis-server /path/to/redis.conf

# bind 127.0.0.1 -::1
protected-mode yes

# Accept connections on the specified port, default is 6379 (IANA #815344).
# If port 0 is specified Redis will not listen on a TCP socket.
port 0

# Specify the path for the Unix socket that will be used to listen for
# incoming connections.

unixsocket ${REDIS_HOME}/run/redis.sock
unixsocketperm 770

# The working directory.
dir ${REDIS_HOME}/run

# If you run Redis from upstart or systemd, Redis can interact with your
# supervision tree.
supervised auto

pidfile ${REDIS_HOME}/run/redis.pid

# log to the system logger
syslog-enabled yes
EOF
)
|
||||
|
||||
redis.help(){
    # print CLI help of the redis.* commands (used by ./manage)
    # FIX: help text read "checkput" instead of "checkout"
    cat <<EOF
redis.:
  devpkg    : install essential packages to compile redis
  build     : build redis binaries at $(redis._get_dist)
  install   : create user (${REDIS_USER}) and install systemd service (${REDIS_SERVICE_NAME})
  remove    : delete user (${REDIS_USER}) and remove service (${REDIS_SERVICE_NAME})
  shell     : start bash interpreter from user ${REDIS_USER}
  src       : clone redis source code to <path> and checkout ${REDIS_GIT_TAG}
  useradd   : create user (${REDIS_USER}) at ${REDIS_HOME}
  userdel   : delete user (${REDIS_USER})
  addgrp    : add <user> to group (${REDIS_USER})
  rmgrp     : remove <user> from group (${REDIS_USER})
EOF
}
|
||||
|
||||
redis.devpkg() {

    # Uses OS package manager to install the essential packages to build and
    # compile sources (requires root; exits otherwise).

    sudo_or_exit

    case ${DIST_ID} in
        ubuntu|debian)
            pkg_install git build-essential gawk
            ;;
        arch)
            pkg_install git base-devel
            ;;
        fedora)
            pkg_install git @development-tools
            ;;
        centos)
            pkg_install git
            yum groupinstall "Development Tools" -y
            ;;
        *)
            err_msg "$DIST_ID-$DIST_VERS: No rules to install development tools from OS."
            return 42
            ;;
    esac
}
|
||||
|
||||
redis.build() {

    # usage: redis.build
    #
    # Clone/update the redis sources, compile them (optionally running the
    # test suite) and copy the resulting binaries into $(redis._get_dist).

    rst_title "get redis sources" section
    redis.src "${CACHE}/redis"

    if ! required_commands gcc nm make gawk ; then
        info_msg "install development tools to get missing command(s) .."
        # in a sudo context, (re-)enter as root to install the dev packages
        if [[ -n ${SUDO_USER} ]]; then
            sudo -H "$0" redis.devpkg
        else
            redis.devpkg
        fi
    fi

    rst_title "compile redis sources" section

    pushd "${CACHE}/redis" &>/dev/null

    # $(bash.cmd) runs make in a non-root shell, even when called via sudo
    if ask_yn "Do you run 'make distclean' first'?" Yn; then
        $(bash.cmd) -c "make distclean" 2>&1 | prefix_stdout
    fi

    $(bash.cmd) -c "make" 2>&1 | prefix_stdout
    if ask_yn "Do you run 'make test'?" Ny; then
        $(bash.cmd) -c "make test" | prefix_stdout
    fi

    popd &>/dev/null

    # copy the compiled binaries into the dist folder
    tee_stderr 0.1 <<EOF | $(bash.cmd) 2>&1 | prefix_stdout
mkdir -p "$(redis._get_dist)"
cd "${CACHE}/redis/src"
cp ${REDIS_INSTALL_EXE[@]} "$(redis._get_dist)"
EOF
    info_msg "redis binaries available at $(redis._get_dist)"
}
|
||||
|
||||
|
||||
redis.install() {
    # create the service user, install binaries, config and the systemd
    # service (requires root); the subshell with 'set -e' aborts on the
    # first failing step
    sudo_or_exit
    (
        set -e
        redis.useradd
        redis._install_bin
        redis._install_conf
        redis._install_service
    )
    dump_return $?
}

redis.remove() {
    # remove the systemd service and delete the service user (requires root)
    sudo_or_exit
    (
        set -e
        redis._remove_service
        redis.userdel
    )
    dump_return $?
}

redis.shell() {
    # start an interactive shell as the redis service user
    interactive_shell "${REDIS_USER}"
}
|
||||
|
||||
redis.src() {

    # usage: redis.src "${CACHE}/redis"
    #
    # Clone the redis sources into <dest> (default ${CACHE}/redis) and pin
    # the checkout to tag ${REDIS_GIT_TAG}.

    local dest="${1:-${CACHE}/redis}"

    if [ -d "${dest}" ] ; then
        # already cloned: update and hard-reset onto the pinned tag
        info_msg "already cloned: $dest"
        tee_stderr 0.1 <<EOF | $(bash.cmd) 2>&1 | prefix_stdout
cd "${dest}"
git fetch --all
git reset --hard tags/${REDIS_GIT_TAG}
EOF
    else
        tee_stderr 0.1 <<EOF | $(bash.cmd) 2>&1 | prefix_stdout
mkdir -p "$(dirname "$dest")"
cd "$(dirname "$dest")"
git clone "${REDIS_GIT_URL}" "${dest}"
EOF
        tee_stderr 0.1 <<EOF | $(bash.cmd) 2>&1 | prefix_stdout
cd "${dest}"
git checkout tags/${REDIS_GIT_TAG} -b "build-branch"
EOF
    fi
}
|
||||
|
||||
redis.useradd(){

    # usage: redis.useradd
    #
    # Create system user/group ${REDIS_USER} with HOME ${REDIS_HOME} and set
    # up its ~/.redis_env (sourced from ~/.profile).  Requires root.

    rst_title "add user ${REDIS_USER}" section
    echo
    sudo_or_exit

    # create user account
    tee_stderr 0.5 <<EOF | sudo -H bash | prefix_stdout
useradd --shell /bin/bash --system \
 --home-dir "${REDIS_HOME}" \
 --comment 'user that runs a redis instance' "${REDIS_USER}"
mkdir -p "${REDIS_HOME}"
chown -R "${REDIS_USER}:${REDIS_GROUP}" "${REDIS_HOME}"
groups "${REDIS_USER}"
EOF

    # create App-ENV and source it from the user's .profile
    tee_stderr 0.5 <<EOF | sudo -H -u "${REDIS_USER}" bash | prefix_stdout
mkdir -p "${REDIS_HOME_BIN}"
echo "export PATH=${REDIS_HOME_BIN}:\\\$PATH" > "${REDIS_ENV}"
grep -qFs -- 'source "${REDIS_ENV}"' ~/.profile || echo 'source "${REDIS_ENV}"' >> ~/.profile
EOF
}
|
||||
|
||||
redis.userdel() {
    # delete the redis service account and its group (requires root);
    # '|| true' -- the group may already be gone with the account
    sudo_or_exit
    drop_service_account "${REDIS_USER}"
    groupdel "${REDIS_GROUP}" 2>&1 | prefix_stdout || true
}

redis.addgrp() {

    # usage: redis.addgrp <user>
    #
    # Add <user> to group ${REDIS_GROUP} (grants access to the redis socket).

    [[ -z $1 ]] && die_caller 42 "missing argument <user>"
    sudo -H gpasswd -a "$1" "${REDIS_GROUP}"
}

redis.rmgrp() {

    # usage: redis.rmgrp <user>
    #
    # Remove <user> from group ${REDIS_GROUP}.

    [[ -z $1 ]] && die_caller 42 "missing argument <user>"
    sudo -H gpasswd -d "$1" "${REDIS_GROUP}"

}
|
||||
|
||||
|
||||
# private redis. functions
|
||||
# ------------------------
|
||||
|
||||
redis._install_bin() {
    # install the compiled binaries from $(redis._get_dist) into
    # ${REDIS_HOME_BIN} and create the redis-server symlinks
    local src
    src="$(redis._get_dist)"
    (
        set -e
        for redis_exe in "${REDIS_INSTALL_EXE[@]}"; do
            install -v -o "${REDIS_USER}" -g "${REDIS_GROUP}" \
                "${src}/${redis_exe}" "${REDIS_HOME_BIN}"
        done

        # redis ships sentinel/check-* as symlinks onto redis-server
        pushd "${REDIS_HOME_BIN}" &> /dev/null
        for redis_exe in "${REDIS_LINK_EXE[@]}"; do
            info_msg "link redis-server --> ${redis_exe}"
            sudo -H -u "${REDIS_USER}" ln -sf redis-server "${redis_exe}"
        done
        popd &> /dev/null

    )
}
|
||||
|
||||
redis._install_conf() {
    # write redis.conf (expanded REDIS_CONF_TEMPLATE) into REDIS_HOME
    # NOTE(review): the template is wrapped in single quotes inside the
    # here-doc; this breaks if the template ever contains a single quote --
    # keep the template apostrophe-free (it currently is).
    sudo -H -u "${REDIS_USER}" bash <<EOF
mkdir -p "${REDIS_HOME}/run"
echo '${REDIS_CONF_TEMPLATE}' > "${REDIS_CONF}"
EOF
}
|
||||
|
||||
redis._install_service() {
    # install & enable the systemd unit of the redis service
    systemd_install_service "${REDIS_SERVICE_NAME}" "${REDIS_SYSTEMD_UNIT}"
}

redis._remove_service() {
    # stop & remove the systemd unit of the redis service
    systemd_remove_service "${REDIS_SERVICE_NAME}" "${REDIS_SYSTEMD_UNIT}"
}
|
||||
|
||||
redis._get_dist() {
    # print the folder holding the compiled redis binaries; REDIS_DIST (if
    # set) overrides the default below ${REPO_ROOT}/dist
    if [ -n "${REDIS_DIST}" ]; then
        echo "${REDIS_DIST}"
    else
        echo "${REPO_ROOT}/dist/redis/${REDIS_GIT_TAG}/$(redis._arch)"
    fi
}
|
||||
|
||||
redis._arch() {
    # print the normalized host architecture (same naming as go.arch)
    #
    # FIX: the former pattern '.*386.*' was a regex, but 'case' matches glob
    # patterns -- it could never match i386/i686; use the glob '*386*'.
    # FIX: 'uname -m' reports 'armv6l' (not 'armv6') on 32-bit ARMv6 hosts;
    # accept both spellings.
    local ARCH
    case "$(command uname -m)" in
        "x86_64") ARCH=amd64 ;;
        "aarch64") ARCH=arm64 ;;
        "armv6" | "armv6l" | "armv7l") ARCH=armv6l ;;
        "armv8") ARCH=arm64 ;;
        *386*) ARCH=386 ;;
        ppc64*) ARCH=ppc64le ;;
        *) die 42 "ARCH is unknown: $(command uname -m)" ;;
    esac
    echo "${ARCH}"
}
|
||||
|
||||
# TODO: move this to the right place ..
|
||||
|
||||
bash.cmd(){

    # Print the command line that starts a bash in non-root mode, even when
    # we are running in a sudo context (SUDO_USER set and not root).

    local invoking_user="${USER}"
    local shell_cmd="bash"

    if [ -n "${SUDO_USER}" ] && [ "root" != "${SUDO_USER}" ] ; then
        invoking_user="${SUDO_USER}"
        shell_cmd="sudo -H -u ${SUDO_USER} bash"
    fi

    printf "%s" "${shell_cmd}"
}
|
||||
307
utils/lib_sxng_container.sh
Normal file
307
utils/lib_sxng_container.sh
Normal file
@@ -0,0 +1,307 @@
|
||||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
container.help() {
    # Print the help text of the container.* command group.
    cat <<EOF
container.:
  build : build container image
EOF
}
|
||||
|
||||
CONTAINER_IMAGE_ORGANIZATION=${GITHUB_REPOSITORY_OWNER:-"searxng"}
|
||||
CONTAINER_IMAGE_NAME="searxng"
|
||||
|
||||
container.build() {
    # Build the SearXNG container image in two stages ("builder" target, then
    # the final image).
    #
    # $1 (optional): container engine to use ("podman" or "docker"); when
    # omitted, podman is preferred over docker if both are installed.
    # On GitHub Actions the image is tagged & pushed into the ghcr.io cache
    # repository, otherwise it is tagged into localhost/.
    local parch=${OVERRIDE_ARCH:-$(uname -m)}
    local container_engine
    local dockerfile
    local arch
    local variant
    local platform

    required_commands git

    # Check if podman or docker is installed
    if [ "$1" = "podman" ] || [ "$1" = "docker" ]; then
        if ! command -v "$1" &>/dev/null; then
            die 42 "$1 is not installed"
        fi
        container_engine="$1"
    else
        # If no explicit engine is passed, prioritize podman over docker
        if command -v podman &>/dev/null; then
            container_engine="podman"
        elif command -v docker &>/dev/null; then
            container_engine="docker"
        else
            die 42 "no compatible container engine is installed (podman or docker)"
        fi
    fi
    info_msg "Selected engine: $container_engine"

    # Setup arch specific: map uname/GHA style arch names (e.g. "X64") to the
    # arch / variant / platform triple the container engines expect.
    case $parch in
        "X64" | "x86_64" | "amd64")
            dockerfile="Dockerfile"
            arch="amd64"
            variant=""
            platform="linux/$arch"
            ;;
        "ARM64" | "aarch64" | "arm64")
            dockerfile="Dockerfile"
            arch="arm64"
            variant=""
            platform="linux/$arch"
            ;;
        "ARMV7" | "armhf" | "armv7l" | "armv7")
            dockerfile="Dockerfile"
            arch="arm"
            variant="v7"
            platform="linux/$arch/$variant"
            ;;
        *)
            err_msg "Unsupported architecture; $parch"
            exit 1
            ;;
    esac
    info_msg "Selected platform: $platform"

    pyenv.install

    ( # subshell keeps 'set -e' and the pyenv activation local to the build
        set -e
        pyenv.activate

        # Check if it is a git repository
        if [ ! -d .git ]; then
            die 1 "This is not Git repository"
        fi

        if ! git remote get-url origin &>/dev/null; then
            die 1 "There is no remote origin"
        fi

        # This is a git repository
        git update-index -q --refresh
        python -m searx.version freeze
        # imports VERSION_STRING, VERSION_TAG, DOCKER_TAG, GIT_URL, GIT_BRANCH
        eval "$(python -m searx.version)"

        info_msg "Set \$VERSION_STRING: $VERSION_STRING"
        info_msg "Set \$VERSION_TAG: $VERSION_TAG"
        info_msg "Set \$DOCKER_TAG: $DOCKER_TAG"
        info_msg "Set \$GIT_URL: $GIT_URL"
        info_msg "Set \$GIT_BRANCH: $GIT_BRANCH"

        # podman gets docker-format images with reproducible layers; docker
        # falls back to the classic --squash behaviour
        if [ "$container_engine" = "podman" ]; then
            params_build_builder="build --format=docker --platform=$platform --target=builder --layers --identity-label=false"
            params_build="build --format=docker --platform=$platform --layers --squash-all --omit-history --identity-label=false"
        else
            params_build_builder="build --platform=$platform --target=builder"
            params_build="build --platform=$platform --squash"
        fi

        if [ "$GITHUB_ACTIONS" = "true" ]; then
            params_build_builder+=" --cache-from=ghcr.io/$CONTAINER_IMAGE_ORGANIZATION/cache --cache-to=ghcr.io/$CONTAINER_IMAGE_ORGANIZATION/cache"

            # Tags
            params_build+=" --tag=ghcr.io/$CONTAINER_IMAGE_ORGANIZATION/cache:$CONTAINER_IMAGE_NAME-$arch$variant"
        else
            # Tags
            params_build+=" --tag=localhost/$CONTAINER_IMAGE_ORGANIZATION/$CONTAINER_IMAGE_NAME:latest"
            params_build+=" --tag=localhost/$CONTAINER_IMAGE_ORGANIZATION/$CONTAINER_IMAGE_NAME:$DOCKER_TAG"
        fi

        # $params_* are intentionally unquoted flag lists
        # shellcheck disable=SC2086
        "$container_engine" $params_build_builder \
            --build-arg="TIMESTAMP_SETTINGS=$(git log -1 --format="%cd" --date=unix -- ./searx/settings.yml)" \
            --tag="localhost/$CONTAINER_IMAGE_ORGANIZATION/$CONTAINER_IMAGE_NAME:builder" \
            --file="./container/$dockerfile" \
            .
        build_msg CONTAINER "Image \"builder\" built"

        # shellcheck disable=SC2086
        "$container_engine" $params_build \
            --build-arg="TIMESTAMP_SETTINGS=$(git log -1 --format="%cd" --date=unix -- ./searx/settings.yml)" \
            --build-arg="TIMESTAMP_UWSGI=$(git log -1 --format="%cd" --date=unix -- ./container/config/uwsgi.ini)" \
            --build-arg="GIT_URL=$GIT_URL" \
            --build-arg="SEARXNG_GIT_VERSION=$VERSION_STRING" \
            --build-arg="LABEL_DATE=$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
            --build-arg="LABEL_VCS_REF=$(git rev-parse HEAD)" \
            --build-arg="LABEL_VCS_URL=$GIT_URL" \
            --file="./container/$dockerfile" \
            .
        build_msg CONTAINER "Image built"

        if [ "$GITHUB_ACTIONS" = "true" ]; then
            "$container_engine" push "ghcr.io/$CONTAINER_IMAGE_ORGANIZATION/cache:$CONTAINER_IMAGE_NAME-$arch$variant"

            # Output to GHA (consumed by later workflow steps)
            cat <<EOF >>"$GITHUB_OUTPUT"
version_string=$VERSION_STRING
version_tag=$VERSION_TAG
docker_tag=$DOCKER_TAG
git_url=$GIT_URL
git_branch=$GIT_BRANCH
EOF
        fi
    )
    dump_return $?
}
|
||||
|
||||
container.test() {
    # Smoke-test the per-arch cache image (GitHub Actions only): pull the
    # image, start a container on the host network, then probe the /healthz
    # endpoint of the service.
    local parch=${OVERRIDE_ARCH:-$(uname -m)}
    local arch
    local variant
    local platform

    if [ "$GITHUB_ACTIONS" != "true" ]; then
        die 1 "This command is intended to be run in GitHub Actions"
    fi

    required_commands podman

    # Setup arch specific
    case $parch in
        "X64" | "x86_64" | "amd64")
            arch="amd64"
            variant=""
            platform="linux/$arch"
            ;;
        "ARM64" | "aarch64" | "arm64")
            arch="arm64"
            variant=""
            platform="linux/$arch"
            ;;
        "ARMV7" | "armhf" | "armv7l" | "armv7")
            arch="arm"
            variant="v7"
            platform="linux/$arch/$variant"
            ;;
        *)
            err_msg "Unsupported architecture; $parch"
            exit 1
            ;;
    esac
    build_msg CONTAINER "Selected platform: $platform"

    (
        set -e

        podman pull "ghcr.io/$CONTAINER_IMAGE_ORGANIZATION/cache:$CONTAINER_IMAGE_NAME-$arch$variant"

        # unique container name (%N = nanoseconds of the current second)
        name="$CONTAINER_IMAGE_NAME-$(date +%N)"

        # --timeout=60 auto-stops the container as a safety net, --rm cleans up
        podman create --name="$name" --rm --timeout=60 --network="host" \
            "ghcr.io/$CONTAINER_IMAGE_ORGANIZATION/cache:$CONTAINER_IMAGE_NAME-$arch$variant" >/dev/null

        podman start "$name" >/dev/null
        # stream container logs in the background for CI diagnostics
        podman logs -f "$name" &
        pid_logs=$!

        # Wait until container is ready
        sleep 5

        # fails the subshell (set -e) when the service is not healthy
        curl -vf --max-time 5 "http://localhost:8080/healthz"

        kill $pid_logs &>/dev/null || true
        podman stop "$name" >/dev/null
    )
    dump_return $?
}
|
||||
|
||||
container.push() {
    # Assemble multi-arch manifests from the per-arch cache images and push
    # them to the remote registries (GitHub Actions only).

    # Architectures on manifest
    local release_archs=("amd64" "arm64" "armv7")

    local archs=()
    local variants=()
    local platforms=()

    if [ "$GITHUB_ACTIONS" != "true" ]; then
        die 1 "This command is intended to be run in GitHub Actions"
    fi

    required_commands podman

    # normalize arch names into parallel archs/variants/platforms arrays
    for arch in "${release_archs[@]}"; do
        case $arch in
            "X64" | "x86_64" | "amd64")
                archs+=("amd64")
                variants+=("")
                platforms+=("linux/${archs[-1]}")
                ;;
            "ARM64" | "aarch64" | "arm64")
                archs+=("arm64")
                variants+=("")
                platforms+=("linux/${archs[-1]}")
                ;;
            "ARMV7" | "armv7" | "armhf" | "arm")
                archs+=("arm")
                variants+=("v7")
                platforms+=("linux/${archs[-1]}/${variants[-1]}")
                ;;
            *)
                err_msg "Unsupported architecture; $arch"
                exit 1
                ;;
        esac
    done

    (
        set -e

        # Pull archs
        for i in "${!archs[@]}"; do
            podman pull "ghcr.io/$CONTAINER_IMAGE_ORGANIZATION/cache:$CONTAINER_IMAGE_NAME-${archs[$i]}${variants[$i]}"
        done

        # Manifest tags
        release_tags=("latest" "$DOCKER_TAG")

        # Create manifests
        for tag in "${release_tags[@]}"; do
            if ! podman manifest exists "localhost/$CONTAINER_IMAGE_ORGANIZATION/$CONTAINER_IMAGE_NAME:$tag"; then
                podman manifest create "localhost/$CONTAINER_IMAGE_ORGANIZATION/$CONTAINER_IMAGE_NAME:$tag"
            fi

            # Add archs to manifest (images come from local containers-storage)
            for i in "${!archs[@]}"; do
                podman manifest add \
                    "localhost/$CONTAINER_IMAGE_ORGANIZATION/$CONTAINER_IMAGE_NAME:$tag" \
                    "containers-storage:ghcr.io/$CONTAINER_IMAGE_ORGANIZATION/cache:$CONTAINER_IMAGE_NAME-${archs[$i]}${variants[$i]}"
            done
        done

        podman image list

        # Remote registries
        release_registries=("ghcr.io" "docker.io")

        # Push manifests
        for registry in "${release_registries[@]}"; do
            for tag in "${release_tags[@]}"; do
                build_msg CONTAINER "Pushing manifest $tag to $registry"

                podman manifest push \
                    "localhost/$CONTAINER_IMAGE_ORGANIZATION/$CONTAINER_IMAGE_NAME:$tag" \
                    "docker://$registry/$CONTAINER_IMAGE_ORGANIZATION/$CONTAINER_IMAGE_NAME:$tag"
            done
        done
    )
    dump_return $?
}
|
||||
|
||||
# Alias
|
||||
podman.build() {
    # alias: build the container image with podman explicitly
    container.build podman
}
|
||||
|
||||
# Alias
|
||||
docker.build() {
    # alias: build the container image with docker explicitly
    container.build docker
}
|
||||
|
||||
# Alias
|
||||
docker.buildx() {
    # alias kept for backward compatibility; same as docker.build
    container.build docker
}
|
||||
71
utils/lib_sxng_data.sh
Executable file
71
utils/lib_sxng_data.sh
Executable file
@@ -0,0 +1,71 @@
|
||||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
data.help(){
    # Print the help text of the data.* command group.
    cat <<EOF
data.:
  all       : update searx/sxng_locales.py and searx/data/*
  traits    : update searx/data/engine_traits.json & searx/sxng_locales.py
  useragents: update searx/data/useragents.json with the most recent versions of Firefox
  locales   : update searx/data/locales.json from babel
  currencies: update searx/data/currencies.json from wikidata
EOF
}
|
||||
|
||||
data.all() {
    # Update all generated data files below searx/data/ and
    # searx/sxng_locales.py; runs in a subshell with the venv activated.
    ( set -e

        pyenv.activate
        data.traits
        data.useragents
        data.locales

        build_msg DATA "update searx/data/osm_keys_tags.json"
        # consistency fix: the venv is already activated above -- call python
        # directly like every other update script here (was: pyenv.cmd python)
        python searxng_extra/update/update_osm_keys_tags.py
        build_msg DATA "update searx/data/ahmia_blacklist.txt"
        python searxng_extra/update/update_ahmia_blacklist.py
        build_msg DATA "update searx/data/wikidata_units.json"
        python searxng_extra/update/update_wikidata_units.py
        build_msg DATA "update searx/data/currencies.json"
        python searxng_extra/update/update_currencies.py
        build_msg DATA "update searx/data/external_bangs.json"
        python searxng_extra/update/update_external_bangs.py
        build_msg DATA "update searx/data/engine_descriptions.json"
        python searxng_extra/update/update_engine_descriptions.py
    )
}
|
||||
|
||||
|
||||
data.traits() {
    # Update searx/data/engine_traits.json (and, as a side effect of the
    # update script, searx/sxng_locales.py).
    ( set -e
        pyenv.activate
        build_msg DATA "update searx/data/engine_traits.json"
        python searxng_extra/update/update_engine_traits.py
        build_msg ENGINES "update searx/sxng_locales.py"
    )
    dump_return $?
}
|
||||
|
||||
data.useragents() {
    # Update searx/data/useragents.json with the most recent Firefox versions.
    build_msg DATA "update searx/data/useragents.json"
    pyenv.cmd python searxng_extra/update/update_firefox_version.py
    dump_return $?
}
|
||||
|
||||
data.locales() {
    # Update searx/data/locales.json from babel.
    ( set -e
        pyenv.activate
        build_msg DATA "update searx/data/locales.json"
        python searxng_extra/update/update_locales.py
    )
    dump_return $?
}
|
||||
|
||||
data.currencies(){
    # Update searx/data/currencies.json from wikidata.
    ( set -e
        pyenv.activate
        build_msg DATA "update searx/data/currencies.json"
        python searxng_extra/update/update_currencies.py
    )
    dump_return $?
}
|
||||
62
utils/lib_sxng_node.sh
Executable file
62
utils/lib_sxng_node.sh
Executable file
@@ -0,0 +1,62 @@
|
||||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
declare _Blue
|
||||
declare _creset
|
||||
|
||||
export NODE_MINIMUM_VERSION="18.17.0"
|
||||
|
||||
node.help(){
    # Print the help text of the node.* command group.
    cat <<EOF
node.:
  env     : download & install SearXNG's npm dependencies locally
  env.dev : download & install developer and CI tools
  clean   : drop locally npm installations
EOF
}
|
||||
|
||||
nodejs.ensure() {
    # Make sure a Node.js >= NODE_MINIMUM_VERSION is available; install one
    # via NVM when the host's node is missing or too old.
    if ! nvm.min_node "${NODE_MINIMUM_VERSION}"; then
        info_msg "install Node.js by NVM"
        nvm.nodejs
    fi
}
|
||||
|
||||
node.env() {
    # Install the npm dependencies of the simple theme (client/simple).
    nodejs.ensure
    ( set -e
        build_msg INSTALL "[npm] ./client/simple/package.json"
        npm --prefix client/simple install
    )
    dump_return $?
}
|
||||
|
||||
node.env.dev() {
    # Install developer & CI tools from the repository's top level package.json.
    nodejs.ensure
    build_msg INSTALL "[npm] ./package.json: developer and CI tools"
    npm install
}
|
||||
|
||||
node.clean() {
    # Drop locally installed npm dependencies (theme and dev/CI tools).
    # When npm itself is not installed there is nothing to clean -> return 0.
    if ! required_commands npm 2>/dev/null; then
        build_msg CLEAN "npm is not installed / ignore npm dependencies"
        return 0
    fi
    build_msg CLEAN "themes -- locally installed npm dependencies"
    ( set -e
        npm --prefix client/simple run clean \
            | prefix_stdout "${_Blue}CLEAN    ${_creset} "
        # prefix_stdout masks npm's exit code; recover it via PIPESTATUS
        if [ "${PIPESTATUS[0]}" -ne "0" ]; then
            return 1
        fi
    )
    build_msg CLEAN "locally installed developer and CI tools"
    ( set -e
        npm --prefix . run clean \
            | prefix_stdout "${_Blue}CLEAN    ${_creset} "
        if [ "${PIPESTATUS[0]}" -ne "0" ]; then
            return 1
        fi
    )
    dump_return $?
}
|
||||
127
utils/lib_sxng_static.sh
Executable file
127
utils/lib_sxng_static.sh
Executable file
@@ -0,0 +1,127 @@
|
||||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
|
||||
# Commit message written by static.build.commit and recognized by
# static.build.drop / is.static.build.commit.
STATIC_BUILD_COMMIT="[build] /static"
# Paths (re)generated by the theme build; a "[build] /static" commit must
# only touch files below these paths.
STATIC_BUILT_PATHS=(
    'searx/templates/simple/icons.html'
    'searx/static/themes/simple'
    'client/simple/package-lock.json'
)
|
||||
|
||||
static.help(){
    # Print the help text of the static.build.* command group.
    cat <<EOF
static.build.:  ${STATIC_BUILD_COMMIT}
  commit    : build & commit /static folder
  drop      : drop last commit if it was previously done by static.build.commit
  restore   : git restore of the /static folder (after themes.all)
EOF
}
|
||||
|
||||
is.static.build.commit() {
    # Return 0 when commit $1 looks like a commit created by
    # static.build.commit: its subject equals STATIC_BUILD_COMMIT (otherwise
    # return 1) and all of its files live below STATIC_BUILT_PATHS
    # (otherwise return 2).

    local commit_sha="$1"
    local commit_message
    local commit_files

    # check commit message
    commit_message=$(git show -s --format=%s "${commit_sha}")
    if [ "${commit_message}" != "${STATIC_BUILD_COMMIT}" ]; then
        err_msg "expecting commit message: '${STATIC_BUILD_COMMIT}'"
        err_msg "commit message of ${commit_sha} is: '${commit_message}'"
        return 1
    fi

    # check all files of the commit belongs to $STATIC_BUILT_PATHS
    commit_files=$(git diff-tree --no-commit-id --name-only -r "${commit_sha}")
    for i in "${STATIC_BUILT_PATHS[@]}"; do
        # remove files of ${STATIC_BUILT_PATHS}
        # NOTE(review): the path is used as a left-anchored *regex* here;
        # regex metachars in STATIC_BUILT_PATHS entries are not escaped.
        commit_files=$(echo "${commit_files}" | grep -v "^${i}")
    done

    if [ -n "${commit_files}" ]; then
        err_msg "commit ${commit_sha} contains files not a part of ${STATIC_BUILD_COMMIT}"
        echo "${commit_files}" | prefix_stdout "  "
        return 2
    fi
    return 0
}
|
||||
|
||||
static.build.drop() {
    # Drop the last commit, but only if it was created by the
    # static.build.commit command.
    #
    # Returns 1 when there is no local-only commit, or the error code of
    # is.static.build.commit when the last commit is not a static build.

    local last_commit_id
    local branch

    build_msg STATIC "drop last commit if it was previously done by static.build.commit"

    # get only last (option -n1) local commit not in remotes
    branch="$(git branch --show-current)"
    last_commit_id="$(git log -n1 "${branch}" --pretty=format:'%h'\
        --not --exclude="${branch}" --branches --remotes)"

    if [ -z "${last_commit_id}" ]; then
        err_msg "there are no local commits"
        return 1
    fi

    # BUGFIX: was "if ! is.static.build.commit ...; then return $?; fi" --
    # inside the then-branch $? holds the (successful) status of the negated
    # if-condition, so the function always returned 0.  Propagate the real
    # error code instead.
    is.static.build.commit "${last_commit_id}" || return $?

    build_msg STATIC "drop last commit ${last_commit_id}"
    git reset --hard HEAD~1
}
|
||||
|
||||
static.build.commit() {
    # call the "static.build.drop" command, then "themes.all" then commit the
    # built files ($BUILT_PATHS).
    #
    # The working tree and the index must be clean before calling; dies when
    # the theme build creates files outside of STATIC_BUILT_PATHS.

    build_msg STATIC "build & commit /static files"

    # check for not committed files
    if [ -n "$(git diff --name-only)" ]; then
        err_msg "some files are not committed:"
        git diff --name-only | prefix_stdout "  "
        return 1
    fi

    # check for staged files
    if [ -n "$(git diff --name-only --cached)" ]; then
        err_msg "some files are staged:"
        git diff --name-only --cached | prefix_stdout "  "
        return 1
    fi

    # drop existing commit from previous build (best effort, output ignored)
    static.build.drop &>/dev/null

    ( set -e
        # fix & build the themes
        themes.fix
        themes.all

        # add build files
        for built_path in "${STATIC_BUILT_PATHS[@]}"; do
            git add -v "${built_path}"
        done

        # check if any file has been added (in case of no changes)
        if [ -z "$(git diff --name-only --cached)" ]; then
            build_msg STATIC "no changes applied / nothing to commit"
            return 0
        fi

        # check for modified files that are not staged
        if [ -n "$(git diff --name-only)" ]; then
            die 42 "themes.all has created files that are not in STATIC_BUILT_PATHS"
        fi
        git commit -m "${STATIC_BUILD_COMMIT}"
    )
}
|
||||
|
||||
static.build.restore() {
    # Undo a theme build: git-restore the built /static files both in the
    # index and in the working tree.
    build_msg STATIC "git-restore of the built files (/static)"
    git restore --staged "${STATIC_BUILT_PATHS[@]}"
    git restore --worktree "${STATIC_BUILT_PATHS[@]}"
}
|
||||
159
utils/lib_sxng_test.sh
Executable file
159
utils/lib_sxng_test.sh
Executable file
@@ -0,0 +1,159 @@
|
||||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
test.help(){
    # Print the help text of the test.* command group.
    cat <<EOF
test.:
  yamllint  : lint YAML files (YAMLLINT_FILES)
  pylint    : lint ./searx, ./searxng_extra and ./tests
  pyright   : static type check of python sources (.dev or .ci)
  black     : check black code format
  unit      : run unit tests
  coverage  : run unit tests with coverage
  robot     : run robot test
  rst       : test .rst files incl. README.rst
  clean     : clean intermediate test stuff
EOF
}
|
||||
|
||||
# forward caller verbosity (VERBOSE=1) to nose2 in test.unit / test.coverage
if [ "$VERBOSE" = "1" ]; then
    TEST_NOSE2_VERBOSE="-vvv"
fi
|
||||
|
||||
test.yamllint() {
    # Lint the YAML files listed in the YAMLLINT_FILES array.
    build_msg TEST "[yamllint] \$YAMLLINT_FILES"
    pyenv.cmd yamllint --strict --format parsable "${YAMLLINT_FILES[@]}"
    dump_return $?
}
|
||||
|
||||
test.pylint() {
    # Run pylint: first over ./searx/engines (with the engine-module builtins
    # that are monkey-patched in at load time), then over the rest.
    ( set -e
        pyenv.activate
        PYLINT_OPTIONS="--rcfile .pylintrc"

        build_msg TEST "[pylint] ./searx/engines"
        # $PYLINT_OPTIONS / $PYLINT_VERBOSE are intentionally unquoted flag lists
        # shellcheck disable=SC2086
        pylint ${PYLINT_OPTIONS} ${PYLINT_VERBOSE} \
            --additional-builtins="traits,supported_languages,language_aliases,logger,categories" \
            searx/engines

        build_msg TEST "[pylint] ./searx ./searxng_extra ./tests"
        # shellcheck disable=SC2086
        pylint ${PYLINT_OPTIONS} ${PYLINT_VERBOSE} \
            --ignore=searx/engines \
            searx searx/searxng.msg \
            searxng_extra searxng_extra/docs_prebuild \
            tests
    )
    dump_return $?
}
|
||||
|
||||
test.types.dev() {
    # use this pyright test for local tests in development / it suppress
    # warnings related to intentional monkey patching but gives good hints where
    # we need to work on SearXNG's typification.
    #
    # --> pyrightconfig.json

    build_msg TEST "[pyright/types] static type check of python sources"
    build_msg TEST "  --> typeCheckingMode: on"
    node.env.dev

    build_msg TEST "[pyright/types] suppress warnings related to intentional monkey patching"
    # We run Pyright in the virtual environment because pyright executes
    # "python" to determine the Python version.
    # keep only file:line diagnostics, then filter the known monkey-patch noise
    pyenv.cmd npx --no-install pyright -p pyrightconfig.json \
        | grep -E '\.py:[0-9]+:[0-9]+'\
        | grep -v '/engines/.*.py.* - warning: "logger" is not defined'\
        | grep -v '/plugins/.*.py.* - error: "logger" is not defined'\
        | grep -v '/engines/.*.py.* - warning: "supported_languages" is not defined' \
        | grep -v '/engines/.*.py.* - warning: "language_aliases" is not defined' \
        | grep -v '/engines/.*.py.* - warning: "categories" is not defined'
    # ignore exit value from pyright
    # dump_return ${PIPESTATUS[0]}
    return 0
}
|
||||
|
||||
test.types.ci() {
    # use this pyright test for CI / disables typeCheckingMode, needed as long
    # we do not have fixed all typification issues.
    #
    # --> pyrightconfig-ci.json

    build_msg TEST "[pyright] static type check of python sources"
    build_msg TEST "  --> typeCheckingMode: off !!!"
    node.env.dev

    build_msg TEST "[pyright] suppress warnings related to intentional monkey patching"
    # We run Pyright in the virtual environment because pyright executes
    # "python" to determine the Python version.
    pyenv.cmd npx --no-install pyright -p pyrightconfig-ci.json \
        | grep -E '\.py:[0-9]+:[0-9]+'\
        | grep -v '/engines/.*.py.* - warning: "logger" is not defined'\
        | grep -v '/plugins/.*.py.* - error: "logger" is not defined'\
        | grep -v '/engines/.*.py.* - warning: "supported_languages" is not defined' \
        | grep -v '/engines/.*.py.* - warning: "language_aliases" is not defined' \
        | grep -v '/engines/.*.py.* - warning: "categories" is not defined'
    # ignore exit value from pyright
    # dump_return ${PIPESTATUS[0]}
    return 0
}
|
||||
|
||||
test.black() {
    # Check (not reformat) the black code format of the BLACK_TARGETS.
    build_msg TEST "[black] \$BLACK_TARGETS"
    pyenv.cmd black --check --diff "${BLACK_OPTIONS[@]}" "${BLACK_TARGETS[@]}"
    dump_return $?
}
|
||||
|
||||
test.unit() {
    # Run the unit tests (tests/unit) with nose2.
    build_msg TEST 'tests/unit'
    # $TEST_NOSE2_VERBOSE is an (optionally empty) unquoted flag
    # shellcheck disable=SC2086
    pyenv.cmd python -m nose2 ${TEST_NOSE2_VERBOSE} -s tests/unit
    dump_return $?
}
|
||||
|
||||
test.coverage() {
    # Run the unit tests with coverage and create text & HTML reports.
    build_msg TEST 'unit test coverage'
    ( set -e
        pyenv.activate
        # shellcheck disable=SC2086
        python -m nose2 ${TEST_NOSE2_VERBOSE} -C --log-capture --with-coverage --coverage searx -s tests/unit
        coverage report
        coverage html
    )
    dump_return $?
}
|
||||
|
||||
test.robot() {
    # Run the robot (browser) tests; needs geckodriver for Firefox.
    build_msg TEST 'robot'
    gecko.driver
    PYTHONPATH=. pyenv.cmd python -m tests.robot
    dump_return $?
}
|
||||
|
||||
test.rst() {
    # Validate the reST markup of the files in the RST_FILES array; dies on
    # the first file that does not render.
    build_msg TEST "[reST markup] ${RST_FILES[*]}"

    for rst in "${RST_FILES[@]}"; do
        pyenv.cmd rst2html --halt error "$rst" > /dev/null || die 42 "fix issue in $rst"
    done
}
|
||||
|
||||
test.themes() {
    # Test the SearXNG themes (delegates to themes.test).
    build_msg TEST 'SearXNG themes'
    themes.test
    dump_return $?
}
|
||||
|
||||
test.pybabel() {
    # Smoke-test message extraction: run pybabel extract into a scratch folder.
    TEST_BABEL_FOLDER="build/test/pybabel"
    build_msg TEST "[extract messages] pybabel"
    mkdir -p "${TEST_BABEL_FOLDER}"
    pyenv.cmd pybabel extract -F babel.cfg -o "${TEST_BABEL_FOLDER}/messages.pot" searx
}
|
||||
|
||||
test.clean() {
    # Remove intermediate test artifacts (logs, coverage data & reports).
    build_msg CLEAN "test stuff"
    rm -rf geckodriver.log .coverage coverage/
    dump_return $?
}
|
||||
36
utils/lib_sxng_themes.sh
Executable file
36
utils/lib_sxng_themes.sh
Executable file
@@ -0,0 +1,36 @@
|
||||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
themes.help(){
    # Print the help text of the themes.* command group.
    cat <<EOF
themes.:
  all       : test & build all themes
  test      : test all themes
  fix       : fix JS & CSS (LESS)
EOF
}
|
||||
|
||||
themes.all() {
    # Build all themes (currently only the simple theme, via vite).
    ( set -e
        build_msg SIMPLE "theme: run build"
        vite.simple.build
    )
    dump_return $?
}
|
||||
|
||||
themes.fix() {
    # Fix JS & CSS (LESS) sources of the themes.
    ( set -e
        build_msg SIMPLE "theme: fix"
        vite.simple.fix
    )
    dump_return $?
}
|
||||
|
||||
themes.test() {
    # Test the themes by running a full build (used in CI).
    ( set -e
        # we run a build to test (in CI)
        build_msg SIMPLE "theme: run build (to test)"
        vite.simple.build
    )
    dump_return $?
}
|
||||
62
utils/lib_sxng_vite.sh
Normal file
62
utils/lib_sxng_vite.sh
Normal file
@@ -0,0 +1,62 @@
|
||||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
declare _Blue
|
||||
declare _creset
|
||||
|
||||
vite.help(){
    # Print the help text of the vite.* command group.
    cat <<EOF
vite.: .. to be done ..
 simple.:
   build: build static files of the simple theme
   dev:   start development server
EOF
}
|
||||
|
||||
VITE_SIMPLE_THEME="${REPO_ROOT}/client/simple"
|
||||
|
||||
# ToDo: vite server is not implemented yet / will be done in a follow up PR
|
||||
#
|
||||
# vite.simple.dev() {
|
||||
# ( set -e
|
||||
# build_msg SIMPLE "start server for FE development of: ${VITE_SIMPLE_THEME}"
|
||||
# pushd "${VITE_SIMPLE_THEME}"
|
||||
# npm install
|
||||
# npm exec -- vite
|
||||
# popd &> /dev/null
|
||||
# )
|
||||
# }
|
||||
|
||||
vite.simple.build() {
    # Build the static files of the simple theme: regenerate the pygments
    # LESS first, then run the npm fix/icons/build pipeline in client/simple.
    ( set -e
        templates.simple.pygments

        node.env
        build_msg SIMPLE "run build of theme from: ${VITE_SIMPLE_THEME}"

        pushd "${VITE_SIMPLE_THEME}"
        npm install
        npm run fix
        npm run icons.html
        npm run build
        popd &> /dev/null
    )
}
|
||||
|
||||
vite.simple.fix() {
    # Run the npm "fix" task (JS & CSS) of the simple theme.
    ( set -e
        node.env
        npm --prefix client/simple run fix
    )
}
|
||||
|
||||
templates.simple.pygments() {
    # Regenerate the LESS files for pygments syntax highlighting.
    build_msg PYGMENTS "searxng_extra/update/update_pygments.py"
    pyenv.cmd python searxng_extra/update/update_pygments.py \
        | prefix_stdout "${_Blue}PYGMENTS ${_creset} "
    # prefix_stdout masks the python exit code; recover it via PIPESTATUS
    if [ "${PIPESTATUS[0]}" -ne "0" ]; then
        build_msg PYGMENTS "building LESS files for pygments failed"
        return 1
    fi
    return 0
}
|
||||
216
utils/lib_sxng_weblate.sh
Executable file
216
utils/lib_sxng_weblate.sh
Executable file
@@ -0,0 +1,216 @@
|
||||
#!/usr/bin/env bash
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
|
||||
weblate.help(){
    # Print the help text of the weblate.* command group.
    cat <<EOF
weblate.:
  push.translations: push translation changes from SearXNG to Weblate's counterpart
  to.translations: Update 'translations' branch with last additions from Weblate.
EOF
}
|
||||
|
||||
TRANSLATIONS_WORKTREE="$CACHE/translations"
|
||||
|
||||
weblate.translations.worktree() {

    # Create git worktree ${TRANSLATIONS_WORKTREE} and checkout branch
    # 'translations' from Weblate's counterpart (weblate) of the SearXNG
    # (origin).
    #
    # remote weblate https://translate.codeberg.org/git/searxng/searxng/

    ( set -e
        # BUGFIX: also discard stdout -- "git remote get-url" prints the URL
        # when the remote exists, which polluted this function's output
        # (only stderr was silenced before).
        if ! git remote get-url weblate &>/dev/null; then
            git remote add weblate https://translate.codeberg.org/git/searxng/searxng/
        fi
        if [ -d "${TRANSLATIONS_WORKTREE}" ]; then
            # worktree already exists: reset it and pull the latest state
            pushd .
            cd "${TRANSLATIONS_WORKTREE}"
            git reset --hard HEAD
            git pull origin translations
            popd
        else
            mkdir -p "${TRANSLATIONS_WORKTREE}"
            git worktree add "${TRANSLATIONS_WORKTREE}" translations
        fi
    )
}
|
||||
|
||||
weblate.to.translations() {

    # Update 'translations' branch of SearXNG (origin) with last additions from
    # Weblate.

    # 1. Check if Weblate is locked, if not die with error message
    # 2. On Weblate's counterpart (weblate), pull master and translations branch
    #    from SearXNG (origin).
    # 3. Commit changes made in a Weblate object on Weblate's counterpart
    #    (weblate).
    # 4. In translations worktree, merge changes of branch 'translations' from
    #    remote 'weblate' and push it on branch 'translations' of 'origin'

    ( set -e
        pyenv.activate
        # the lock must be taken by the caller (see weblate.push.translations)
        if [ "$(wlc lock-status)" != "locked: True" ]; then
            die 1 "weblate must be locked, currently: $(wlc lock-status)"
        fi
        # weblate: commit pending changes
        wlc pull
        wlc commit

        # get the translations in a worktree
        weblate.translations.worktree

        pushd "${TRANSLATIONS_WORKTREE}"
        git remote update weblate
        git merge weblate/translations
        git push
        popd
    )
    dump_return $?
}
|
||||
|
||||
weblate.translations.commit() {

    # Update 'translations' branch of SearXNG (origin) with last additions from
    # Weblate.  Copy the changes to the master branch, compile translations and
    # create a commit in the local branch (master)

    local existing_commit_hash commit_body commit_message exitcode
    ( set -e
        pyenv.activate
        # lock change on weblate
        wlc lock

        # get translations branch in git worktree (TRANSLATIONS_WORKTREE)
        weblate.translations.worktree
        # remember HEAD so the commit body can list only the new commits
        existing_commit_hash=$(cd "${TRANSLATIONS_WORKTREE}"; git log -n1 --pretty=format:'%h')

        # pull weblate commits
        weblate.to.translations

        # copy the changes to the master branch
        cp -rv --preserve=mode,timestamps "${TRANSLATIONS_WORKTREE}/searx/translations" "searx"

        # compile translations
        build_msg BABEL 'compile translation catalogs into binary MO files'
        pybabel compile --statistics \
            -d "searx/translations"

        # update searx/data/translation_labels.json
        data.locales

        # git add/commit (no push)
        commit_body=$(cd "${TRANSLATIONS_WORKTREE}"; git log --pretty=format:'%h - %as - %aN <%ae>' "${existing_commit_hash}..HEAD")
        commit_message=$(echo -e "[l10n] update translations from Weblate\n\n${commit_body}")
        git add searx/translations
        git add searx/data/locales.json
        git commit -m "${commit_message}"
    )
    exitcode=$?
    ( # make sure to always unlock weblate
        set -e
        pyenv.cmd wlc unlock
    )
    dump_return $exitcode
}
|
||||
|
||||
weblate.push.translations() {

    # Push *translation changes* from SearXNG (origin) to Weblate's counterpart
    # (weblate).

    # In branch master of SearXNG (origin) check for meaningful changes in
    # folder 'searx/translations', commit changes on branch 'translations' and
    # at least, pull updated branches on Weblate's counterpart (weblate).

    # 1. Create git worktree ${TRANSLATIONS_WORKTREE} and checkout branch
    #    'translations' from remote 'weblate'.
    # 2. Stop if there is no meaningful change in the 'master' branch (origin),
    #    compared to the 'translations' branch (weblate), otherwise ...
    # 3. Update 'translations' branch of SearXNG (origin) with last additions
    #    from Weblate.
    # 5. Notify Weblate to pull updated 'master' & 'translations' branch.

    local messages_pot diff_messages_pot last_commit_hash last_commit_detail \
        exitcode
    messages_pot="${TRANSLATIONS_WORKTREE}/searx/translations/messages.pot"
    ( set -e
        pyenv.activate
        # get translations branch in git worktree (TRANSLATIONS_WORKTREE)
        weblate.translations.worktree

        # update messages.pot in the master branch
        build_msg BABEL 'extract messages from source files and generate POT file'
        pybabel extract -F babel.cfg \
            -o "${messages_pot}" \
            "searx/"

        # stop if there is no meaningful change in the master branch
        diff_messages_pot=$(cd "${TRANSLATIONS_WORKTREE}";\
            git diff -- "searx/translations/messages.pot")
        # only added/removed msgid/msgstr lines count as meaningful
        if ! echo "$diff_messages_pot" | grep -qE "[\+\-](msgid|msgstr)"; then
            build_msg BABEL 'no changes detected, exiting'
            return 42
        fi
        return 0
    )
    exitcode=$?
    # 42 is the "nothing to do" sentinel from the subshell above -> success
    if [ "$exitcode" -eq 42 ]; then
        return 0
    fi
    if [ "$exitcode" -gt 0 ]; then
        return $exitcode
    fi
    (
        set -e
        pyenv.activate

        # lock change on weblate
        # weblate may add commit(s) since the call to "weblate.translations.worktree".
        # this is not a problem because after this line, "weblate.to.translations"
        # calls again "weblate.translations.worktree" which calls "git pull"
        wlc lock

        # save messages.pot in the translations branch for later
        pushd "${TRANSLATIONS_WORKTREE}"
        git stash push
        popd

        # merge weblate commits into the translations branch
        weblate.to.translations

        # restore messages.pot in the translations branch
        pushd "${TRANSLATIONS_WORKTREE}"
        git stash pop
        popd

        # update messages.po files in the master branch
        build_msg BABEL 'update existing message catalogs from POT file'
        pybabel update -N \
            -i "${messages_pot}" \
            -d "${TRANSLATIONS_WORKTREE}/searx/translations"

        # git add/commit/push
        last_commit_hash=$(git log -n1 --pretty=format:'%h')
        last_commit_detail=$(git log -n1 --pretty=format:'%h - %as - %aN <%ae>' "${last_commit_hash}")

        pushd "${TRANSLATIONS_WORKTREE}"
        git add searx/translations
        git commit \
            -m "[translations] update messages.pot and messages.po files" \
            -m "From ${last_commit_detail}"
        git push
        popd

        # notify weblate to pull updated master & translations branch
        wlc pull
    )
    exitcode=$?
    ( # make sure to always unlock weblate
        set -e
        pyenv.activate
        wlc unlock
    )
    dump_return $exitcode
}
|
||||
69
utils/lxc-searxng.env
Normal file
69
utils/lxc-searxng.env
Normal file
@@ -0,0 +1,69 @@
|
||||
# -*- coding: utf-8; mode: sh indent-tabs-mode: nil -*-
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
|
||||
# shellcheck shell=bash
|
||||
|
||||
# This file is a setup of a LXC suite. It is sourced from different context, do
|
||||
# not manipulate the environment directly, implement functions and manipulate
|
||||
# environment only in subshells.
|
||||
|
||||
lxc_set_suite_env() {

    # Define the LXC suite: suite name, image remote and the flat list of
    # (remote-image, local-alias) pairs the suite is built from.

    LXC_SUITE_NAME="searxng"

    # remote name of https://images.linuxcontainers.org
    LINUXCONTAINERS_ORG_NAME="${LINUXCONTAINERS_ORG_NAME:-images}"
    LXC_HOST_PREFIX="${LXC_SUITE_NAME:-searx}"
    LXC_SUITE=(

        # end of standard support see https://wiki.ubuntu.com/Releases
        "$LINUXCONTAINERS_ORG_NAME:ubuntu/20.04" "ubu2004" # LTS EOSS April 2025
        "$LINUXCONTAINERS_ORG_NAME:ubuntu/22.04" "ubu2204" # LTS EOSS April 2027

        # EOL see https://fedoraproject.org/wiki/Releases
        "$LINUXCONTAINERS_ORG_NAME:fedora/35" "fedora35"

        # rolling releases see https://www.archlinux.org/releng/releases/
        "$LINUXCONTAINERS_ORG_NAME:archlinux" "archlinux"
    )
    export LXC_SUITE_NAME LINUXCONTAINERS_ORG_NAME LXC_HOST_PREFIX LXC_SUITE
}
|
||||
|
||||
lxc_suite_install_info() {
    # Print a short description of the suite: its name and the image/container
    # lists (LOCAL_IMAGES / CONTAINERS are built by the sourcing lxc.sh).
    # Runs in a subshell so lxc_set_suite_env does not leak into the caller.
    (
        lxc_set_suite_env
        cat <<EOF
LXC suite: ${LXC_SUITE_NAME}
  Suite includes installation of SearXNG
  images:     ${LOCAL_IMAGES[*]}
  containers: ${CONTAINERS[*]}
EOF
    )
}
|
||||
|
||||
lxc_suite_install() {
    # Install the SearXNG suite into the container this script runs in.
    # Runs in a subshell so the suite environment does not leak.
    (
        lxc_set_suite_env
        # FORCE_TIMEOUT=0 --> answer interactive questions with their default
        FORCE_TIMEOUT=0 "${LXC_REPO_ROOT}/utils/searxng.sh" install all
        rst_title "Suite installation finished ($(hostname))" part
        if ask_yn "Developer install? (wraps source from HOST into the running instance)" Yn; then
            "${LXC_REPO_ROOT}/utils/searxng.sh" searxng.install.link_src "$(pwd)"
        fi
        lxc_suite_info
        echo
    )
}
|
||||
|
||||
lxc_suite_info() {
    # Print the URLs under which the instance in this container is reachable,
    # one line per global IP ('<iface>|<addr>' items from global_IPs).
    (
        lxc_set_suite_env
        for ip in $(global_IPs) ; do
            case $ip in
                *:*)
                    # address contains a colon --> IPv6
                    info_msg "(${ip%|*}) IPv6: http://[${ip#*|}]"
                    ;;
                *)
                    # IPv4:
                    # shellcheck disable=SC2034,SC2031
                    info_msg "(${ip%|*}) docs-live: http://${ip#*|}:8080/"
                    ;;
            esac
        done
        "${LXC_REPO_ROOT}/utils/searxng.sh" searxng.instance.env
    )
}
|
||||
573
utils/lxc.sh
Executable file
573
utils/lxc.sh
Executable file
@@ -0,0 +1,573 @@
|
||||
#!/usr/bin/env bash
# -*- coding: utf-8; mode: sh indent-tabs-mode: nil -*-
# SPDX-License-Identifier: AGPL-3.0-or-later

# shellcheck source=utils/lib.sh
source "$(dirname "${BASH_SOURCE[0]}")/lib.sh"

# load environment of the LXC suite
LXC_ENV="${LXC_ENV:-${REPO_ROOT}/utils/lxc-searxng.env}"
source "$LXC_ENV"
lxc_set_suite_env

# ----------------------------------------------------------------------------
# config
# ----------------------------------------------------------------------------
#
# read also:
# - https://lxd.readthedocs.io/en/latest/

LXC_HOST_PREFIX="${LXC_HOST_PREFIX:-test}"

# Location in the container where all folders from HOST are mounted
LXC_SHARE_FOLDER="/share"
LXC_REPO_ROOT="${LXC_SHARE_FOLDER}/$(basename "${REPO_ROOT}")"

# Per-image bootstrap scripts, executed inside a fresh container.  The
# variable name must be '<image-alias>_boilerplate' (looked up by indirect
# expansion in build_container / lxc_boilerplate_all_containers).

# shellcheck disable=SC2034
ubu2004_boilerplate="
export DEBIAN_FRONTEND=noninteractive
apt-get update -y
apt-get upgrade -y
apt-get install -y git curl wget
echo 'Set disable_coredump false' >> /etc/sudo.conf
"

# shellcheck disable=SC2034
ubu2204_boilerplate="$ubu2004_boilerplate"

# shellcheck disable=SC2034
archlinux_boilerplate="
pacman --noprogressbar -Syu --noconfirm
pacman --noprogressbar -S --noconfirm inetutils git curl wget sudo
echo 'Set disable_coredump false' >> /etc/sudo.conf
"

# shellcheck disable=SC2034
fedora35_boilerplate="
dnf update -y
dnf install -y git curl wget hostname
echo 'Set disable_coredump false' >> /etc/sudo.conf
"

# shellcheck disable=SC2034
centos7_boilerplate="
yum update -y
yum install -y git curl wget hostname sudo which
echo 'Set disable_coredump false' >> /etc/sudo.conf
"

REMOTE_IMAGES=()
CONTAINERS=()
LOCAL_IMAGES=()

# split the flat LXC_SUITE list of (remote-image, alias) pairs into the
# three parallel lists used throughout this script
for ((i=0; i<${#LXC_SUITE[@]}; i+=2)); do
    REMOTE_IMAGES=("${REMOTE_IMAGES[@]}" "${LXC_SUITE[i]}")
    CONTAINERS=("${CONTAINERS[@]}" "${LXC_HOST_PREFIX}-${LXC_SUITE[i+1]}")
    LOCAL_IMAGES=("${LOCAL_IMAGES[@]}" "${LXC_SUITE[i+1]}")
done

# the invoking user -- sudo aware, so uid/gid mapping targets the real user
HOST_USER="${SUDO_USER:-$USER}"
HOST_USER_ID=$(id -u "${HOST_USER}")
HOST_GROUP_ID=$(id -g "${HOST_USER}")
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
usage() {
    # Print the command line help of this script.  When an (error) message is
    # passed as $1, it is printed after the help via err_msg.
    _cmd="$(basename "$0")"
    cat <<EOF
usage::
  $_cmd build        [containers|<name>]
  $_cmd copy         [images]
  $_cmd remove       [containers|<name>|images]
  $_cmd [start|stop] [containers|<name>]
  $_cmd show         [images|suite|info|config [<name>]]
  $_cmd cmd          [--|<name>] '...'
  $_cmd install      [suite|base [<name>]]

build
  :containers:   build, launch all containers and 'install base' packages
  :<name>:       build, launch container <name> and 'install base' packages
copy:
  :images:       copy remote images of the suite into local storage
remove
  :containers:   delete all 'containers' or only <container-name>
  :images:       delete local images of the suite
start/stop
  :containers:   start/stop all 'containers' from the suite
  :<name>:       start/stop container <name> from suite
show
  :info:         show info of all (or <name>) containers from LXC suite
  :config:       show config of all (or <name>) containers from the LXC suite
  :suite:        show services of all (or <name>) containers from the LXC suite
  :images:       show information of local images
cmd
  use single quotes to evaluate in container's bash, e.g.: 'echo \$(hostname)'
  --             run command '...' in all containers of the LXC suite
  :<name>:       run command '...' in container <name>
install
  :base:         prepare LXC; install basic packages
  :suite:        install LXC ${LXC_SUITE_NAME} suite into all (or <name>) containers

EOF
    usage_containers
    # '[ -n "${1+x}" ]' is true when $1 is set (even if empty)
    [ -n "${1+x}" ] && err_msg "$1"
}
|
||||
|
||||
usage_containers() {
    # Print suite/container info, optionally followed by an error message.
    # NOTE: the '&&' form is kept on purpose -- the function's exit status is
    # non-zero when no message was passed, and callers chain on it.
    lxc_suite_install_info
    [ -n "${1+x}" ] && err_msg "$1"
}
|
||||
|
||||
lxd_info() {

    # Print a hint about how to install LXD (shown when the 'lxc' client
    # command is missing on the HOST).

    cat <<EOF

LXD is needed, to install run::

  snap install lxd
  lxd init --auto

EOF
}
|
||||
|
||||
main() {

    # Command line dispatcher of this script.
    #
    # Fixes over the original:
    #  - start|stop <name>: the output prefix interpolated the leftover global
    #    loop index '$i' instead of the container name '$2'
    #  - --getenv: 'var' no longer leaks as a global

    local exit_val
    local _usage="unknown or missing $1 command $2"

    lxc_distro_setup

    # don't check prerequisite when in recursion
    if [[ ! $1 == __* ]] && [[ ! $1 == --help ]]; then
        if ! in_container; then
            ! required_commands lxc && lxd_info && exit 42
        fi
        [[ -z $LXC_SUITE ]] && err_msg "missing LXC_SUITE" && exit 42
    fi

    case $1 in
        --getenv)
            # print the value of the named environment variable
            local var="$2"
            echo "${!var}"
            exit 0
            ;;
        -h|--help) usage; exit 0;;

        build)
            sudo_or_exit
            case $2 in
                ${LXC_HOST_PREFIX}-*) build_container "$2" ;;
                ''|--|containers) build_all_containers ;;
                *) usage "$_usage"; exit 42;;
            esac
            ;;
        copy)
            case $2 in
                ''|images) lxc_copy_images_locally;;
                *) usage "$_usage"; exit 42;;
            esac
            ;;
        remove)
            sudo_or_exit
            case $2 in
                ''|--|containers) remove_containers ;;
                images) lxc_delete_images_locally ;;
                ${LXC_HOST_PREFIX}-*)
                    ! lxc_exists "$2" && warn_msg "container not yet exists: $2" && exit 0
                    if ask_yn "Do you really want to delete container $2"; then
                        lxc_delete_container "$2"
                    fi
                    ;;
                *) usage "unknown or missing container <name> $2"; exit 42;;
            esac
            ;;
        start|stop)
            sudo_or_exit
            case $2 in
                ''|--|containers) lxc_cmd "$1" ;;
                ${LXC_HOST_PREFIX}-*)
                    ! lxc_exists "$2" && usage_containers "unknown container: $2" && exit 42
                    info_msg "lxc $1 $2"
                    # was: "[...${i}...]" -- $i is an unrelated leftover index
                    lxc "$1" "$2" | prefix_stdout "[${_BBlue}${2}${_creset}] "
                    ;;
                *) usage "unknown or missing container <name> $2"; exit 42;;
            esac
            ;;
        show)
            sudo_or_exit
            case $2 in
                suite)
                    case $3 in
                        ${LXC_HOST_PREFIX}-*)
                            # recurse into the container (see '__show' below)
                            lxc exec -t "$3" -- "${LXC_REPO_ROOT}/utils/lxc.sh" __show suite \
                                | prefix_stdout "[${_BBlue}$3${_creset}] "
                            ;;
                        *) show_suite;;
                    esac
                    ;;
                images) show_images ;;
                config)
                    case $3 in
                        ${LXC_HOST_PREFIX}-*)
                            ! lxc_exists "$3" && usage_containers "unknown container: $3" && exit 42
                            lxc config show "$3" | prefix_stdout "[${_BBlue}${3}${_creset}] "
                            ;;
                        *)
                            rst_title "container configurations"
                            echo
                            lxc list "$LXC_HOST_PREFIX-"
                            echo
                            lxc_cmd config show
                            ;;
                    esac
                    ;;
                info)
                    case $3 in
                        ${LXC_HOST_PREFIX}-*)
                            ! lxc_exists "$3" && usage_containers "unknown container: $3" && exit 42
                            lxc info "$3" | prefix_stdout "[${_BBlue}${3}${_creset}] "
                            ;;
                        *)
                            rst_title "container info"
                            echo
                            lxc_cmd info
                            ;;
                    esac
                    ;;
                *) usage "$_usage"; exit 42;;
            esac
            ;;
        __show)
            # wrapped show commands, called once in each container
            case $2 in
                suite) lxc_suite_info ;;
            esac
            ;;
        cmd)
            sudo_or_exit
            shift
            case $1 in
                --) shift; lxc_exec "$@" ;;
                ${LXC_HOST_PREFIX}-*)
                    ! lxc_exists "$1" && usage_containers "unknown container: $1" && exit 42
                    local name=$1
                    shift
                    lxc_exec_cmd "${name}" "$@"
                    ;;
                *) usage_containers "unknown container: $1" && exit 42
            esac
            ;;
        install)
            sudo_or_exit
            case $2 in
                suite|base)
                    case $3 in
                        ${LXC_HOST_PREFIX}-*)
                            ! lxc_exists "$3" && usage_containers "unknown container: $3" && exit 42
                            lxc_exec_cmd "$3" "${LXC_REPO_ROOT}/utils/lxc.sh" __install "$2"
                            ;;
                        ''|--) lxc_exec "${LXC_REPO_ROOT}/utils/lxc.sh" __install "$2" ;;
                        *) usage_containers "unknown container: $3" && exit 42
                    esac
                    ;;
                *) usage "$_usage"; exit 42 ;;
            esac
            ;;
        __install)
            # wrapped install commands, called once in each container
            # shellcheck disable=SC2119
            case $2 in
                suite) lxc_suite_install ;;
                base) FORCE_TIMEOUT=0 lxc_install_base_packages ;;
            esac
            ;;
        doc)
            echo
            echo ".. generic utils/lxc.sh documentation"
            ;;
        -*) usage "unknown option $1"; exit 42;;
        *) usage "unknown or missing command $1"; exit 42;;
    esac
}
|
||||
|
||||
|
||||
build_all_containers() {
    # Build the complete suite: copy images, init, configure and bootstrap
    # all containers, then install the base packages in each of them.
    rst_title "Build all LXC containers of suite"
    echo
    usage_containers
    lxc_copy_images_locally
    lxc_init_all_containers
    lxc_config_all_containers
    lxc_boilerplate_all_containers
    rst_title "install LXC base packages" section
    echo
    # recurse into each container ('__install base', see main)
    lxc_exec "${LXC_REPO_ROOT}/utils/lxc.sh" __install base
    echo
    lxc list "$LXC_HOST_PREFIX"
}
|
||||
|
||||
build_container() {

    # Build a single container of the suite: copy its image, init, configure
    # and bootstrap it, then install the base packages.
    #
    # usage: build_container <container-name>

    rst_title "Build container $1"

    local remote_image
    local container
    local image
    local boilerplate_script

    # look up the (remote-image, alias) pair whose container name matches $1
    for ((i=0; i<${#LXC_SUITE[@]}; i+=2)); do
        [ "${LXC_HOST_PREFIX}-${LXC_SUITE[i+1]}" = "$1" ] || continue
        remote_image="${LXC_SUITE[i]}"
        image="${LXC_SUITE[i+1]}"
        container="${LXC_HOST_PREFIX}-${image}"
        # indirect expansion: '<alias>_boilerplate' --> script text
        boilerplate_script="${image}_boilerplate"
        boilerplate_script="${!boilerplate_script}"
        break
    done
    echo
    if [ -z "$container" ]; then
        err_msg "container $1 unknown"
        usage_containers
        return 42
    fi
    lxc_image_copy "${remote_image}" "${image}"
    rst_title "init container" section
    lxc_init_container "${image}" "${container}"
    rst_title "configure container" section
    lxc_config_container "${container}"
    rst_title "run LXC boilerplate scripts" section
    lxc_install_boilerplate "${container}" "$boilerplate_script"
    echo
    rst_title "install LXC base packages" section
    lxc_exec_cmd "${container}" "${LXC_REPO_ROOT}/utils/lxc.sh" __install base \
        | prefix_stdout "[${_BBlue}${container}${_creset}] "
    echo
    lxc list "$container"
}
|
||||
|
||||
remove_containers() {

    # Interactively delete all containers of the suite.  With FORCE_TIMEOUT=0
    # the question defaults to "yes".

    rst_title "Remove all LXC containers of suite"
    rst_para "existing containers matching ${_BGreen}$LXC_HOST_PREFIX-*${_creset}"
    echo
    lxc list "$LXC_HOST_PREFIX-"
    echo -en "\\n${_BRed}LXC containers to delete::${_creset}\\n\\n  ${CONTAINERS[*]}\\n" | $FMT
    local default
    if [[ $FORCE_TIMEOUT = 0 ]]; then
        default=Yn
    else
        default=Ny
    fi
    if ask_yn "Do you really want to delete these containers" $default; then
        for i in "${CONTAINERS[@]}"; do
            lxc_delete_container "$i"
        done
    fi
    echo
    lxc list "$LXC_HOST_PREFIX-"
}
|
||||
|
||||
# images
|
||||
# ------
|
||||
|
||||
lxc_copy_images_locally() {
    # Copy all remote images of the suite into the local image storage.
    rst_title "copy images" section
    for ((i=0; i<${#LXC_SUITE[@]}; i+=2)); do
        # (remote image, local alias) pairs
        lxc_image_copy "${LXC_SUITE[i]}" "${LXC_SUITE[i+1]}"
    done
    # lxc image list local: && wait_key
}
|
||||
|
||||
lxc_delete_images_locally() {

    # Interactively delete the suite's local images; afterwards offer to drop
    # any alias-less images left over in the local storage.

    rst_title "Delete LXC images"
    rst_para "local existing images"
    echo
    lxc image list local:
    echo -en "\\n${_BRed}LXC images to delete::${_creset}\\n\\n  ${LOCAL_IMAGES[*]}\\n"
    if ask_yn "Do you really want to delete these images"; then
        for i in "${LOCAL_IMAGES[@]}"; do
            lxc_delete_local_image "$i"
        done
    fi

    # second column of the csv listing is the fingerprint; rows starting with
    # ',' have an empty alias
    for i in $(lxc image list --format csv | grep '^,' | sed 's/,\([^,]*\).*$/\1/'); do
        ask_yn "Image $i has no alias, do you want to delete the image?" Yn \
            && lxc_delete_local_image "$i"
    done

    echo
    lxc image list local:
}
|
||||
|
||||
show_images(){

    # List local images and print details for each image of the suite.

    rst_title "local images"
    echo
    lxc image list local:
    echo -en "\\n${_Green}LXC suite images::${_creset}\\n\\n  ${LOCAL_IMAGES[*]}\\n"
    wait_key
    for i in "${LOCAL_IMAGES[@]}"; do
        if ! lxc_image_exists "$i"; then
            warn_msg "image ${_BBlue}$i${_creset} does not yet exists"
            continue
        fi
        info_msg "lxc image info ${_BBlue}${i}${_creset}"
        lxc image info "$i" | prefix_stdout "[${_BBlue}${i}${_creset}] "
    done

}
|
||||
|
||||
|
||||
# container
|
||||
# ---------
|
||||
|
||||
show_suite(){

    # List the suite's containers and, for each existing one, recurse into it
    # to print the services info ('__show suite', see main).

    rst_title "LXC suite ($LXC_HOST_PREFIX-*)"
    echo
    lxc list "$LXC_HOST_PREFIX-"
    echo
    for i in "${CONTAINERS[@]}"; do
        if ! lxc_exists "$i"; then
            warn_msg "container ${_BBlue}$i${_creset} does not yet exists"
            continue
        fi
        lxc exec -t "${i}" -- "${LXC_REPO_ROOT}/utils/lxc.sh" __show suite \
            | prefix_stdout "[${_BBlue}${i}${_creset}] "
        echo
    done
}
|
||||
|
||||
lxc_cmd() {
    # Run one lxc client command ("$@") once for every existing container of
    # the suite, e.g.: lxc_cmd start
    for i in "${CONTAINERS[@]}"; do
        if ! lxc_exists "$i"; then
            warn_msg "container ${_BBlue}$i${_creset} does not yet exists"
            continue
        fi
        info_msg "lxc $* $i"
        lxc "$@" "$i" | prefix_stdout "[${_BBlue}${i}${_creset}] "
    done
}
|
||||
|
||||
lxc_exec_cmd() {

    # Run a shell command line inside one container and report its exit code.
    #
    # usage: lxc_exec_cmd <container-name> '<shell command> ..'

    local name="$1"
    shift
    # NOTE: exit_val is intentionally NOT local -- it is visible to callers
    exit_val=
    info_msg "[${_BBlue}${name}${_creset}] ${_BGreen}${*}${_creset}"
    if lxc exec -t --cwd "${LXC_REPO_ROOT}" "${name}" -- bash -c "$*"; then
        exit_val=0
        info_msg "[${_BBlue}${name}${_creset}] exit code (${exit_val}) from ${_BGreen}${*}${_creset}"
    else
        exit_val=$?
        warn_msg "[${_BBlue}${name}${_creset}] exit code (${_BRed}${exit_val}${_creset}) from ${_BGreen}${*}${_creset}"
    fi
}
|
||||
|
||||
lxc_exec() {
    # Run a shell command line ("$@") in every existing container of the suite.
    for i in "${CONTAINERS[@]}"; do
        if ! lxc_exists "$i"; then
            warn_msg "container ${_BBlue}$i${_creset} does not yet exists"
            continue
        fi
        lxc_exec_cmd "${i}" "$@" | prefix_stdout "[${_BBlue}${i}${_creset}] "
    done
}
|
||||
|
||||
lxc_init_all_containers() {
    # Initialize one container per (image, name) pair of the LXC suite.
    #
    # fix: dropped 'local image_name' / 'local container_name' -- both were
    # declared but never used.
    rst_title "init all containers" section

    for ((i=0; i<${#LXC_SUITE[@]}; i+=2)); do
        lxc_init_container "${LXC_SUITE[i+1]}" "${LXC_HOST_PREFIX}-${LXC_SUITE[i+1]}"
    done
}
|
||||
|
||||
lxc_config_all_containers() {
    # Apply the HOST uid/gid mapping and the repo share to every container.
    rst_title "configure all containers" section

    for i in "${CONTAINERS[@]}"; do
        lxc_config_container "${i}"
    done
}
|
||||
|
||||
lxc_config_container() {
    # Configure one container: map the HOST user's uid/gid to root inside the
    # container and mount REPO_ROOT at LXC_REPO_ROOT.
    #
    # usage: lxc_config_container <container-name>
    info_msg "[${_BBlue}$1${_creset}] configure container ..."

    info_msg "[${_BBlue}$1${_creset}] map uid/gid from host to container"
    # https://lxd.readthedocs.io/en/latest/userns-idmap/#custom-idmaps
    echo -e -n "uid $HOST_USER_ID 0\\ngid $HOST_GROUP_ID 0"\
        | lxc config set "$1" raw.idmap -

    info_msg "[${_BBlue}$1${_creset}] share ${REPO_ROOT} (repo_share) from HOST into container"
    # https://lxd.readthedocs.io/en/latest/instances/#type-disk
    # errors (e.g. device already exists) are silenced on purpose
    lxc config device add "$1" repo_share disk \
        source="${REPO_ROOT}" \
        path="${LXC_REPO_ROOT}" &>/dev/null
    # lxc config show "$1" && wait_key
}
|
||||
|
||||
lxc_boilerplate_all_containers() {
    # Run the per-image boilerplate script in every container of the suite.
    #
    # fixes: the "no boilerplate" message interpolated ${container_name},
    # which was never set in this function (it always expanded empty); the
    # emptiness check is now done before the install call.
    rst_title "run LXC boilerplate scripts" section

    local boilerplate_script
    local image_name
    local container_name

    for ((i=0; i<${#LXC_SUITE[@]}; i+=2)); do

        image_name="${LXC_SUITE[i+1]}"
        container_name="${LXC_HOST_PREFIX}-${image_name}"
        # indirect expansion: '<alias>_boilerplate' --> script text
        boilerplate_script="${image_name}_boilerplate"
        boilerplate_script="${!boilerplate_script}"

        if [[ -z "${boilerplate_script}" ]]; then
            err_msg "[${_BBlue}${container_name}${_creset}] no boilerplate for image '${image_name}'"
        fi

        # run even with an empty script: the container env is still initialized
        lxc_install_boilerplate "${container_name}" "$boilerplate_script"

    done
}
|
||||
|
||||
lxc_install_boilerplate() {

    # usage: lxc_install_boilerplate <container-name> <string: shell commands ..>
    #
    # usage: lxc_install_boilerplate searx-archlinux "${archlinux_boilerplate}"

    local container="$1"
    local script="$2"

    info_msg "[${_BBlue}${container}${_creset}] init .."
    if lxc start -q "${container}" &>/dev/null; then
        sleep 5 # guest needs some time to come up and get an IP
    fi
    if ! check_connectivity "${container}"; then
        die 42 "Container ${container} has no internet connectivity!"
    fi
    lxc_init_container_env "${container}"

    # symlink /.lxcenv.mk into the container (activates the LXC makefile env)
    info_msg "[${_BBlue}${container}${_creset}] install /.lxcenv.mk .."
    cat <<EOF | lxc exec "${container}" -- bash | prefix_stdout "[${_BBlue}${container}${_creset}] "
rm -f "/.lxcenv.mk"
ln -s "${LXC_REPO_ROOT}/utils/makefile.lxc" "/.lxcenv.mk"
ls -l "/.lxcenv.mk"
EOF

    info_msg "[${_BBlue}${container}${_creset}] run LXC boilerplate scripts .."
    if lxc start -q "${container}" &>/dev/null; then
        sleep 5 # guest needs some time to come up and get an IP
    fi
    if [[ -n "${script}" ]]; then
        echo "${script}" \
            | lxc exec "${container}" -- bash \
            | prefix_stdout "[${_BBlue}${container}${_creset}] "
    fi
}
|
||||
|
||||
check_connectivity() {
    # Probe internet connectivity from inside container $1 (one ping to
    # 9.9.9.9).  Returns 0 on success, 1 (plus diagnostics) otherwise.
    local ret_val=0
    info_msg "check internet connectivity ..."
    if lxc exec "${1}" -- ping -c 1 9.9.9.9 &>/dev/null; then
        return $ret_val
    fi
    ret_val=1
    err_msg "no internet connectivity!"
    info_msg "Most often the connectivity is blocked by a docker installation:"
    info_msg "Whenever docker is started (reboot) it sets the iptables policy "
    info_msg "for the FORWARD chain to DROP, see:"
    info_msg "    https://docs.searxng.org/utils/lxc.sh.html#internet-connectivity-docker"
    iptables-save | grep ":FORWARD"
    return $ret_val
}
|
||||
|
||||
# ----------------------------------------------------------------------------
|
||||
main "$@"
|
||||
# ----------------------------------------------------------------------------
|
||||
103
utils/makefile.include
Normal file
103
utils/makefile.include
Normal file
@@ -0,0 +1,103 @@
|
||||
# -*- coding: utf-8; mode: makefile-gmake -*-
# SPDX-License-Identifier: AGPL-3.0-or-later

# When running on the HOST, offer lxc-activate / lxc-purge; inside a container
# (where /.lxcenv.mk exists) include the LXC environment instead.
ifeq (,$(wildcard /.lxcenv.mk))
PHONY += lxc-activate lxc-purge
lxc-activate:
	@$(MAKE) -s -f "$$(dirname $(abspath $(lastword $(MAKEFILE_LIST))))/makefile.lxc" lxc-activate
lxc-purge:
	$(Q)rm -rf ./lxc-env
else
include /.lxcenv.mk
endif

# make-help prints the generic make options; inside a container it also
# chains the lxc-help target from /.lxcenv.mk
PHONY += make-help
ifeq (,$(wildcard /.lxcenv.mk))
make-help:
else
make-help: lxc-help
endif
	@echo  'options:'
	@echo  '  make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build'
	@echo  '  make V=2   [targets] 2 => give reason for rebuild of target'

# verbosity control: V=1 --> echo full commands, V=2 --> explain rebuilds
ifeq ("$(origin V)", "command line")
  VERBOSE = $(V)
endif
ifndef VERBOSE
  VERBOSE = 0
endif

export VERBOSE

ifeq ($(VERBOSE),1)
  quiet =
  Q =
else
  quiet=quiet_
  Q = @
endif

# stolen from linux/scripts/Kbuild.include
#

# Convenient variables
squote  := '
#' this comment is only for emacs highlighting

# Find any prerequisites that is newer than target or that does not exist.
# PHONY targets skipped in both cases.
any-prereq = $(filter-out $(PHONY),$?) $(filter-out $(PHONY) $(wildcard $^),$^)
#
###
# why - tell why a a target got build
#       enabled by make V=2
#       Output (listed in the order they are checked):
#          (1) - due to target is PHONY
#          (2) - due to target missing
#          (3) - due to: file1.h file2.h
#          (4) - due to command line change
#          (5) - due to missing .cmd file
#          (6) - due to target not in $(targets)
#       (1) PHONY targets are always build
#       (2) No target, so we better build it
#       (3) Prerequisite is newer than target
#       (4) The command line stored in the file named dir/.target.cmd
#           differed from actual command line. This happens when compiler
#           options changes
#       (5) No dir/.target.cmd file (used to store command line)
#       (6) No dir/.target.cmd file and target not listed in $(targets)
#           This is a good hint that there is a bug in the kbuild file
ifeq ($(VERBOSE),2)
why =                                                                        \
    $(if $(filter $@, $(PHONY)),- due to target is PHONY,                    \
        $(if $(wildcard $@),                                                 \
            $(if $(strip $(any-prereq)),- due to: $(any-prereq),             \
                $(if $(arg-check),                                           \
                    $(if $(cmd_$@),- due to command line change,             \
                        $(if $(filter $@, $(targets)),                       \
                            - due to missing .cmd file,                      \
                            - due to $(notdir $@) not in $$(targets)         \
                         )                                                   \
                     )                                                       \
                 )                                                           \
             ),                                                              \
             - due to target missing                                         \
         )                                                                   \
     )

echo-why = $(call escsq, $(strip $(why)))
endif
#
###
# Escape single quote for use in echo statements
escsq = $(subst $(squote),'\$(squote)',$1)
#
# echo command.
#   Short version is used, if $(quiet) equals `quiet_', otherwise full one.
echo-cmd = $(if $($(quiet)cmd_$(1)),echo '$(call escsq,$($(quiet)cmd_$(1)))$(echo-why)';)
#
# printing commands
cmd = @$(echo-cmd) $(cmd_$(1))

.PHONY: $(PHONY)
||||
32
utils/makefile.lxc
Normal file
32
utils/makefile.lxc
Normal file
@@ -0,0 +1,32 @@
|
||||
# -*- coding: utf-8; mode: makefile-gmake -*-
#
# LXC environment
# ===============
#
# To activate/deactivate LXC makefile environment in a container, set/unset link
# from root '/.lxcenv.mk' to *this* file::
#
#    sudo make ./utils/makefile.lxc lxc-activate
#    sudo make ./utils/makefile.lxc lxc-deactivate

# per-container build folder (hostname-keyed, so containers sharing the repo
# mount do not clobber each other's build artifacts)
LXC_ENV_FOLDER=lxc-env/$(shell hostname)/

lxc-help::
	@echo  '  LXC: running in container LXC_ENV_FOLDER=$(LXC_ENV_FOLDER)'

# If not activated, serve target 'lxc-activate' ..
ifeq (,$(wildcard /.lxcenv.mk))
PHONY += lxc-activate
lxc-activate:
	ln -s "$(abspath $(lastword $(MAKEFILE_LIST)))" "/.lxcenv.mk"
else
# .. and if activated, serve target 'lxc-deactivate'.
PHONY += lxc-deactivate
lxc-deactivate:
	rm /.lxcenv.mk
$(LXC_ENV_FOLDER):
	$(Q)mkdir -p $(LXC_ENV_FOLDER)
	$(Q)echo placeholder > $(LXC_ENV_FOLDER).placeholder
endif

.PHONY: $(PHONY)
|
||||
1027
utils/searxng.sh
Executable file
1027
utils/searxng.sh
Executable file
File diff suppressed because it is too large
Load Diff
41
utils/searxng_check.py
Normal file
41
utils/searxng_check.py
Normal file
@@ -0,0 +1,41 @@
|
||||
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Implement some checks in the active installation
"""

import os
import sys  # NOTE(review): appears unused in this script -- confirm before removing
import logging
import warnings
from pathlib import Path

# repo layout: this file lives in <repo>/utils/, so two parents up is the root
repo_root = Path(__file__).resolve().parent.parent

LOG_FORMAT_DEBUG = '%(levelname)-7s %(name)-30.30s: %(message)s'
logging.basicConfig(level=logging.getLevelName('DEBUG'), format=LOG_FORMAT_DEBUG)
# must be set before importing searx, so the application starts in debug mode
os.environ['SEARXNG_DEBUG'] = '1'

# from here on implement the checks of the installation

import searx

# settings path used by the old searx fork; superseded by SEARXNG_SETTINGS_PATH
OLD_SETTING = '/etc/searx/settings.yml'

if os.path.isfile(OLD_SETTING):
    msg = (
        '%s is no longer valid, move setting to %s' % (
            OLD_SETTING,
            os.environ.get('SEARXNG_SETTINGS_PATH', '/etc/searxng/settings.yml')
        ))
    warnings.warn(msg, DeprecationWarning)

# brand.env was replaced by utils/brand.sh / utils/get_setting.py
OLD_BRAND_ENV = repo_root / 'utils' / 'brand.env'

if os.path.isfile(OLD_BRAND_ENV):
    msg = ('%s is no longer needed, remove the file' % (OLD_BRAND_ENV))
    warnings.warn(msg, DeprecationWarning)

from searx import redisdb, get_setting

# without a reachable redis DB the limiter (bot protection) is inactive
if not redisdb.initialize():
    warnings.warn("can't connect to redis DB at: %s" % get_setting('redis.url'), RuntimeWarning, stacklevel=2)
    warnings.warn("--> no bot protection without redis DB", RuntimeWarning, stacklevel=2)
||||
1
utils/templates/etc/apache2
Symbolic link
1
utils/templates/etc/apache2
Symbolic link
@@ -0,0 +1 @@
|
||||
httpd
|
||||
41
utils/templates/etc/httpd/sites-available/searxng.conf
Normal file
41
utils/templates/etc/httpd/sites-available/searxng.conf
Normal file
@@ -0,0 +1,41 @@
|
||||
# -*- coding: utf-8; mode: apache -*-

# Apache site template: proxy ${SEARXNG_URL_PATH} to the SearXNG HTTP server
# at ${SEARXNG_INTERNAL_HTTP} (plain HTTP variant, see the uWSGI template for
# the socket variant).  ${...} placeholders are expanded at install time.

LoadModule ssl_module           ${APACHE_MODULES}/mod_ssl.so
LoadModule headers_module       ${APACHE_MODULES}/mod_headers.so
LoadModule proxy_module         ${APACHE_MODULES}/mod_proxy.so
LoadModule proxy_http_module    ${APACHE_MODULES}/mod_proxy_http.so
# LoadModule setenvif_module      ${APACHE_MODULES}/mod_setenvif.so
#
# SetEnvIf Request_URI "${SEARXNG_URL_PATH}" dontlog
# CustomLog /dev/null combined env=dontlog

<Location ${SEARXNG_URL_PATH}>

    Require all granted
    Order deny,allow
    Deny from all
    # Allow from fd00::/8 192.168.0.0/16 fe80::/10 127.0.0.0/8 ::1
    Allow from all

    # add the trailing slash
    RedirectMatch  308 ${SEARXNG_URL_PATH}\$ ${SEARXNG_URL_PATH}/

    ProxyPreserveHost On
    ProxyPass http://${SEARXNG_INTERNAL_HTTP}

    # see flaskfix.py
    RequestHeader set X-Scheme %{REQUEST_SCHEME}s
    RequestHeader set X-Script-Name ${SEARXNG_URL_PATH}

    # see limiter.py
    RequestHeader set X-Real-IP %{REMOTE_ADDR}s
    RequestHeader append X-Forwarded-For %{REMOTE_ADDR}s

</Location>

# uWSGI serves the static files and in settings.yml we use::
#
#   ui:
#     static_use_hash: true
#
# Alias ${SEARXNG_URL_PATH}/static/ ${SEARXNG_STATIC}/
||||
@@ -0,0 +1,41 @@
|
||||
# -*- coding: utf-8; mode: apache -*-

# Apache site template: proxy ${SEARXNG_URL_PATH} to the SearXNG uWSGI app
# over the unix socket ${SEARXNG_UWSGI_SOCKET} (socket variant of the HTTP
# template above).  ${...} placeholders are expanded at install time.

LoadModule ssl_module           ${APACHE_MODULES}/mod_ssl.so
LoadModule headers_module       ${APACHE_MODULES}/mod_headers.so
LoadModule proxy_module         ${APACHE_MODULES}/mod_proxy.so
LoadModule proxy_uwsgi_module   ${APACHE_MODULES}/mod_proxy_uwsgi.so
# LoadModule setenvif_module      ${APACHE_MODULES}/mod_setenvif.so
#
# SetEnvIf Request_URI "${SEARXNG_URL_PATH}" dontlog
# CustomLog /dev/null combined env=dontlog

<Location ${SEARXNG_URL_PATH}>

    Require all granted
    Order deny,allow
    Deny from all
    # Allow from fd00::/8 192.168.0.0/16 fe80::/10 127.0.0.0/8 ::1
    Allow from all

    # add the trailing slash
    RedirectMatch  308 ${SEARXNG_URL_PATH}\$ ${SEARXNG_URL_PATH}/

    ProxyPreserveHost On
    ProxyPass unix:${SEARXNG_UWSGI_SOCKET}|uwsgi://uwsgi-uds-searxng/

    # see flaskfix.py
    RequestHeader set X-Scheme %{REQUEST_SCHEME}s
    RequestHeader set X-Script-Name ${SEARXNG_URL_PATH}

    # see limiter.py
    RequestHeader set X-Real-IP %{REMOTE_ADDR}s
    RequestHeader append X-Forwarded-For %{REMOTE_ADDR}s

</Location>

# uWSGI serves the static files and in settings.yml we use::
#
#   ui:
#     static_use_hash: true
#
# Alias ${SEARXNG_URL_PATH}/static/ ${SEARXNG_STATIC}/
|
||||
@@ -0,0 +1,29 @@
|
||||
# nginx location template: proxy ${SEARXNG_URL_PATH} to the SearXNG HTTP
# server at ${SEARXNG_INTERNAL_HTTP}.  The '\$' escapes keep nginx variables
# literal while the ${...} placeholders are expanded at install time.

location ${SEARXNG_URL_PATH} {

    proxy_pass http://${SEARXNG_INTERNAL_HTTP};

    proxy_set_header Host             \$host;
    proxy_set_header Connection       \$http_connection;

    # see flaskfix.py
    proxy_set_header X-Scheme         \$scheme;
    proxy_set_header X-Script-Name    ${SEARXNG_URL_PATH};

    # see limiter.py
    proxy_set_header X-Real-IP        \$remote_addr;
    proxy_set_header X-Forwarded-For  \$proxy_add_x_forwarded_for;

    # proxy_buffering            off;
    # proxy_request_buffering    off;
    # proxy_buffer_size          8k;

}

# uWSGI serves the static files and in settings.yml we use::
#
#   ui:
#     static_use_hash: true
#
# location ${SEARXNG_URL_PATH}/static/ {
#     alias ${SEARXNG_STATIC}/;
# }
||||
@@ -0,0 +1,26 @@
|
||||
# nginx location template: pass ${SEARXNG_URL_PATH} to the SearXNG uWSGI app
# over the unix socket ${SEARXNG_UWSGI_SOCKET} (socket variant of the HTTP
# template above).

location ${SEARXNG_URL_PATH} {

    uwsgi_pass unix://${SEARXNG_UWSGI_SOCKET};

    include uwsgi_params;

    uwsgi_param    HTTP_HOST             \$host;
    uwsgi_param    HTTP_CONNECTION       \$http_connection;

    # see flaskfix.py
    uwsgi_param    HTTP_X_SCHEME         \$scheme;
    uwsgi_param    HTTP_X_SCRIPT_NAME    ${SEARXNG_URL_PATH};

    # see limiter.py
    uwsgi_param    HTTP_X_REAL_IP        \$remote_addr;
    uwsgi_param    HTTP_X_FORWARDED_FOR  \$proxy_add_x_forwarded_for;
}

# uWSGI serves the static files and in settings.yml we use::
#
#   ui:
#     static_use_hash: true
#
# location ${SEARXNG_URL_PATH}/static/ {
#     alias ${SEARXNG_STATIC}/;
# }
||||
46
utils/templates/etc/searxng/settings.yml
Normal file
46
utils/templates/etc/searxng/settings.yml
Normal file
@@ -0,0 +1,46 @@
|
||||
# SearXNG settings
|
||||
|
||||
use_default_settings: true
|
||||
|
||||
general:
|
||||
debug: false
|
||||
instance_name: "SearXNG"
|
||||
|
||||
search:
|
||||
safe_search: 2
|
||||
autocomplete: 'duckduckgo'
|
||||
formats:
|
||||
- html
|
||||
|
||||
server:
|
||||
# Is overwritten by ${SEARXNG_SECRET}
|
||||
secret_key: "ultrasecretkey"
|
||||
limiter: true
|
||||
image_proxy: true
|
||||
# public URL of the instance, to ensure correct inbound links. Is overwritten
|
||||
# by ${SEARXNG_BASE_URL}.
|
||||
# base_url: http://example.com/location
|
||||
|
||||
redis:
|
||||
# URL to connect redis database. Is overwritten by ${SEARXNG_REDIS_URL}.
|
||||
url: unix:///usr/local/searxng-redis/run/redis.sock?db=0
|
||||
|
||||
ui:
|
||||
static_use_hash: true
|
||||
|
||||
# preferences:
|
||||
# lock:
|
||||
# - autocomplete
|
||||
# - method
|
||||
|
||||
# engines:
|
||||
#
|
||||
# - name: fdroid
|
||||
# disabled: false
|
||||
#
|
||||
# - name: apk mirror
|
||||
# disabled: false
|
||||
#
|
||||
# - name: mediathekviewweb
|
||||
# categories: TV
|
||||
# disabled: false
|
||||
85
utils/templates/etc/uwsgi/apps-archlinux/searxng.ini
Normal file
85
utils/templates/etc/uwsgi/apps-archlinux/searxng.ini
Normal file
@@ -0,0 +1,85 @@
|
||||
# -*- mode: conf; coding: utf-8 -*-
|
||||
[uwsgi]
|
||||
|
||||
# uWSGI core
|
||||
# ----------
|
||||
#
|
||||
# https://uwsgi-docs.readthedocs.io/en/latest/Options.html#uwsgi-core
|
||||
|
||||
# Who will run the code
|
||||
uid = ${SERVICE_USER}
|
||||
gid = ${SERVICE_GROUP}
|
||||
|
||||
# set (python) default encoding UTF-8
|
||||
env = LANG=C.UTF-8
|
||||
env = LANGUAGE=C.UTF-8
|
||||
env = LC_ALL=C.UTF-8
|
||||
|
||||
# chdir to specified directory before apps loading
|
||||
chdir = ${SEARXNG_SRC}/searx
|
||||
|
||||
# SearXNG configuration (settings.yml)
|
||||
env = SEARXNG_SETTINGS_PATH=${SEARXNG_SETTINGS_PATH}
|
||||
|
||||
# disable logging for privacy
|
||||
logger = systemd
|
||||
disable-logging = true
|
||||
|
||||
# The right granted on the created socket
|
||||
chmod-socket = 666
|
||||
|
||||
# Plugin to use and interpreter config
|
||||
single-interpreter = true
|
||||
|
||||
# enable master process
|
||||
master = true
|
||||
|
||||
# load apps in each worker instead of the master
|
||||
lazy-apps = true
|
||||
|
||||
# load uWSGI plugins
|
||||
plugin = python
|
||||
|
||||
# By default the Python plugin does not initialize the GIL. This means your
|
||||
# app-generated threads will not run. If you need threads, remember to enable
|
||||
# them with enable-threads. Running uWSGI in multithreading mode (with the
|
||||
# threads options) will automatically enable threading support. This *strange*
|
||||
# default behaviour is for performance reasons.
|
||||
enable-threads = true
|
||||
|
||||
# Number of workers (usually CPU count)
|
||||
workers = ${UWSGI_WORKERS:-%k}
|
||||
threads = ${UWSGI_THREADS:-4}
|
||||
|
||||
# plugin: python
|
||||
# --------------
|
||||
#
|
||||
# https://uwsgi-docs.readthedocs.io/en/latest/Options.html#plugin-python
|
||||
|
||||
# load a WSGI module
|
||||
module = searx.webapp
|
||||
|
||||
# set PYTHONHOME/virtualenv
|
||||
virtualenv = ${SEARXNG_PYENV}
|
||||
|
||||
# add directory (or glob) to pythonpath
|
||||
pythonpath = ${SEARXNG_SRC}
|
||||
|
||||
|
||||
# speak to upstream
|
||||
# -----------------
|
||||
|
||||
# https://uwsgi-docs.readthedocs.io/en/latest/Options.html#plugin-http
|
||||
# Native HTTP support: https://uwsgi-docs.readthedocs.io/en/latest/HTTP.html
|
||||
|
||||
http = ${SEARXNG_INTERNAL_HTTP}
|
||||
buffer-size = 8192
|
||||
|
||||
# uWSGI serves the static files and in settings.yml we use::
|
||||
#
|
||||
# ui:
|
||||
# static_use_hash: true
|
||||
#
|
||||
static-map = /static=${SEARXNG_STATIC}
|
||||
static-gzip-all = True
|
||||
offload-threads = %k
|
||||
82
utils/templates/etc/uwsgi/apps-archlinux/searxng.ini:socket
Normal file
82
utils/templates/etc/uwsgi/apps-archlinux/searxng.ini:socket
Normal file
@@ -0,0 +1,82 @@
|
||||
# -*- mode: conf; coding: utf-8 -*-
|
||||
[uwsgi]
|
||||
|
||||
# uWSGI core
|
||||
# ----------
|
||||
#
|
||||
# https://uwsgi-docs.readthedocs.io/en/latest/Options.html#uwsgi-core
|
||||
|
||||
# Who will run the code
|
||||
uid = ${SERVICE_USER}
|
||||
gid = ${SERVICE_GROUP}
|
||||
|
||||
# set (python) default encoding UTF-8
|
||||
env = LANG=C.UTF-8
|
||||
env = LANGUAGE=C.UTF-8
|
||||
env = LC_ALL=C.UTF-8
|
||||
|
||||
# chdir to specified directory before apps loading
|
||||
chdir = ${SEARXNG_SRC}/searx
|
||||
|
||||
# SearXNG configuration (settings.yml)
|
||||
env = SEARXNG_SETTINGS_PATH=${SEARXNG_SETTINGS_PATH}
|
||||
|
||||
# disable logging for privacy
|
||||
logger = systemd
|
||||
disable-logging = true
|
||||
|
||||
# The right granted on the created socket
|
||||
chmod-socket = 666
|
||||
|
||||
# Plugin to use and interpreter config
|
||||
single-interpreter = true
|
||||
|
||||
# enable master process
|
||||
master = true
|
||||
|
||||
# load apps in each worker instead of the master
|
||||
lazy-apps = true
|
||||
|
||||
# load uWSGI plugins
|
||||
plugin = python
|
||||
|
||||
# By default the Python plugin does not initialize the GIL. This means your
|
||||
# app-generated threads will not run. If you need threads, remember to enable
|
||||
# them with enable-threads. Running uWSGI in multithreading mode (with the
|
||||
# threads options) will automatically enable threading support. This *strange*
|
||||
# default behaviour is for performance reasons.
|
||||
enable-threads = true
|
||||
|
||||
# Number of workers (usually CPU count)
|
||||
workers = ${UWSGI_WORKERS:-%k}
|
||||
threads = ${UWSGI_THREADS:-4}
|
||||
|
||||
# plugin: python
|
||||
# --------------
|
||||
#
|
||||
# https://uwsgi-docs.readthedocs.io/en/latest/Options.html#plugin-python
|
||||
|
||||
# load a WSGI module
|
||||
module = searx.webapp
|
||||
|
||||
# set PYTHONHOME/virtualenv
|
||||
virtualenv = ${SEARXNG_PYENV}
|
||||
|
||||
# add directory (or glob) to pythonpath
|
||||
pythonpath = ${SEARXNG_SRC}
|
||||
|
||||
|
||||
# speak to upstream
|
||||
# -----------------
|
||||
|
||||
socket = ${SEARXNG_UWSGI_SOCKET}
|
||||
buffer-size = 8192
|
||||
|
||||
# uWSGI serves the static files and in settings.yml we use::
|
||||
#
|
||||
# ui:
|
||||
# static_use_hash: true
|
||||
#
|
||||
static-map = /static=${SEARXNG_STATIC}
|
||||
static-gzip-all = True
|
||||
offload-threads = %k
|
||||
88
utils/templates/etc/uwsgi/apps-available/searxng.ini
Normal file
88
utils/templates/etc/uwsgi/apps-available/searxng.ini
Normal file
@@ -0,0 +1,88 @@
|
||||
# -*- mode: conf; coding: utf-8 -*-
|
||||
[uwsgi]
|
||||
|
||||
# uWSGI core
|
||||
# ----------
|
||||
#
|
||||
# https://uwsgi-docs.readthedocs.io/en/latest/Options.html#uwsgi-core
|
||||
|
||||
# Who will run the code / Hint: in emperor-tyrant mode uid & gid setting will be
|
||||
# ignored [1]. Mode emperor-tyrant is the default on fedora (/etc/uwsgi.ini).
|
||||
#
|
||||
# [1] https://uwsgi-docs.readthedocs.io/en/latest/Emperor.html#tyrant-mode-secure-multi-user-hosting
|
||||
#
|
||||
uid = ${SERVICE_USER}
|
||||
gid = ${SERVICE_GROUP}
|
||||
|
||||
# set (python) default encoding UTF-8
|
||||
env = LANG=C.UTF-8
|
||||
env = LANGUAGE=C.UTF-8
|
||||
env = LC_ALL=C.UTF-8
|
||||
|
||||
# chdir to specified directory before apps loading
|
||||
chdir = ${SEARXNG_SRC}/searx
|
||||
|
||||
# SearXNG configuration (settings.yml)
|
||||
env = SEARXNG_SETTINGS_PATH=${SEARXNG_SETTINGS_PATH}
|
||||
|
||||
# disable logging for privacy
|
||||
disable-logging = true
|
||||
|
||||
# The right granted on the created socket
|
||||
chmod-socket = 666
|
||||
|
||||
# Plugin to use and interpreter config
|
||||
single-interpreter = true
|
||||
|
||||
# enable master process
|
||||
master = true
|
||||
|
||||
# load apps in each worker instead of the master
|
||||
lazy-apps = true
|
||||
|
||||
# load uWSGI plugins
|
||||
plugin = python3,http
|
||||
|
||||
# By default the Python plugin does not initialize the GIL. This means your
|
||||
# app-generated threads will not run. If you need threads, remember to enable
|
||||
# them with enable-threads. Running uWSGI in multithreading mode (with the
|
||||
# threads options) will automatically enable threading support. This *strange*
|
||||
# default behaviour is for performance reasons.
|
||||
enable-threads = true
|
||||
|
||||
# Number of workers (usually CPU count)
|
||||
workers = ${UWSGI_WORKERS:-%k}
|
||||
threads = ${UWSGI_THREADS:-4}
|
||||
|
||||
# plugin: python
|
||||
# --------------
|
||||
#
|
||||
# https://uwsgi-docs.readthedocs.io/en/latest/Options.html#plugin-python
|
||||
|
||||
# load a WSGI module
|
||||
module = searx.webapp
|
||||
|
||||
# set PYTHONHOME/virtualenv
|
||||
virtualenv = ${SEARXNG_PYENV}
|
||||
|
||||
# add directory (or glob) to pythonpath
|
||||
pythonpath = ${SEARXNG_SRC}
|
||||
|
||||
|
||||
# speak to upstream
|
||||
# -----------------
|
||||
|
||||
# https://uwsgi-docs.readthedocs.io/en/latest/Options.html#plugin-http
|
||||
# Native HTTP support: https://uwsgi-docs.readthedocs.io/en/latest/HTTP.html
|
||||
|
||||
http = ${SEARXNG_INTERNAL_HTTP}
|
||||
buffer-size = 8192
|
||||
|
||||
# uWSGI serves the static files and in settings.yml we use::
|
||||
#
|
||||
# ui:
|
||||
# static_use_hash: true
|
||||
#
|
||||
static-map = /static=${SEARXNG_STATIC}
|
||||
static-gzip-all = True
|
||||
offload-threads = %k
|
||||
85
utils/templates/etc/uwsgi/apps-available/searxng.ini:socket
Normal file
85
utils/templates/etc/uwsgi/apps-available/searxng.ini:socket
Normal file
@@ -0,0 +1,85 @@
|
||||
# -*- mode: conf; coding: utf-8 -*-
|
||||
[uwsgi]
|
||||
|
||||
# uWSGI core
|
||||
# ----------
|
||||
#
|
||||
# https://uwsgi-docs.readthedocs.io/en/latest/Options.html#uwsgi-core
|
||||
|
||||
# Who will run the code / Hint: in emperor-tyrant mode uid & gid setting will be
|
||||
# ignored [1]. Mode emperor-tyrant is the default on fedora (/etc/uwsgi.ini).
|
||||
#
|
||||
# [1] https://uwsgi-docs.readthedocs.io/en/latest/Emperor.html#tyrant-mode-secure-multi-user-hosting
|
||||
#
|
||||
uid = ${SERVICE_USER}
|
||||
gid = ${SERVICE_GROUP}
|
||||
|
||||
# set (python) default encoding UTF-8
|
||||
env = LANG=C.UTF-8
|
||||
env = LANGUAGE=C.UTF-8
|
||||
env = LC_ALL=C.UTF-8
|
||||
|
||||
# chdir to specified directory before apps loading
|
||||
chdir = ${SEARXNG_SRC}/searx
|
||||
|
||||
# SearXNG configuration (settings.yml)
|
||||
env = SEARXNG_SETTINGS_PATH=${SEARXNG_SETTINGS_PATH}
|
||||
|
||||
# disable logging for privacy
|
||||
disable-logging = true
|
||||
|
||||
# The right granted on the created socket
|
||||
chmod-socket = 666
|
||||
|
||||
# Plugin to use and interpreter config
|
||||
single-interpreter = true
|
||||
|
||||
# enable master process
|
||||
master = true
|
||||
|
||||
# load apps in each worker instead of the master
|
||||
lazy-apps = true
|
||||
|
||||
# load uWSGI plugins
|
||||
plugin = python3,http
|
||||
|
||||
# By default the Python plugin does not initialize the GIL. This means your
|
||||
# app-generated threads will not run. If you need threads, remember to enable
|
||||
# them with enable-threads. Running uWSGI in multithreading mode (with the
|
||||
# threads options) will automatically enable threading support. This *strange*
|
||||
# default behaviour is for performance reasons.
|
||||
enable-threads = true
|
||||
|
||||
# Number of workers (usually CPU count)
|
||||
workers = ${UWSGI_WORKERS:-%k}
|
||||
threads = ${UWSGI_THREADS:-4}
|
||||
|
||||
# plugin: python
|
||||
# --------------
|
||||
#
|
||||
# https://uwsgi-docs.readthedocs.io/en/latest/Options.html#plugin-python
|
||||
|
||||
# load a WSGI module
|
||||
module = searx.webapp
|
||||
|
||||
# set PYTHONHOME/virtualenv
|
||||
virtualenv = ${SEARXNG_PYENV}
|
||||
|
||||
# add directory (or glob) to pythonpath
|
||||
pythonpath = ${SEARXNG_SRC}
|
||||
|
||||
|
||||
# speak to upstream
|
||||
# -----------------
|
||||
|
||||
socket = ${SEARXNG_UWSGI_SOCKET}
|
||||
buffer-size = 8192
|
||||
|
||||
# uWSGI serves the static files and in settings.yml we use::
|
||||
#
|
||||
# ui:
|
||||
# static_use_hash: true
|
||||
#
|
||||
static-map = /static=${SEARXNG_STATIC}
|
||||
static-gzip-all = True
|
||||
offload-threads = %k
|
||||
42
utils/templates/lib/systemd/system/searxng-redis.service
Normal file
42
utils/templates/lib/systemd/system/searxng-redis.service
Normal file
@@ -0,0 +1,42 @@
|
||||
[Unit]
|
||||
|
||||
Description=SearXNG redis service
|
||||
After=syslog.target
|
||||
After=network.target
|
||||
Documentation=https://redis.io/documentation
|
||||
|
||||
[Service]
|
||||
|
||||
Type=simple
|
||||
User=${REDIS_USER}
|
||||
Group=${REDIS_USER}
|
||||
WorkingDirectory=${REDIS_HOME}
|
||||
Restart=always
|
||||
TimeoutStopSec=0
|
||||
|
||||
Environment=USER=${REDIS_USER} HOME=${REDIS_HOME}
|
||||
ExecStart=${REDIS_HOME_BIN}/redis-server ${REDIS_CONF}
|
||||
ExecPaths=${REDIS_HOME_BIN}
|
||||
|
||||
LimitNOFILE=65535
|
||||
NoNewPrivileges=true
|
||||
PrivateDevices=yes
|
||||
|
||||
# ProtectSystem=full
|
||||
ProtectHome=yes
|
||||
ReadOnlyDirectories=/
|
||||
ReadWritePaths=-${REDIS_HOME}/run
|
||||
|
||||
UMask=007
|
||||
PrivateTmp=yes
|
||||
|
||||
MemoryDenyWriteExecute=true
|
||||
ProtectKernelModules=true
|
||||
ProtectKernelTunables=true
|
||||
ProtectControlGroups=true
|
||||
RestrictRealtime=true
|
||||
RestrictNamespaces=true
|
||||
|
||||
[Install]
|
||||
|
||||
WantedBy=multi-user.target
|
||||
Reference in New Issue
Block a user