fix(sandbox): consolidate dev and prod sandbox (#273)
parent 5344853344
commit 739654bb25
@@ -1,32 +0,0 @@
-FROM docker.io/library/node:20-slim
-
-# install minimal set of packages, then clean up
-RUN apt-get update && apt-get install -y --no-install-recommends \
-  man-db \
-  curl \
-  dnsutils \
-  less \
-  jq \
-  bc \
-  gh \
-  git \
-  unzip \
-  rsync \
-  ripgrep \
-  procps \
-  psmisc \
-  lsof \
-  socat \
-  && apt-get clean \
-  && rm -rf /var/lib/apt/lists/*
-
-# set up npm global package folder under /usr/local/share
-# give it to non-root user node, already set up in base image
-RUN mkdir -p /usr/local/share/npm-global \
-  && chown -R node:node /usr/local/share/npm-global
-ENV NPM_CONFIG_PREFIX=/usr/local/share/npm-global
-ENV PATH=$PATH:/usr/local/share/npm-global/bin
-
-# switch to non-root user node
-USER node
-
@@ -7,11 +7,11 @@
     "packages/*"
   ],
   "scripts": {
-    "build": "scripts/build.sh",
+    "build": "BUILD_SANDBOX=1 scripts/build.sh",
     "clean": "scripts/clean.sh",
     "test": "npm run test --workspaces",
-    "start": "scripts/start.sh",
+    "start": "NODE_ENV=development scripts/start.sh",
-    "debug": "DEBUG=1 scripts/start.sh",
+    "debug": "NODE_ENV=development DEBUG=1 scripts/start.sh",
     "lint:fix": "eslint . --fix",
     "lint": "eslint . --ext .ts,.tsx",
     "typecheck": "tsc --noEmit --jsx react",
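
For orientation, a rough sketch of how these scripts are meant to be invoked from the repo root (intent inferred from this diff, not documented behavior):

# full build; the npm script sets BUILD_SANDBOX=1 so the sandbox image is built too
npm run build

# run from source; NODE_ENV=development makes the sandbox entrypoint run "npm run start --" inside the container
npm run start

# run from source with the Node inspector enabled
npm run debug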
@@ -57,19 +57,87 @@ function parseImageName(image: string): string {
   return tag ? `${name}-${tag}` : name;
 }
 
+function ports(): string[] {
+  return (process.env.SANDBOX_PORTS ?? '')
+    .split(',')
+    .filter((p) => p.trim())
+    .map((p) => p.trim());
+}
+
+function entrypoint(workdir: string): string[] {
+  // set up bash command to be run inside container
+  // start with setting up PATH and PYTHONPATH with optional suffixes from host
+  const bashCmds = [];
+
+  // copy any paths in PATH that are under working directory in sandbox
+  // note we can't just pass these as --env since that would override base PATH
+  // instead we construct a suffix and append as part of bashCmd below
+  let pathSuffix = '';
+  if (process.env.PATH) {
+    const paths = process.env.PATH.split(':');
+    for (const path of paths) {
+      if (path.startsWith(workdir)) {
+        pathSuffix += `:${path}`;
+      }
+    }
+  }
+  if (pathSuffix) {
+    bashCmds.push(`export PATH="$PATH${pathSuffix}";`); // suffix includes leading ':'
+  }
+
+  // copy any paths in PYTHONPATH that are under working directory in sandbox
+  // note we can't just pass these as --env since that would override base PYTHONPATH
+  // instead we construct a suffix and append as part of bashCmd below
+  let pythonPathSuffix = '';
+  if (process.env.PYTHONPATH) {
+    const paths = process.env.PYTHONPATH.split(':');
+    for (const path of paths) {
+      if (path.startsWith(workdir)) {
+        pythonPathSuffix += `:${path}`;
+      }
+    }
+  }
+  if (pythonPathSuffix) {
+    bashCmds.push(`export PYTHONPATH="$PYTHONPATH${pythonPathSuffix}";`); // suffix includes leading ':'
+  }
+
+  // source sandbox.bashrc if exists under project settings directory
+  const projectSandboxBashrc = path.join(
+    SETTINGS_DIRECTORY_NAME,
+    'sandbox.bashrc',
+  );
+  if (fs.existsSync(projectSandboxBashrc)) {
+    bashCmds.push(`source ${projectSandboxBashrc};`);
+  }
+
+  // also set up redirects (via socat) so servers can listen on localhost instead of 0.0.0.0
+  ports().forEach((p) =>
+    bashCmds.push(
+      `socat TCP4-LISTEN:${p},bind=$(hostname -i),fork,reuseaddr TCP4:127.0.0.1:${p} 2> /dev/null &`,
+    ),
+  );
+
+  // append remaining args (bash -c "gemini-code cli_args...")
+  // cli_args need to be quoted before being inserted into bash_cmd
+  const cliArgs = process.argv.slice(2).map((arg) => quote([arg]));
+  const cliCmd =
+    process.env.NODE_ENV === 'development'
+      ? process.env.DEBUG
+        ? 'npm run debug --'
+        : 'npm run start --'
+      : 'gemini-code';
+
+  const args = [...bashCmds, cliCmd, ...cliArgs];
+
+  return ['bash', '-c', args.join(' ')];
+}
+
 export async function start_sandbox(sandbox: string) {
   // determine full path for gemini-code to distinguish linked vs installed setting
   const gcPath = execSync(`realpath $(which gemini-code)`).toString().trim();
 
-  // if project is gemini-code, then switch to -dev image & run CLI from ${workdir}/packages/cli
-  let image = process.env.GEMINI_CODE_SANDBOX_IMAGE ?? 'gemini-code-sandbox';
-  const project = path.basename(process.cwd());
+  const image = process.env.GEMINI_CODE_SANDBOX_IMAGE ?? 'gemini-code-sandbox';
   const workdir = process.cwd();
-  let cliPath = '$(which gemini-code)';
-  if (project === 'gemini-code') {
-    image = 'gemini-code-sandbox-dev';
-    cliPath = quote([`${workdir}/packages/cli`]);
-  }
 
   // if BUILD_SANDBOX is set, then call scripts/build_sandbox.sh under gemini-code repo
   // note this can only be done with binary linked from gemini-code repo
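
To make the new entrypoint() concrete, here is a hedged sketch of the kind of command it could assemble for a hypothetical run with SANDBOX_PORTS=3000, a host PATH entry under the working directory /workdir, and NODE_ENV unset (paths and port are placeholders, not values from the diff):

# hypothetical output of entrypoint('/workdir'); all concrete values are illustrative
bash -c 'export PATH="$PATH:/workdir/node_modules/.bin"; socat TCP4-LISTEN:3000,bind=$(hostname -i),fork,reuseaddr TCP4:127.0.0.1:3000 2> /dev/null & gemini-code'

With NODE_ENV=development the trailing command would instead be "npm run start --" (or "npm run debug --" when DEBUG is set).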
@@ -163,6 +231,14 @@ export async function start_sandbox(sandbox: string) {
     }
   }
 
+  // expose env-specified ports on the sandbox
+  ports().forEach((p) => args.push('--publish', `${p}:${p}`));
+
+  if (process.env.DEBUG) {
+    const debugPort = process.env.DEBUG_PORT || '9229';
+    args.push(`--publish`, `${debugPort}:${debugPort}`);
+  }
+
   // name container after image, plus numeric suffix to avoid conflicts
   const containerName = parseImageName(image);
   let index = 0;
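
For example (a sketch with placeholder ports), setting SANDBOX_PORTS and DEBUG before launching would add publish flags along these lines:

# SANDBOX_PORTS=3000,8080 DEBUG=1 roughly translates to:
#   --publish 3000:3000 --publish 8080:8080 --publish 9229:9229
SANDBOX_PORTS=3000,8080 DEBUG=1 gemini-code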
@@ -205,32 +281,6 @@ export async function start_sandbox(sandbox: string) {
     args.push('--env', `COLORTERM=${process.env.COLORTERM}`);
   }
 
-  // copy any paths in PATH that are under working directory in sandbox
-  // note we can't just pass these as --env since that would override base PATH
-  // instead we construct a suffix and append as part of bashCmd below
-  let pathSuffix = '';
-  if (process.env.PATH) {
-    const paths = process.env.PATH.split(':');
-    for (const path of paths) {
-      if (path.startsWith(workdir)) {
-        pathSuffix += `:${path}`;
-      }
-    }
-  }
-
-  // copy any paths in PYTHONPATH that are under working directory in sandbox
-  // note we can't just pass these as --env since that would override base PYTHONPATH
-  // instead we construct a suffix and append as part of bashCmd below
-  let pythonPathSuffix = '';
-  if (process.env.PYTHONPATH) {
-    const paths = process.env.PYTHONPATH.split(':');
-    for (const path of paths) {
-      if (path.startsWith(workdir)) {
-        pythonPathSuffix += `:${path}`;
-      }
-    }
-  }
-
   // copy VIRTUAL_ENV if under working directory
   // also mount-replace VIRTUAL_ENV directory with <project_settings>/sandbox.venv
   // sandbox can then set up this new VIRTUAL_ENV directory using sandbox.bashrc (see below)
@@ -274,45 +324,6 @@ export async function start_sandbox(sandbox: string) {
     args.push('--authfile', emptyAuthFilePath);
   }
 
-  // enable debugging via node --inspect-brk if DEBUG is set
-  const nodeArgs = [];
-  const debugPort = process.env.DEBUG_PORT || '9229';
-  if (process.env.DEBUG) {
-    args.push('--publish', `${debugPort}:${debugPort}`);
-    nodeArgs.push(`--inspect-brk=0.0.0.0:${debugPort}`);
-  }
-
-  // set up bash command to be run inside container
-  // start with setting up PATH and PYTHONPATH with optional suffixes from host
-  let bashCmd = '';
-  if (pathSuffix) {
-    bashCmd += `export PATH="$PATH${pathSuffix}"; `; // suffix includes leading ':'
-  }
-  if (pythonPathSuffix) {
-    bashCmd += `export PYTHONPATH="$PYTHONPATH${pythonPathSuffix}"; `; // suffix includes leading ':'
-  }
-
-  // source sandbox.bashrc if exists under project settings directory
-  const projectSandboxBashrc = path.join(
-    SETTINGS_DIRECTORY_NAME,
-    'sandbox.bashrc',
-  );
-  if (fs.existsSync(projectSandboxBashrc)) {
-    bashCmd += `source ${projectSandboxBashrc}; `;
-  }
-
-  // open additional ports if SANDBOX_PORTS is set
-  // also set up redirects (via socat) so servers can listen on localhost instead of 0.0.0.0
-  if (process.env.SANDBOX_PORTS) {
-    for (let port of process.env.SANDBOX_PORTS.split(',')) {
-      if ((port = port.trim())) {
-        console.log(`SANDBOX_PORTS: ${port}`);
-        args.push('--publish', `${port}:${port}`);
-        bashCmd += `socat TCP4-LISTEN:${port},bind=$(hostname -i),fork,reuseaddr TCP4:127.0.0.1:${port} 2> /dev/null & `;
-      }
-    }
-  }
-
   // specify --user as "$(id -u):$(id -g)" if SANDBOX_SET_UID_GID is 1|true
   // only necessary if user mapping is not handled by sandboxing setup on host
   // (e.g. rootful docker on linux w/o userns-remap configured)
@@ -322,12 +333,11 @@ export async function start_sandbox(sandbox: string) {
     args.push('--user', `${uid}:${gid}`);
   }
 
-  // append remaining args (image, bash -c "node node_args... cli path cli_args...")
-  // node_args and cli_args need to be quoted before being inserted into bash_cmd
-  const quotedNodeArgs = nodeArgs.map((arg) => quote([arg]));
-  const quotedCliArgs = process.argv.slice(2).map((arg) => quote([arg]));
-  bashCmd += `node ${quotedNodeArgs.join(' ')} ${cliPath} ${quotedCliArgs.join(' ')}`;
-  args.push(image, 'bash', '-c', bashCmd);
+  // push container image name
+  args.push(image);
+
+  // push container entrypoint (including args)
+  args.push(...entrypoint(workdir));
 
   // spawn child and let it inherit stdio
   const child = spawn(sandbox, args, {
@@ -25,7 +25,6 @@ npm run build --workspaces
 
 # also build container image if sandboxing is enabled
 # skip (-s) npm install + build since we did that above
-# use (-d) for dev build that can reuse existing image
-if scripts/sandbox_command.sh -q; then
-  scripts/build_sandbox.sh -sd
+if scripts/sandbox_command.sh -q && [[ "${BUILD_SANDBOX:-}" =~ ^(1|true)$ ]]; then
+  scripts/build_sandbox.sh -s
 fi
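
In effect the sandbox image is only rebuilt when BUILD_SANDBOX is set; a minimal sketch of the two paths, assuming the script is run from the repo root:

# plain build: compiles the workspaces but skips the container image
scripts/build.sh

# build that also (re)builds the sandbox image; this is what "npm run build" now does
BUILD_SANDBOX=1 scripts/build.sh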
@@ -30,10 +30,6 @@ SKIP_NPM_INSTALL_BUILD=false
 while getopts "sdf:" opt; do
   case ${opt} in
     s) SKIP_NPM_INSTALL_BUILD=true ;;
-    d)
-      DOCKERFILE=Dockerfile-dev
-      IMAGE+="-dev"
-      ;;
     f)
       DOCKERFILE=$OPTARG
       ;;
@@ -54,38 +50,27 @@ if [ "$SKIP_NPM_INSTALL_BUILD" = false ]; then
   npm run build --workspaces
 fi
 
-# if using Dockerfile-dev, then skip rebuild unless BUILD_SANDBOX is set
-# rebuild should not be necessary unless Dockerfile-dev is modified
-if [ "$DOCKERFILE" = "Dockerfile-dev" ]; then
-  if $CMD images -q "$IMAGE" | grep -q . && [ -z "${BUILD_SANDBOX:-}" ]; then
-    echo "using existing $IMAGE (set BUILD_SANDBOX=true to force rebuild)"
-    exit 0
-  fi
-fi
-
-# prepare global installation files for prod builds (anything but Dockerfile-dev)
-if [ "$DOCKERFILE" != "Dockerfile-dev" ]; then
-  # pack cli
-  echo "packing @gemini-code/cli ..."
-  rm -f packages/cli/dist/gemini-code-cli-*.tgz
-  npm pack -w @gemini-code/cli --pack-destination ./packages/cli/dist &>/dev/null
-  # pack server
-  echo "packing @gemini-code/server ..."
-  rm -f packages/server/dist/gemini-code-server-*.tgz
-  npm pack -w @gemini-code/server --pack-destination ./packages/server/dist &>/dev/null
-  # give node user (used during installation, see Dockerfile) access to these files
-  chmod 755 packages/*/dist/gemini-code-*.tgz
-fi
+# prepare global installation files for prod builds
+# pack cli
+echo "packing @gemini-code/cli ..."
+rm -f packages/cli/dist/gemini-code-cli-*.tgz
+npm pack -w @gemini-code/cli --pack-destination ./packages/cli/dist &>/dev/null
+# pack server
+echo "packing @gemini-code/server ..."
+rm -f packages/server/dist/gemini-code-server-*.tgz
+npm pack -w @gemini-code/server --pack-destination ./packages/server/dist &>/dev/null
+# give node user (used during installation, see Dockerfile) access to these files
+chmod 755 packages/*/dist/gemini-code-*.tgz
 
 # build container image & prune older unused images
 echo "building $IMAGE ... (can be slow first time)"
 
 if [[ "$CMD" == "podman" ]]; then
   # use empty --authfile to skip unnecessary auth refresh overhead
-  $CMD build --authfile=<(echo '{}') -f "$DOCKERFILE" -t "$IMAGE" . >/dev/null
+  $CMD build --authfile=<(echo '{}') -f "$DOCKERFILE" -t "$IMAGE" .
 elif [[ "$CMD" == "docker" ]]; then
   # use an empty config directory to skip unnecessary auth refresh overhead
-  $CMD --config="empty" build -f "$DOCKERFILE" -t "$IMAGE" . >/dev/null
+  $CMD --config="empty" buildx build -f "$DOCKERFILE" -t "$IMAGE" .
 else
   $CMD build -f "$DOCKERFILE" -t "$IMAGE" . >/dev/null
 fi
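
A hedged way to sanity-check the result afterwards (exact versions and the container CLI in use will differ per setup):

# packed tarballs produced by npm pack
ls packages/cli/dist/gemini-code-cli-*.tgz packages/server/dist/gemini-code-server-*.tgz

# the built image, via whichever container CLI $CMD resolved to
docker images gemini-code-sandbox   # or: podman images gemini-code-sandbox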
@@ -22,7 +22,12 @@ node ./scripts/check-build-status.js
 # note with sandboxing this flag is passed to the binary inside the sandbox
 node_args=()
 if [ -n "${DEBUG:-}" ] && ! scripts/sandbox_command.sh -q; then
-  node_args=(--inspect-brk)
+  if [ -n "${SANDBOX:-}" ]; then
+    port="${DEBUG_PORT:-9229}"
+    node_args=("--inspect-brk=0.0.0.0:$port")
+  else
+    node_args=(--inspect-brk)
+  fi
 fi
 node_args+=("./packages/cli" "$@")
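
With this change, a debug run inside the sandbox binds the inspector to 0.0.0.0 so the published port is reachable from the host; a sketch of a typical session (9229 is the default unless DEBUG_PORT overrides it):

# start in debug mode; the debug npm script sets NODE_ENV=development and DEBUG=1
npm run debug

# then attach an inspector client on the host to localhost:9229, e.g. chrome://inspect or a VS Code attach config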