fix(sandbox): consolidate dev and prod sandbox (#273)

parent 5344853344
commit 739654bb25

@@ -1,32 +0,0 @@
FROM docker.io/library/node:20-slim

# install minimal set of packages, then clean up
RUN apt-get update && apt-get install -y --no-install-recommends \
  man-db \
  curl \
  dnsutils \
  less \
  jq \
  bc \
  gh \
  git \
  unzip \
  rsync \
  ripgrep \
  procps \
  psmisc \
  lsof \
  socat \
  && apt-get clean \
  && rm -rf /var/lib/apt/lists/*

# set up npm global package folder under /usr/local/share
# give it to non-root user node, already set up in base image
RUN mkdir -p /usr/local/share/npm-global \
  && chown -R node:node /usr/local/share/npm-global
ENV NPM_CONFIG_PREFIX=/usr/local/share/npm-global
ENV PATH=$PATH:/usr/local/share/npm-global/bin

# switch to non-root user node
USER node

@@ -7,11 +7,11 @@
    "packages/*"
  ],
  "scripts": {
    "build": "scripts/build.sh",
    "build": "BUILD_SANDBOX=1 scripts/build.sh",
    "clean": "scripts/clean.sh",
    "test": "npm run test --workspaces",
    "start": "scripts/start.sh",
    "debug": "DEBUG=1 scripts/start.sh",
    "start": "NODE_ENV=development scripts/start.sh",
    "debug": "NODE_ENV=development DEBUG=1 scripts/start.sh",
    "lint:fix": "eslint . --fix",
    "lint": "eslint . --ext .ts,.tsx",
    "typecheck": "tsc --noEmit --jsx react",

@@ -57,19 +57,87 @@ function parseImageName(image: string): string {
  return tag ? `${name}-${tag}` : name;
}

function ports(): string[] {
  return (process.env.SANDBOX_PORTS ?? '')
    .split(',')
    .filter((p) => p.trim())
    .map((p) => p.trim());
}

function entrypoint(workdir: string): string[] {
  // set up bash command to be run inside container
  // start with setting up PATH and PYTHONPATH with optional suffixes from host
  const bashCmds = [];

  // copy any paths in PATH that are under working directory in sandbox
  // note we can't just pass these as --env since that would override base PATH
  // instead we construct a suffix and append as part of bashCmd below
  let pathSuffix = '';
  if (process.env.PATH) {
    const paths = process.env.PATH.split(':');
    for (const path of paths) {
      if (path.startsWith(workdir)) {
        pathSuffix += `:${path}`;
      }
    }
  }
  if (pathSuffix) {
    bashCmds.push(`export PATH="$PATH${pathSuffix}";`); // suffix includes leading ':'
  }

  // copy any paths in PYTHONPATH that are under working directory in sandbox
  // note we can't just pass these as --env since that would override base PYTHONPATH
  // instead we construct a suffix and append as part of bashCmd below
  let pythonPathSuffix = '';
  if (process.env.PYTHONPATH) {
    const paths = process.env.PYTHONPATH.split(':');
    for (const path of paths) {
      if (path.startsWith(workdir)) {
        pythonPathSuffix += `:${path}`;
      }
    }
  }
  if (pythonPathSuffix) {
    bashCmds.push(`export PYTHONPATH="$PYTHONPATH${pythonPathSuffix}";`); // suffix includes leading ':'
  }

  // source sandbox.bashrc if exists under project settings directory
  const projectSandboxBashrc = path.join(
    SETTINGS_DIRECTORY_NAME,
    'sandbox.bashrc',
  );
  if (fs.existsSync(projectSandboxBashrc)) {
    bashCmds.push(`source ${projectSandboxBashrc};`);
  }

  // also set up redirects (via socat) so servers can listen on localhost instead of 0.0.0.0
  ports().forEach((p) =>
    bashCmds.push(
      `socat TCP4-LISTEN:${p},bind=$(hostname -i),fork,reuseaddr TCP4:127.0.0.1:${p} 2> /dev/null &`,
    ),
  );

  // append remaining args (bash -c "gemini-code cli_args...")
  // cli_args need to be quoted before being inserted into bash_cmd
  const cliArgs = process.argv.slice(2).map((arg) => quote([arg]));
  const cliCmd =
    process.env.NODE_ENV === 'development'
      ? process.env.DEBUG
        ? 'npm run debug --'
        : 'npm run start --'
      : 'gemini-code';

  const args = [...bashCmds, cliCmd, ...cliArgs];

  return ['bash', '-c', args.join(' ')];
}

export async function start_sandbox(sandbox: string) {
  // determine full path for gemini-code to distinguish linked vs installed setting
  const gcPath = execSync(`realpath $(which gemini-code)`).toString().trim();

  // if project is gemini-code, then switch to -dev image & run CLI from ${workdir}/packages/cli
  let image = process.env.GEMINI_CODE_SANDBOX_IMAGE ?? 'gemini-code-sandbox';
  const project = path.basename(process.cwd());
  const image = process.env.GEMINI_CODE_SANDBOX_IMAGE ?? 'gemini-code-sandbox';
  const workdir = process.cwd();
  let cliPath = '$(which gemini-code)';
  if (project === 'gemini-code') {
    image = 'gemini-code-sandbox-dev';
    cliPath = quote([`${workdir}/packages/cli`]);
  }

  // if BUILD_SANDBOX is set, then call scripts/build_sandbox.sh under gemini-code repo
  // note this can only be done with binary linked from gemini-code repo

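Note on the new entrypoint construction: the sketch below is illustrative only, not the shipped code. It shows how ports() and entrypoint() compose environment variables into a single `bash -c` string. It assumes the quote() used in the diff is shell-quote's quote(); the demo* names are hypothetical.

```ts
// Minimal sketch of the entrypoint composition above; assumes shell-quote for quoting.
import { quote } from 'shell-quote';

function demoPorts(env: NodeJS.ProcessEnv): string[] {
  return (env.SANDBOX_PORTS ?? '')
    .split(',')
    .filter((p) => p.trim())
    .map((p) => p.trim());
}

function demoEntrypoint(env: NodeJS.ProcessEnv, cliArgs: string[]): string[] {
  const bashCmds: string[] = [];
  // one background socat relay per requested port, so servers inside the
  // sandbox can bind 127.0.0.1 while still being reachable from the host
  demoPorts(env).forEach((p) =>
    bashCmds.push(
      `socat TCP4-LISTEN:${p},bind=$(hostname -i),fork,reuseaddr TCP4:127.0.0.1:${p} 2> /dev/null &`,
    ),
  );
  // dev runs go through npm scripts; prod runs call the installed binary
  const cliCmd =
    env.NODE_ENV === 'development'
      ? env.DEBUG
        ? 'npm run debug --'
        : 'npm run start --'
      : 'gemini-code';
  const quotedArgs = cliArgs.map((arg) => quote([arg]));
  return ['bash', '-c', [...bashCmds, cliCmd, ...quotedArgs].join(' ')];
}

// e.g. SANDBOX_PORTS=3000 NODE_ENV=development, CLI invoked with --help
console.log(
  demoEntrypoint({ SANDBOX_PORTS: '3000', NODE_ENV: 'development' }, ['--help']),
);
```
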
@@ -163,6 +231,14 @@ export async function start_sandbox(sandbox: string) {
    }
  }

  // expose env-specified ports on the sandbox
  ports().forEach((p) => args.push('--publish', `${p}:${p}`));

  if (process.env.DEBUG) {
    const debugPort = process.env.DEBUG_PORT || '9229';
    args.push(`--publish`, `${debugPort}:${debugPort}`);
  }

  // name container after image, plus numeric suffix to avoid conflicts
  const containerName = parseImageName(image);
  let index = 0;

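For the port exposure above, a small sketch of the --publish flags produced for SANDBOX_PORTS plus the DEBUG port. The helper is hypothetical and not part of the diff.

```ts
// Hypothetical helper mirroring the --publish logic above; not part of the diff.
function demoPublishArgs(env: NodeJS.ProcessEnv): string[] {
  const args: string[] = [];
  // one host:container mapping per env-specified sandbox port
  (env.SANDBOX_PORTS ?? '')
    .split(',')
    .map((p) => p.trim())
    .filter((p) => p)
    .forEach((p) => args.push('--publish', `${p}:${p}`));
  // also expose the debugger port when DEBUG is set
  if (env.DEBUG) {
    const debugPort = env.DEBUG_PORT || '9229';
    args.push('--publish', `${debugPort}:${debugPort}`);
  }
  return args;
}

// SANDBOX_PORTS=3000,8080 DEBUG=1 ->
// ['--publish', '3000:3000', '--publish', '8080:8080', '--publish', '9229:9229']
console.log(demoPublishArgs({ SANDBOX_PORTS: '3000,8080', DEBUG: '1' }));
```
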
@@ -205,32 +281,6 @@ export async function start_sandbox(sandbox: string) {
    args.push('--env', `COLORTERM=${process.env.COLORTERM}`);
  }

  // copy any paths in PATH that are under working directory in sandbox
  // note we can't just pass these as --env since that would override base PATH
  // instead we construct a suffix and append as part of bashCmd below
  let pathSuffix = '';
  if (process.env.PATH) {
    const paths = process.env.PATH.split(':');
    for (const path of paths) {
      if (path.startsWith(workdir)) {
        pathSuffix += `:${path}`;
      }
    }
  }

  // copy any paths in PYTHONPATH that are under working directory in sandbox
  // note we can't just pass these as --env since that would override base PYTHONPATH
  // instead we construct a suffix and append as part of bashCmd below
  let pythonPathSuffix = '';
  if (process.env.PYTHONPATH) {
    const paths = process.env.PYTHONPATH.split(':');
    for (const path of paths) {
      if (path.startsWith(workdir)) {
        pythonPathSuffix += `:${path}`;
      }
    }
  }

  // copy VIRTUAL_ENV if under working directory
  // also mount-replace VIRTUAL_ENV directory with <project_settings>/sandbox.venv
  // sandbox can then set up this new VIRTUAL_ENV directory using sandbox.bashrc (see below)

@@ -274,45 +324,6 @@ export async function start_sandbox(sandbox: string) {
    args.push('--authfile', emptyAuthFilePath);
  }

  // enable debugging via node --inspect-brk if DEBUG is set
  const nodeArgs = [];
  const debugPort = process.env.DEBUG_PORT || '9229';
  if (process.env.DEBUG) {
    args.push('--publish', `${debugPort}:${debugPort}`);
    nodeArgs.push(`--inspect-brk=0.0.0.0:${debugPort}`);
  }

  // set up bash command to be run inside container
  // start with setting up PATH and PYTHONPATH with optional suffixes from host
  let bashCmd = '';
  if (pathSuffix) {
    bashCmd += `export PATH="$PATH${pathSuffix}"; `; // suffix includes leading ':'
  }
  if (pythonPathSuffix) {
    bashCmd += `export PYTHONPATH="$PYTHONPATH${pythonPathSuffix}"; `; // suffix includes leading ':'
  }

  // source sandbox.bashrc if exists under project settings directory
  const projectSandboxBashrc = path.join(
    SETTINGS_DIRECTORY_NAME,
    'sandbox.bashrc',
  );
  if (fs.existsSync(projectSandboxBashrc)) {
    bashCmd += `source ${projectSandboxBashrc}; `;
  }

  // open additional ports if SANDBOX_PORTS is set
  // also set up redirects (via socat) so servers can listen on localhost instead of 0.0.0.0
  if (process.env.SANDBOX_PORTS) {
    for (let port of process.env.SANDBOX_PORTS.split(',')) {
      if ((port = port.trim())) {
        console.log(`SANDBOX_PORTS: ${port}`);
        args.push('--publish', `${port}:${port}`);
        bashCmd += `socat TCP4-LISTEN:${port},bind=$(hostname -i),fork,reuseaddr TCP4:127.0.0.1:${port} 2> /dev/null & `;
      }
    }
  }

  // specify --user as "$(id -u):$(id -g)" if SANDBOX_SET_UID_GID is 1|true
  // only necessary if user mapping is not handled by sandboxing setup on host
  // (e.g. rootful docker on linux w/o userns-remap configured)

@@ -322,12 +333,11 @@ export async function start_sandbox(sandbox: string) {
    args.push('--user', `${uid}:${gid}`);
  }

  // append remaining args (image, bash -c "node node_args... cli path cli_args...")
  // node_args and cli_args need to be quoted before being inserted into bash_cmd
  const quotedNodeArgs = nodeArgs.map((arg) => quote([arg]));
  const quotedCliArgs = process.argv.slice(2).map((arg) => quote([arg]));
  bashCmd += `node ${quotedNodeArgs.join(' ')} ${cliPath} ${quotedCliArgs.join(' ')}`;
  args.push(image, 'bash', '-c', bashCmd);
  // push container image name
  args.push(image);

  // push container entrypoint (including args)
  args.push(...entrypoint(workdir));

  // spawn child and let it inherit stdio
  const child = spawn(sandbox, args, {

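The final assembly now pushes the image name and then the entrypoint rather than a hand-built bashCmd. Below is a sketch of the resulting spawn call; the spawn options are truncated in the hunk, so the stdio inheritance shown is an assumption based on the comment, and demoRun is a hypothetical name.

```ts
// Sketch only: how the container command line is assembled and spawned.
import { spawn } from 'node:child_process';

function demoRun(
  sandbox: string,   // e.g. 'docker' or 'podman'
  runArgs: string[], // flags accumulated earlier (--publish, --env, --user, ...)
  image: string,     // container image name
  entry: string[],   // result of entrypoint(workdir), i.e. ['bash', '-c', ...]
): void {
  const fullArgs = [...runArgs, image, ...entry];
  // stdio: 'inherit' is an assumption; the diff truncates the spawn options
  const child = spawn(sandbox, fullArgs, { stdio: 'inherit' });
  child.on('exit', (code) => process.exit(code ?? 0));
}

// e.g. demoRun('docker', ['run', '--rm', '-it'], 'gemini-code-sandbox',
//              ['bash', '-c', 'gemini-code --help']);
```
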
@@ -25,7 +25,6 @@ npm run build --workspaces

# also build container image if sandboxing is enabled
# skip (-s) npm install + build since we did that above
# use (-d) for dev build that can reuse existing image
if scripts/sandbox_command.sh -q; then
  scripts/build_sandbox.sh -sd
if scripts/sandbox_command.sh -q && [[ "${BUILD_SANDBOX:-}" =~ ^(1|true)$ ]]; then
  scripts/build_sandbox.sh -s
fi

@@ -30,10 +30,6 @@ SKIP_NPM_INSTALL_BUILD=false
while getopts "sdf:" opt; do
  case ${opt} in
    s) SKIP_NPM_INSTALL_BUILD=true ;;
    d)
      DOCKERFILE=Dockerfile-dev
      IMAGE+="-dev"
      ;;
    f)
      DOCKERFILE=$OPTARG
      ;;

@@ -54,38 +50,27 @@ if [ "$SKIP_NPM_INSTALL_BUILD" = false ]; then
  npm run build --workspaces
fi

# if using Dockerfile-dev, then skip rebuild unless BUILD_SANDBOX is set
# rebuild should not be necessary unless Dockerfile-dev is modified
if [ "$DOCKERFILE" = "Dockerfile-dev" ]; then
  if $CMD images -q "$IMAGE" | grep -q . && [ -z "${BUILD_SANDBOX:-}" ]; then
    echo "using existing $IMAGE (set BUILD_SANDBOX=true to force rebuild)"
    exit 0
  fi
fi

# prepare global installation files for prod builds (anything but Dockerfile-dev)
if [ "$DOCKERFILE" != "Dockerfile-dev" ]; then
  # pack cli
  echo "packing @gemini-code/cli ..."
  rm -f packages/cli/dist/gemini-code-cli-*.tgz
  npm pack -w @gemini-code/cli --pack-destination ./packages/cli/dist &>/dev/null
  # pack server
  echo "packing @gemini-code/server ..."
  rm -f packages/server/dist/gemini-code-server-*.tgz
  npm pack -w @gemini-code/server --pack-destination ./packages/server/dist &>/dev/null
  # give node user (used during installation, see Dockerfile) access to these files
  chmod 755 packages/*/dist/gemini-code-*.tgz
fi
# prepare global installation files for prod builds
# pack cli
echo "packing @gemini-code/cli ..."
rm -f packages/cli/dist/gemini-code-cli-*.tgz
npm pack -w @gemini-code/cli --pack-destination ./packages/cli/dist &>/dev/null
# pack server
echo "packing @gemini-code/server ..."
rm -f packages/server/dist/gemini-code-server-*.tgz
npm pack -w @gemini-code/server --pack-destination ./packages/server/dist &>/dev/null
# give node user (used during installation, see Dockerfile) access to these files
chmod 755 packages/*/dist/gemini-code-*.tgz

# build container image & prune older unused images
echo "building $IMAGE ... (can be slow first time)"

if [[ "$CMD" == "podman" ]]; then
  # use empty --authfile to skip unnecessary auth refresh overhead
  $CMD build --authfile=<(echo '{}') -f "$DOCKERFILE" -t "$IMAGE" . >/dev/null
  $CMD build --authfile=<(echo '{}') -f "$DOCKERFILE" -t "$IMAGE" .
elif [[ "$CMD" == "docker" ]]; then
  # use an empty config directory to skip unnecessary auth refresh overhead
  $CMD --config="empty" build -f "$DOCKERFILE" -t "$IMAGE" . >/dev/null
  $CMD --config="empty" buildx build -f "$DOCKERFILE" -t "$IMAGE" .
else
  $CMD build -f "$DOCKERFILE" -t "$IMAGE" . >/dev/null
fi

@@ -22,7 +22,12 @@ node ./scripts/check-build-status.js
# note with sandboxing this flag is passed to the binary inside the sandbox
node_args=()
if [ -n "${DEBUG:-}" ] && ! scripts/sandbox_command.sh -q; then
  node_args=(--inspect-brk)
  if [ -n "${SANDBOX:-}" ]; then
    port="${DEBUG_PORT:-9229}"
    node_args=("--inspect-brk=0.0.0.0:$port")
  else
    node_args=(--inspect-brk)
  fi
fi
node_args+=("./packages/cli" "$@")