diff --git a/ci/docker-compose.yml b/ci/docker-compose.yml
new file mode 100644
index 000000000..6c4d9678f
--- /dev/null
+++ b/ci/docker-compose.yml
@@ -0,0 +1,85 @@
+version: "3.9"
+
+services:
+  node1:
+    image: ghcr.io/xline-kv/xline:latest
+    networks:
+      xline_network:
+        ipv4_address: 172.22.0.2
+    volumes:
+      - ./fixtures:/mnt
+    ports:
+      - "2479:2379" # host 2479 -> container client port 2379
+    environment:
+      RUST_LOG: curp=debug,xline=debug
+    command: >
+      xline
+      --name node1
+      --members node1=http://172.22.0.2:2379,node2=http://172.22.0.3:2379,node3=http://172.22.0.4:2379
+      --storage-engine rocksdb
+      --data-dir /usr/local/xline/data-dir
+      --auth-public-key /mnt/public.pem
+      --auth-private-key /mnt/private.pem
+      --client-listen-urls http://172.22.0.2:2379
+      --peer-listen-urls http://172.22.0.2:2380
+
+  node2:
+    image: ghcr.io/xline-kv/xline:latest
+    networks:
+      xline_network:
+        ipv4_address: 172.22.0.3
+    volumes:
+      - ./fixtures:/mnt
+    ports:
+      - "2480:2379" # host 2480 -> container client port 2379
+    environment:
+      RUST_LOG: curp=debug,xline=debug
+    command: >
+      xline
+      --name node2
+      --members node1=http://172.22.0.2:2379,node2=http://172.22.0.3:2379,node3=http://172.22.0.4:2379
+      --storage-engine rocksdb
+      --data-dir /usr/local/xline/data-dir
+      --auth-public-key /mnt/public.pem
+      --auth-private-key /mnt/private.pem
+      --client-listen-urls http://172.22.0.3:2379
+      --peer-listen-urls http://172.22.0.3:2380
+
+  node3:
+    image: ghcr.io/xline-kv/xline:latest
+    networks:
+      xline_network:
+        ipv4_address: 172.22.0.4
+    volumes:
+      - ./fixtures:/mnt
+    ports:
+      - "2381:2379" # host 2381 -> container client port 2379
+    environment:
+      RUST_LOG: curp=debug,xline=debug
+    command: >
+      xline
+      --name node3
+      --members node1=http://172.22.0.2:2379,node2=http://172.22.0.3:2379,node3=http://172.22.0.4:2379
+      --storage-engine rocksdb
+      --data-dir /usr/local/xline/data-dir
+      --auth-public-key /mnt/public.pem
+      --auth-private-key /mnt/private.pem
+      --client-listen-urls http://172.22.0.4:2379
+      --peer-listen-urls http://172.22.0.4:2380
+
+  client:
+    image: ghcr.io/xline-kv/etcdctl:v3.5.9
+    depends_on:
+      - node1
+      - node2
+      - node3
+    networks:
+      - xline_network
+    command: sh -c "while true; do sleep 30; done" # Keeps the client running
+
+networks:
+  xline_network:
+    driver: bridge
+    ipam:
+      config:
+        - subnet: "172.22.0.0/16"
diff --git a/ci/fixtures/private.pem b/ci/fixtures/private.pem
new file mode 100644
index 000000000..a88842602
--- /dev/null
+++ b/ci/fixtures/private.pem
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCnAxxSXJYWZCKr
+6f6j0HRUwkhX/0+GXjEclWoLA5+KZAuWMSu8bz6X+IScv4vNwORlGSWOnrz+8mb2
+I0F6teVZWfWFqsnyWk7IxM+h9yTg7aY/8685YfWTL7fpWq1/3Fniz4QbsYFuzB1V
+gaZ5fD2CSYIKzSD+qVSlXF25JDFHV7b2OdHrX0UKZOTWY/VE//STt+PJKdX9R3pl
+kGwAzJIkkcAZy0vhvqT3ASTgXchNeN8wGYYb3YirkqIsQB5Xcs1R1W+yz+IrVa6/
+0WMcyE6qtJPZ0lviyT0nHV/pZjXuD4B0aja/1fk/HmXDPMjpK1BuCBTStM/KlcrA
+oAxo+YDhAgMBAAECggEAIyJhY+Y8YMuCC753JkklH+ubQn/gX/kSxduc6mJBvuBb
+G6aOd97DQT8zzrHxHEDXC3ml0AIO6mdeR6uVC9aWQBzPrOYIA+cBqfTVZVJTvMnh
+7pQ6KY01F1izjPDZjQtzEWbseNL30rI3/ZP/zJDZc745EEKlDU3cE8mBogA+Ka6w
+GLozT9qQf8knBrtzxH6SvrZpfaRlP95is82b4IuPhqYdG7dVYFTALE1MyVrCbS4Y
+KytjNLgwp1bIQtWrzMebBGoiU+DvDcRY8zvOfFupDwpYCt3p1aU5wyYYdr74esV7
+jjqHj89Ua65JHJ3XnMAaMc4dHM2FsGqMsOv/DDKInQKBgQDawckQEekx0QuP3eJP
+GWdZ87oc+FVjDe3bYhAnCf/yXRJoqcs5vr1m1yCXFfsjbQFYHWXR9AUtNn5HCwOZ
+zoT1Mv96fXBVGQORgzvlUWS43uKpfIPDVv2I6ZcKSIQAGOgcWYvmBDhYqPHgmx3o
+VSrNGWtLdyw3rD1J6O+1RwtbiwKBgQDDchmY59EXBiTvlyT3Qjl0vZFMHa+TElbh
+ikNtYltbUHtamOXZzpdk/KA7X2dYi0QpVfbbpfP/ly5lYvgZwl8h90Obopru+ACM
+ndlKBfNQYArmWY6bJ2CwF7j1aTCCHZuVuX6/pzFVStRcssn15uoVaIyKd/MhJzLF
+S3ertQkSwwKBgAniMYRhWsjeaghQ/RWXzzyYL3N5oNn92h5MWvB4mjDIFbnW2hC8
+1m/cDmPlIVijZyklAuGuhcFaMfBhxgLf+s/dQv+0xSuDGs8rP7yHpeZYY6NGtelQ
+d9oEu8dCKXybo3kMbq6wyB7xWyRLvdkuZ+WmXVumgb/uL0K0nIfzMscrAoGAeA1e
+K845YSslBQaSbk7/e/X1iguyDWT2eRO01zvTYgPNwZipl2CPHjkPM2km0fy5oaps
+N/94IUd7+EsSmsAKL5LytGbtRFyR+c376rw8+OIFz/iy4BsQCRqJQjWa1lHZf96x
+PIg2hW2xhD9OTv3IS94sdeG4NmUdipMQryhEqoECgYEAkvXOg66IAVTrO6qgoyl5
+42oufa/QE+qOAYoQEpmx3SZx6tMkycfAQqUHYcXhW1HNjyGbbg/sl13yddnPQqig
++ObtQNSIqGZWCc/HIqM//pPI3MHPhWARMOmAbk0I1mT0QKhuFfSugV2xb1Dj/Rvf
+0VdB8txY+5Wz6zP1F2g46gM=
+-----END PRIVATE KEY-----
diff --git a/ci/fixtures/public.pem b/ci/fixtures/public.pem
new file mode 100644
index 000000000..4c52eb652
--- /dev/null
+++ b/ci/fixtures/public.pem
@@ -0,0 +1,9 @@
+-----BEGIN PUBLIC KEY-----
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApwMcUlyWFmQiq+n+o9B0
+VMJIV/9Phl4xHJVqCwOfimQLljErvG8+l/iEnL+LzcDkZRkljp68/vJm9iNBerXl
+WVn1harJ8lpOyMTPofck4O2mP/OvOWH1ky+36Vqtf9xZ4s+EG7GBbswdVYGmeXw9
+gkmCCs0g/qlUpVxduSQxR1e29jnR619FCmTk1mP1RP/0k7fjySnV/Ud6ZZBsAMyS
+JJHAGctL4b6k9wEk4F3ITXjfMBmGG92Iq5KiLEAeV3LNUdVvss/iK1Wuv9FjHMhO
+qrST2dJb4sk9Jx1f6WY17g+AdGo2v9X5Px5lwzzI6StQbggU0rTPypXKwKAMaPmA
+4QIDAQAB
+-----END PUBLIC KEY-----
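For context, a quick smoke test of the compose file above can be run with ordinary docker-compose and etcdctl commands. This is only a sketch: it assumes the `etcdctl` binary is on the PATH inside the `ghcr.io/xline-kv/etcdctl:v3.5.9` image and that the commands are issued from the repository root; the service names, the `xline_network`, and the published port 2479 come from the file itself.

```bash
# Start the three xline nodes plus the idle client container in the background
docker-compose -f ci/docker-compose.yml up -d

# Exercise the cluster from inside the client container; node1:2379 resolves on xline_network
docker-compose -f ci/docker-compose.yml exec client etcdctl --endpoints=http://node1:2379 put foo bar
docker-compose -f ci/docker-compose.yml exec client etcdctl --endpoints=http://node1:2379 get foo

# Or reach node1 from the host through its published port
etcdctl --endpoints=http://127.0.0.1:2479 endpoint status

# Tear the environment down again
docker-compose -f ci/docker-compose.yml down
```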
diff --git a/scripts/benchmark.sh b/scripts/benchmark.sh
index 8bd96a300..a1194794b 100755
--- a/scripts/benchmark.sh
+++ b/scripts/benchmark.sh
@@ -1,281 +1,60 @@
 #!/bin/bash
+# Set the working directory and output directory
 WORKDIR=$(pwd)
 OUTPUT_DIR="${WORKDIR}/out"
-SERVERS=("172.20.0.2" "172.20.0.3" "172.20.0.4" "172.20.0.5")
-MEMBERS="node1=${SERVERS[1]}:2379,node2=${SERVERS[2]}:2379,node3=${SERVERS[3]}:2379"
+mkdir -p "${OUTPUT_DIR}" # Ensure output directory exists
+
+# Define the benchmark test cases
+# Adjust the MEMBERS strings as necessary to match your docker-compose service names and configurations
+MEMBERS="node1=http://node1:2379,node2=http://node2:2379,node3=http://node3:2379"
 
-# container use_curp endpoints
-# XLINE_TESTCASE[0] VS ETCD_TESTCASE[0]: In the best performance case contrast, xline uses the curp-client while the
-# etcd uses etcd-client to propose to the leader directly.
-# XLINE_TESTCASE[1] VS ETCD_TESTCASE[1]: Both etcd and xline use etcd-client to issue a proposal to their nearest follower.
-# XLINE_TESTCASE[0] VS ETCD_TESTCASE[2]: The performance contrast between one using curp-client to propose and the other use etcd-client.
 XLINE_TESTCASE=(
-    "client true ${MEMBERS}"
-    "client false node3=${SERVERS[3]}:2379"
-    "client false node1=${SERVERS[1]}:2379"
-)
-ETCD_TESTCASE=(
-    "client false node1=${SERVERS[1]}:2379"
-    "client false node3=${SERVERS[2]}:2379"
+    "client true ${MEMBERS}"
+    "client false node3=http://node3:2379"
+    "client false node1=http://node1:2379"
 )
+
+# Additional benchmark parameters
 KEY_SPACE_SIZE=("1" "100000")
 CLIENTS_TOTAL=("1 50" "10 300" "50 1000" "100 3000" "200 5000")
-FORMAT="%-8s\t%-12s\t%-8s\t%-s\n"
+FORMAT="%-8s\\t%-12s\\t%-8s\\t%-s\\n"
 
-# generate benchmark command by arguments
-# args:
-#   $1: container to run benchmark
-#   $2: endpoints to connect
-#   $3: use_curp flag
-#   $4: clients number
-#   $5: total requests number
-#   $6: size of the key in the request
+# Function to run the benchmark command
 benchmark_cmd() {
-    container_name=${1}
-    endpoints=${2}
-    use_curp=${3}
-    clients=${4}
-    total=${5}
-    key_space_size=${6}
-    echo "docker exec ${container_name} /usr/local/bin/benchmark --endpoints ${endpoints} ${use_curp} --clients=${clients} --stdout put --key-size=8 --val-size=256 --total=${total} --key-space-size=${key_space_size}"
-}
-
-# run xline node by index
-# args:
-#   $1: index of the node
-run_xline() {
-    cmd="/usr/local/bin/xline \
-        --name node${1} \
-        --members ${MEMBERS} \
-        --storage-engine rocksdb \
-        --data-dir /usr/local/xline/data-dir \
-        --retry-timeout 150ms \
-        --rpc-timeout 5s \
-        --server-wait-synced-timeout 10s \
-        --client-wait-synced-timeout 10s \
-        --client-propose-timeout 5s \
-        --batch-timeout 1ms \
-        --cmd-workers 16"
-
-    if [ ${1} -eq 1 ]; then
-        cmd="${cmd} --is-leader"
+    local container_name=$1
+    local endpoints=$2
+    local use_curp=$3
+    local clients=$4
+    local total=$5
+    local key_space_size=$6
+    # Note: Adjust the command as per your actual benchmark tool's syntax
+    echo "docker-compose exec ${container_name} benchmark --endpoints ${endpoints} ${use_curp} --clients=${clients} --stdout put --key-size=8 --val-size=256 --total=${total} --key-space-size=${key_space_size}"
+}
+
+# Ensure docker-compose is up and running
+echo "Starting the test environment using docker-compose..."
+docker-compose -f "${WORKDIR}/ci/docker-compose.yml" up -d
+
+# Run benchmark tests
+echo "Starting benchmark tests..."
+for testcase in "${XLINE_TESTCASE[@]}"; do
+    IFS=' ' read -r container_name use_curp_flag endpoints <<< "$testcase"
+    use_curp=""
+    if [[ $use_curp_flag == "true" ]]; then
+        use_curp="--use-curp"
     fi
-
-    docker exec -e RUST_LOG=curp,xline -d node${1} ${cmd}
-    echo "docker exec -e RUST_LOG=curp,xline -d node${1} ${cmd}"
-}
-
-# run etcd node by index
-# args:
-#   $1: index of the node
-run_etcd() {
-    cmd="/usr/local/bin/etcd --name node${1} \
-        --data-dir /tmp/node${1} \
-        --listen-client-urls http://0.0.0.0:2379 \
-        --listen-peer-urls http://0.0.0.0:2380 \
-        --advertise-client-urls http://${SERVERS[$1]}:2379 \
-        --initial-advertise-peer-urls http://${SERVERS[$1]}:2380 \
-        --initial-cluster-token etcd-cluster-1 \
-        --initial-cluster node1=http://${SERVERS[1]}:2380,node2=http://${SERVERS[2]}:2380,node3=http://${SERVERS[3]}:2380 \
-        --initial-cluster-state new \
-        --logger zap"
-    docker exec -d node${1} ${cmd}
-}
-
-# run cluster of xline/etcd in container
-# args:
-#   $1: xline/etcd cluster
-run_cluster() {
-    server=${1}
-    echo cluster starting
-    case ${server} in
-    xline)
-        run_xline 1 &
-        run_xline 2 &
-        run_xline 3 &
-        sleep 3
-        ;;
-    etcd)
-        run_etcd 1
-        sleep 3 # in order to let etcd node1 become leader
-        run_etcd 2 &
-        run_etcd 3 &
-        ;;
-    esac
-    wait
-    echo cluster started
-}
-
-# stop cluster of xline/etcd and remove etcd data files
-# args:
-#   $1: xline/etcd cluster
-stop_cluster() {
-    server=${1}
-    echo cluster stopping
-    for x in 1 2 3; do
-        docker exec node${x} pkill -9 ${server} &
-        docker exec node${x} rm -rf /tmp/node${x} &
-        docker exec node${x} rm -rf /usr/local/xline/data-dir &
-    done
-    wait
-    echo cluster stopped
-}
-
-# stop all containers
-stop_all() {
-    echo stopping
-    for name in "node1" "node2" "node3" "client"; do
-        docker_id=$(docker ps -qf "name=${name}")
-        if [ -n "$docker_id" ]; then
-            docker stop $docker_id
-        fi
-    done
-    sleep 1
-    echo stopped
-}
-
-# set latency between two containers
-# args:
-#   $1: source container
-#   $2: destination ip address
-#   $3: latency between two nodes
-#   $4: idx required by tc
-set_latency() {
-    container_name=${1}
-    dst_ip=${2}
-    latency=${3}
-    idx=${4}
-    docker exec ${container_name} tc filter add dev eth0 protocol ip parent 1:0 u32 match ip dst ${dst_ip} flowid 1:${idx}
-    docker exec ${container_name} tc qdisc add dev eth0 parent 1:${idx} handle ${idx}0: netem delay ${latency}
-}
-
-# set latency of cluster
-# args:
-#   $1: size of cluster
-set_cluster_latency() {
-    cluster_size=${1}
-    client_ipaddr=${SERVERS[0]}
-    docker exec client tc qdisc add dev eth0 root handle 1: prio bands $((cluster_size + 4))
-    # set latency:
-    #   client -> node1 : 75ms
-    #   client -> node2 : 75ms
-    for ((i = 1; i < ${cluster_size}; i++)); do
-        set_latency client ${SERVERS[$i]} 75ms $((i + 3)) &
-    done
-    # client -> node3: 50ms
-    set_latency client ${SERVERS[3]} 50ms $((cluster_size + 3)) &
-    for ((i = 1; i <= ${cluster_size}; i++)); do
-        docker exec node$i tc qdisc add dev eth0 root handle 1: prio bands $((cluster_size + 4))
-        idx=4
-        # node1 <-> node2, node2 <-> node3, node3 <-> node1: 50ms
-        for ((j = 1; j <= ${cluster_size}; j++)); do
-            if [ ${i} -ne ${j} ]; then
-                set_latency node$i ${SERVERS[$j]} 50ms ${idx} &
-                idx=$((idx + 1))
-            fi
+    for key_space_size in "${KEY_SPACE_SIZE[@]}"; do
+        for clients_total in "${CLIENTS_TOTAL[@]}"; do
+            IFS=' ' read -r clients total <<< "$clients_total"
+            cmd=$(benchmark_cmd "${container_name}" "${endpoints}" "${use_curp}" "${clients}" "${total}" "${key_space_size}")
+            echo "Executing benchmark: $cmd"
+            eval $cmd
         done
-        if [[ ${i} -eq ${cluster_size} ]]; then
-            # node3 -> client: 50ms
-            set_latency node${i} ${SERVERS[0]} 50ms ${idx} &
-        else
-            # node1, node2 -> client: 75ms
-            set_latency node${i} ${SERVERS[0]} 75ms ${idx} &
-        fi
     done
-    wait
-}
-
-# run container of xline/etcd use specified image
-# args:
-#   $1: size of cluster
-#   $2: image name
-run_container() {
-    echo container starting
-    size=${1}
-    case ${2} in
-    xline)
-        image="ghcr.io/xline-kv/xline:latest"
-        ;;
-    etcd)
-        image="datenlord/etcd:v3.5.5"
-        ;;
-    esac
-    docker run -d -it --rm --name=client --net=xline_net --ip=${SERVERS[0]} --cap-add=NET_ADMIN --cpu-shares=512 -m=512M -v ${WORKDIR}:/mnt ${image} bash &
-    for ((i = 1; i <= ${size}; i++)); do
-        docker run -d -it --rm --name=node${i} --net=xline_net --ip=${SERVERS[$i]} --cap-add=NET_ADMIN -m=2048M -v ${WORKDIR}:/mnt ${image} bash &
-    done
-    wait
-    set_cluster_latency ${size}
-    echo container started
-}
-
-stop_all
-docker network create --subnet=172.20.0.0/24 xline_net >/dev/null 2>&1
-echo "A Docker network named 'xline_net' is created for communication among various xline nodes. You can use the command 'docker network rm xline_net' to remove it after use."
-rm -r ${OUTPUT_DIR} >/dev/null 2>&1
-mkdir ${OUTPUT_DIR}
-mkdir ${OUTPUT_DIR}/logs
-
-for server in "xline" "etcd"; do
-    count=0
-    logs_dir=${OUTPUT_DIR}/logs/${server}_logs
-    mkdir -p ${logs_dir}
-    result_file=${OUTPUT_DIR}/${server}.txt
-    printf ${FORMAT} "QPS" "Latency(ms)" "Time(s)" "Command" >>${result_file}
-    case ${server} in
-    xline)
-        TESTCASE=("${XLINE_TESTCASE[@]}")
-        ;;
-    etcd)
-        TESTCASE=("${ETCD_TESTCASE[@]}")
-        ;;
-    esac
-    run_container 3 ${server}
-
-    for testcase in "${TESTCASE[@]}"; do
-
-        tmp=(${testcase})
-        container_name=${tmp[0]}
-        case ${tmp[1]} in
-        true)
-            use_curp="--use-curp"
-            ;;
-        false)
-            use_curp=""
-            ;;
-        esac
-        endpoints=${tmp[@]:2}
-        for key_space_size in "${KEY_SPACE_SIZE[@]}"; do
-            for clients_total in "${CLIENTS_TOTAL[@]}"; do
-                echo ""
-                echo "##################################################"
-                echo ""
-                echo "server: $server"
-                echo "testcase: ${testcase}"
-                echo "clients_total: ${clients_total}"
-                echo "key_space_size: ${key_space_size}"
-                echo ""
-                echo "##################################################"
-                echo ""
-                tmp=(${clients_total})
-                clients=${tmp[0]}
-                total=${tmp[1]}
-                output_file=${logs_dir}/case_${count}.txt
-                count=$((count + 1))
-
-                run_cluster ${server}
-                cmd=$(benchmark_cmd "${container_name}" "${endpoints}" "${use_curp}" "${clients}" "${total}" "${key_space_size}")
-                ${cmd} >${output_file}
-                stop_cluster ${server}
-
-                Latency=$(cat ${output_file} | grep Average | awk '{printf "%.1f",$2*1000}')
-                QPS=$(cat ${output_file} | grep Requests/sec | awk '{printf "%.1f",$2}')
-                Time=$(cat ${output_file} | grep Total | awk '{printf "%.1f",$2}')
+done
 
-                printf ${FORMAT} ${QPS} ${Latency} ${Time} "${cmd}" >>${result_file}
+# Stop and remove the test environment
+echo "Stopping and removing the test environment..."
+docker-compose -f "${WORKDIR}/ci/docker-compose.yml" down
 
-            done
-        done
-    done
-    stop_all
-done
+echo "Benchmark tests completed."
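To make the rewritten loop concrete, the first iteration (test case `client true ${MEMBERS}`, `KEY_SPACE_SIZE=1`, `CLIENTS_TOTAL="1 50"`) generates and `eval`s roughly the command below. The flags are exactly the ones assembled by `benchmark_cmd`; as the script's own note says, they may need adjusting to whatever benchmark binary the client image actually ships.

```bash
docker-compose exec client benchmark \
    --endpoints node1=http://node1:2379,node2=http://node2:2379,node3=http://node3:2379 \
    --use-curp --clients=1 --stdout put --key-size=8 --val-size=256 \
    --total=50 --key-space-size=1
```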
diff --git a/scripts/quick_start.sh b/scripts/quick_start.sh
index 1185bdbf2..5b1937da1 100755
--- a/scripts/quick_start.sh
+++ b/scripts/quick_start.sh
@@ -1,110 +1,29 @@
 #!/bin/bash
-DIR=$(
-    cd "$(dirname "$0")"
-    pwd
-)
-SERVERS=("172.20.0.2" "172.20.0.3" "172.20.0.4" "172.20.0.5")
-MEMBERS="node1=${SERVERS[1]}:2380,${SERVERS[1]}:2381,node2=${SERVERS[2]}:2380,${SERVERS[2]}:2381,node3=${SERVERS[3]}:2380,${SERVERS[3]}:2381"
-source $DIR/log.sh
+# This script is designed to quickly start the Xline environment using Docker Compose.
+# It ensures that all required services are up and running in a containerized setup.
 
-# stop all containers
-stop_all() {
-    log::info stopping
-    for name in "node1" "node2" "node3" "client"; do
-        docker_id=$(docker ps -qf "name=${name}")
-        if [ -n "$docker_id" ]; then
-            docker stop $docker_id
-        fi
-    done
-    docker network rm xline_net >/dev/null 2>&1
-    docker stop "prometheus"
-    sleep 1
-    log::info stopped
-}
-
-# run xline node by index
-# args:
-#   $1: index of the node
-run_xline() {
-    cmd="/usr/local/bin/xline \
-        --name node${1} \
-        --members ${MEMBERS} \
-        --storage-engine rocksdb \
-        --data-dir /usr/local/xline/data-dir \
-        --auth-public-key /mnt/public.pem \
-        --auth-private-key /mnt/private.pem \
-        --client-listen-urls=http://${SERVERS[$1]}:2379 \
-        --peer-listen-urls=http://${SERVERS[$1]}:2380,http://${SERVERS[$1]}:2381 \
-        --client-advertise-urls=http://${SERVERS[$1]}:2379 \
-        --peer-advertise-urls=http://${SERVERS[$1]}:2380,http://${SERVERS[$1]}:2381"
-
-    if [ -n "$LOG_LEVEL" ]; then
-        cmd="${cmd} --log-level ${LOG_LEVEL}"
-    fi
-
-    if [ ${1} -eq 1 ]; then
-        cmd="${cmd} --is-leader"
-    fi
-
-    docker exec -e RUST_LOG=debug -d node${1} ${cmd}
-    log::info "command is: docker exec -e RUST_LOG=debug -d node${1} ${cmd}"
-}
-
-# run cluster of xline/etcd in container
-run_cluster() {
-    log::info cluster starting
-    run_xline 1 &
-    run_xline 2 &
-    run_xline 3 &
-    wait
-    log::info cluster started
-}
+# Navigate to the directory containing docker-compose.yml
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+COMPOSE_FILE="${DIR}/../ci/docker-compose.yml"
 
-# run container of xline/etcd use specified image
-# args:
-#   $1: size of cluster
-run_container() {
-    log::info container starting
-    size=${1}
-    image="ghcr.io/xline-kv/xline:latest"
-    for ((i = 1; i <= ${size}; i++)); do
-        mount_point="-v ${DIR}:/mnt"
-        if [ -n "$LOG_PATH" ]; then
-            mkdir -p ${LOG_PATH}/node${i}
-            mount_point="${mount_point} -v ${LOG_PATH}/node${i}:/var/log/xline"
-        fi
-        docker run -d -it --rm --name=node${i} --net=xline_net \
-            --ip=${SERVERS[$i]} --cap-add=NET_ADMIN --cpu-shares=1024 \
-            -m=512M ${mount_point} ${image} bash &
-    done
-    docker run -d -it --rm --name=client \
-        --net=xline_net --ip=${SERVERS[0]} --cap-add=NET_ADMIN \
-        --cpu-shares=1024 -m=512M -v ${DIR}:/mnt ghcr.io/xline-kv/etcdctl:v3.5.9 bash &
-    wait
-    log::info container started
+# Function to start the environment
+start_environment() {
+    echo "Starting the Xline environment with Docker Compose..."
+    docker-compose -f "${COMPOSE_FILE}" up -d
+    echo "The Xline environment has started successfully."
 }
 
-# run prometheus
-run_prometheus() {
-    docker run -d -it --rm --name=prometheus --net=xline_net -p 9090:9090 \
-        --ip=${1} --cap-add=NET_ADMIN -v ${DIR}/prometheus.yml:/etc/prometheus/prometheus.yml \
-        prom/prometheus
+# Function to stop and clean up the environment
+stop_environment() {
+    echo "Stopping and cleaning up the Xline environment..."
+    docker-compose -f "${COMPOSE_FILE}" down
+    echo "The Xline environment has been stopped and cleaned up."
 }
 
-if [ -z "$1" ]; then
-    stop_all
-    docker network create --subnet=172.20.0.0/24 xline_net >/dev/null 2>&1
-    log::warn "A Docker network named 'xline_net' is created for communication among various xline nodes. You can use the command 'docker network rm xline_net' to remove it after use."
-    run_container 3
-    run_cluster
-    run_prometheus "172.20.0.6"
-    echo "Prometheus starts on http://172.20.0.6:9090/graph and http://127.0.0.1:9090/graph"
-    exit 0
-elif [ "$1" == "stop" ]; then
-    stop_all
-    exit 0
+# Check if the first command-line argument is 'stop'; if so, call stop_environment
+if [ "$1" == "stop" ]; then
+    stop_environment
 else
-    echo "Unexpected argument: $1"
-    exit 1
+    start_environment
 fi
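With this rewrite, bringing the demo cluster up and down is a single script invocation. A typical session, assuming Docker and docker-compose are installed on the host, looks like:

```bash
./scripts/quick_start.sh                       # runs docker-compose up -d against ci/docker-compose.yml
docker-compose -f ci/docker-compose.yml ps     # verify node1, node2, node3 and client are running
./scripts/quick_start.sh stop                  # runs docker-compose down, removing containers and network
```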