diff --git a/.github/workflows/script/launch_network.sh b/.github/workflows/script/launch_network.sh index 9e346960f1..8df792974e 100755 --- a/.github/workflows/script/launch_network.sh +++ b/.github/workflows/script/launch_network.sh @@ -71,8 +71,8 @@ if ! grep -q "schemaVersion: 2" ./local-config-after.yaml; then fi # check remote-config-after.yaml should contains 'schemaVersion: 1' -if ! grep -q "schemaVersion: 1" ./remote-config-after.yaml; then - echo "schemaVersion: 1 not found in remote-config-after.yaml" +if ! grep -q "schemaVersion: 2" ./remote-config-after.yaml; then + echo "schemaVersion: 2 not found in remote-config-after.yaml" exit 1 fi echo "::endgroup::" diff --git a/src/business/utils/port-utilities.ts b/src/business/utils/port-utilities.ts new file mode 100644 index 0000000000..12b6ebf2ff --- /dev/null +++ b/src/business/utils/port-utilities.ts @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: Apache-2.0 + +import net from 'node:net'; +import * as constants from '../../core/constants.js'; +import {type SoloLogger} from '../../core/logging/solo-logger.js'; + +/** + * Utility class for port-related operations + */ +export class PortUtilities { + /** + * Check if a TCP port is available on the local machine + * @param port Port number to check + * @returns Promise that resolves to true if port is available, false otherwise + */ + public static async isPortAvailable(port: number): Promise { + return new Promise((resolve, reject) => { + const server: net.Server = net.createServer(); + const timeout = setTimeout(() => { + server.close(); + reject(new Error(`Timeout while checking port ${port}`)); + }, 5000); // 5-second timeout + + server.once('error', error => { + clearTimeout(timeout); + if ((error as NodeJS.ErrnoException).code === 'EADDRINUSE') { + // Port is in use + resolve(false); + } else { + // Unexpected error + reject(error); + } + }); + + server.once('listening', () => { + clearTimeout(timeout); + // Port is available + server.close(() => { + 
resolve(true); + }); + }); + server.listen(port, constants.LOCAL_HOST); + }); + } + + /** + * Find an available port starting from the given port + * @param startPort Port number to start checking from + * @param timeoutMs Timeout in milliseconds before giving up (default: 30000) + * @param logger logger for debug messages + * @returns Promise that resolves to the first available port or throws an error if timeout is reached + * @throws Error if no available port is found within the timeout period + */ + public static async findAvailablePort( + startPort: number, + timeoutMs: number = 30_000, + logger: SoloLogger, + ): Promise { + if (!Number.isInteger(startPort) || startPort < 1 || startPort > 65_535) { + throw new Error(`Invalid startPort: ${startPort}. Must be an integer between 1 and 65535.`); + } + let port: number = startPort; + let attempts: number = 0; + const startTime: number = Date.now(); + + while (!(await this.isPortAvailable(port))) { + logger.debug(`Port ${port} is not available, trying ${port + 1}`); + port++; + attempts++; + + if (Date.now() - startTime > timeoutMs) { + const errorMessage: string = `Failed to find an available port after ${timeoutMs}ms timeout, starting from port ${startPort}`; + logger.error(errorMessage); + throw new Error(errorMessage); + } + } + + return port; + } +} + +// The managePortForward function has been moved to components-data-wrapper.ts diff --git a/src/commands/explorer.ts b/src/commands/explorer.ts index bb55ec5d97..23fa39052a 100644 --- a/src/commands/explorer.ts +++ b/src/commands/explorer.ts @@ -261,6 +261,9 @@ export class ExplorerCommand extends BaseCommand { ]) as ExplorerDeployConfigClass; context_.config.valuesArg += await self.prepareValuesArg(context_.config); + context_.config.clusterReference = + (this.configManager.getFlag(flags.clusterRef) as string) ?? + this.k8Factory.default().clusters().readCurrent(); context_.config.clusterContext = context_.config.clusterRef ? 
this.localConfig.configuration.clusterRefs.get(context_.config.clusterRef)?.toString() : this.k8Factory.default().contexts().readCurrent(); @@ -450,7 +453,7 @@ export class ExplorerCommand extends BaseCommand { }, this.addMirrorNodeExplorerComponents(), { - title: 'Enable port forwarding', + title: 'Enable port forwarding for explorer', skip: context_ => !context_.config.forcePortForward, task: async context_ => { const pods: Pod[] = await this.k8Factory @@ -461,16 +464,19 @@ export class ExplorerCommand extends BaseCommand { throw new SoloError('No Hiero Explorer pod found'); } const podReference: PodReference = pods[0].podReference; + const clusterReference: ClusterReferenceName = context_.config.clusterReference; - await this.k8Factory - .getK8(context_.config.clusterContext) - .pods() - .readByReference(podReference) - .portForward(constants.EXPLORER_PORT, constants.EXPLORER_PORT, true); - this.logger.addMessageGroup(constants.PORT_FORWARDING_MESSAGE_GROUP, 'Port forwarding enabled'); - this.logger.addMessageGroupMessage( - constants.PORT_FORWARDING_MESSAGE_GROUP, - `Explorer port forward enabled on http://localhost:${constants.EXPLORER_PORT}`, + await this.remoteConfig.configuration.components.managePortForward( + clusterReference, + podReference, + constants.EXPLORER_PORT, // Pod port + constants.EXPLORER_PORT, // Local port + this.k8Factory.getK8(context_.config.clusterContext), + this.logger, + ComponentTypes.Explorers, + + 'Explorer', + context_.config.isChartInstalled, // Reuse existing port if chart is already installed ); }, }, diff --git a/src/commands/mirror-node.ts b/src/commands/mirror-node.ts index 74bd63cf26..add373148f 100644 --- a/src/commands/mirror-node.ts +++ b/src/commands/mirror-node.ts @@ -53,6 +53,7 @@ import {Base64} from 'js-base64'; import {Lock} from '../core/lock/lock.js'; import {Version} from '../business/utils/version.js'; import {IngressClass} from '../integration/kube/resources/ingress-class/ingress-class.js'; +// Port 
forwarding is now a method on the components object interface MirrorNodeDeployConfigClass { isChartInstalled: boolean; @@ -614,6 +615,10 @@ export class MirrorNodeCommand extends BaseCommand { title: 'Check Monitor', labels: ['app.kubernetes.io/component=monitor', 'app.kubernetes.io/name=monitor'], }, + { + title: 'Check Web3', + labels: ['app.kubernetes.io/component=web3', 'app.kubernetes.io/name=web3'], + }, { title: 'Check Importer', labels: ['app.kubernetes.io/component=importer', 'app.kubernetes.io/name=importer'], @@ -771,7 +776,7 @@ export class MirrorNodeCommand extends BaseCommand { }, this.addMirrorNodeComponents(), { - title: 'Enable port forwarding', + title: 'Enable port forwarding for mirror ingress controller', skip: context_ => !context_.config.forcePortForward || !context_.config.enableIngress, task: async context_ => { const pods: Pod[] = await this.k8Factory @@ -789,15 +794,19 @@ export class MirrorNodeCommand extends BaseCommand { } } - await this.k8Factory - .getK8(context_.config.clusterContext) - .pods() - .readByReference(podReference) - .portForward(constants.MIRROR_NODE_PORT, 80, true); - this.logger.addMessageGroup(constants.PORT_FORWARDING_MESSAGE_GROUP, 'Port forwarding enabled'); - this.logger.addMessageGroupMessage( - constants.PORT_FORWARDING_MESSAGE_GROUP, - `Mirror Node port forward enabled on localhost:${constants.MIRROR_NODE_PORT}`, + const clusterReference: ClusterReferenceName = context_.config.clusterReference; + + await this.remoteConfig.configuration.components.managePortForward( + clusterReference, + podReference, + 80, // Pod port + constants.MIRROR_NODE_PORT, // Local port + this.k8Factory.getK8(context_.config.clusterContext), + this.logger, + ComponentTypes.MirrorNode, + + 'Mirror ingress controller', + context_.config.isChartInstalled, // Reuse existing port if chart is already installed ); }, }, diff --git a/src/commands/node/tasks.ts b/src/commands/node/tasks.ts index a7160ef4b7..e7baab34bc 100644 --- 
a/src/commands/node/tasks.ts +++ b/src/commands/node/tasks.ts @@ -1573,7 +1573,7 @@ export class NodeCommandTasks { public enablePortForwarding(enablePortForwardHaProxy: boolean = false) { return { - title: 'Enable port forwarding', + title: 'Enable port forwarding for debug port and/or GRPC port', task: async context_ => { const nodeAlias: NodeAlias = context_.config.debugNodeAlias || 'node1'; const context = helpers.extractContextFromConsensusNodes(nodeAlias, context_.config.consensusNodes); @@ -1600,15 +1600,19 @@ export class NodeCommandTasks { throw new SoloError(`No HAProxy pod found for node alias: ${nodeAlias}`); } const podReference: PodReference = pods[0].podReference; - await this.k8Factory - .getK8(context) - .pods() - .readByReference(podReference) - .portForward(constants.GRPC_PORT, constants.GRPC_PORT, true); - this.logger.addMessageGroup(constants.PORT_FORWARDING_MESSAGE_GROUP, 'Port forwarding enabled'); - this.logger.addMessageGroupMessage( - constants.PORT_FORWARDING_MESSAGE_GROUP, - `Consensus Node gRPC port forward enabled on localhost:${constants.GRPC_PORT}`, + const nodeId: number = Templates.nodeIdFromNodeAlias(nodeAlias); + await this.remoteConfig.configuration.components.managePortForward( + undefined, + podReference, + constants.GRPC_PORT, // Pod port + constants.GRPC_PORT, // Local port + this.k8Factory.getK8(context_.config.clusterContext), + this.logger, + ComponentTypes.ConsensusNode, + + 'Consensus Node gRPC', + context_.config.isChartInstalled, // Reuse existing port if chart is already installed + nodeId, ); } }, diff --git a/src/commands/relay.ts b/src/commands/relay.ts index 238df2240c..bce2235149 100644 --- a/src/commands/relay.ts +++ b/src/commands/relay.ts @@ -420,7 +420,7 @@ export class RelayCommand extends BaseCommand { }, this.addRelayComponent(), { - title: 'Enable port forwarding', + title: 'Enable port forwarding for relay node', task: async (context_): Promise => { const pods: Pod[] = await this.k8Factory 
.getK8(context_.config.clusterContext) @@ -430,15 +430,21 @@ export class RelayCommand extends BaseCommand { throw new SoloError('No Relay pod found'); } const podReference: PodReference = pods[0].podReference; - await this.k8Factory - .getK8(context_.config.context) - .pods() - .readByReference(podReference) - .portForward(constants.JSON_RPC_RELAY_PORT, constants.JSON_RPC_RELAY_PORT, true); - this.logger.addMessageGroup(constants.PORT_FORWARDING_MESSAGE_GROUP, 'Port forwarding enabled'); - this.logger.addMessageGroupMessage( - constants.PORT_FORWARDING_MESSAGE_GROUP, - `JSON RPC Relay forward enabled on localhost:${constants.JSON_RPC_RELAY_PORT}`, + const clusterReference: string = + (this.configManager.getFlag(flags.clusterRef) as string) ?? + this.k8Factory.default().clusters().readCurrent(); + + await this.remoteConfig.configuration.components.managePortForward( + clusterReference, + podReference, + constants.JSON_RPC_RELAY_PORT, // Pod port + constants.JSON_RPC_RELAY_PORT, // Local port + this.k8Factory.getK8(context_.config.clusterContext), + this.logger, + ComponentTypes.RelayNodes, + + 'JSON RPC Relay', + context_.config.isChartInstalled, // Reuse existing port if chart is already installed ); }, skip: context_ => !context_.config.forcePortForward, @@ -626,12 +632,14 @@ export class RelayCommand extends BaseCommand { title: 'Add relay component in remote config', skip: context_ => !this.remoteConfig.isLoaded() || context_.config.isChartInstalled, task: async (context_): Promise => { - const {namespace, nodeAliases, clusterRef} = context_.config; + const {namespace, nodeAliases} = context_.config; const nodeIds: NodeId[] = nodeAliases.map((nodeAlias: NodeAlias) => Templates.nodeIdFromNodeAlias(nodeAlias)); - + const clusterReference: string = + (this.configManager.getFlag(flags.clusterRef) as string) ?? 
+ this.k8Factory.default().clusters().readCurrent(); this.remoteConfig.configuration.components.addNewComponent( - this.componentFactory.createNewRelayComponent(clusterRef, namespace, nodeIds), + this.componentFactory.createNewRelayComponent(clusterReference, namespace, nodeIds), ComponentTypes.RelayNodes, ); @@ -648,12 +656,22 @@ export class RelayCommand extends BaseCommand { task: async (context_): Promise => { const clusterReference: ClusterReferenceName = context_.config.clusterRef; - const relayComponents: RelayNodeStateSchema[] = - this.remoteConfig.configuration.components.getComponentsByClusterReference( - ComponentTypes.RelayNodes, - clusterReference, - ); + // if clusterReference not defined then we will remove all relay nodes + const relayComponents: RelayNodeStateSchema[] = clusterReference + ? this.remoteConfig.configuration.components.getComponentsByClusterReference( + ComponentTypes.RelayNodes, + clusterReference, + ) + : this.remoteConfig.configuration.components.getComponentByType( + ComponentTypes.RelayNodes, + ); + if (relayComponents.length === 0) { + this.logger.showUser( + `Did not find any relay node in remote config to be removed, clusterReference = ${clusterReference}`, + ); + return; + } for (const relayComponent of relayComponents) { this.remoteConfig.configuration.components.removeComponent( relayComponent.metadata.id, diff --git a/src/core/account-manager.ts b/src/core/account-manager.ts index a3e1fe0305..441d912927 100644 --- a/src/core/account-manager.ts +++ b/src/core/account-manager.ts @@ -427,16 +427,15 @@ export class AccountManager { const targetPort: number = localPort; if (this._portForwards.length < totalNodes) { - this._portForwards.push( - await this.k8Factory - .getK8(networkNodeService.context) - .pods() - .readByReference(PodReference.of(networkNodeService.namespace, networkNodeService.haProxyPodName)) - .portForward(localPort, port), - ); + const portForward: ExtendedNetServer = await this.k8Factory + 
.getK8(networkNodeService.context) + .pods() + .readByReference(PodReference.of(networkNodeService.namespace, networkNodeService.haProxyPodName)) + .portForward(localPort, port); + this._portForwards.push(portForward); + this.logger.debug(`using local host port forward: ${host}:${portForward.localPort}`); } - this.logger.debug(`using local host port forward: ${host}:${targetPort}`); object[`${host}:${targetPort}`] = accountId; await this.testNodeClientConnection(object, accountId); diff --git a/src/core/config/remote/api/component-factory-api.ts b/src/core/config/remote/api/component-factory-api.ts index 386f0aba0b..6af58694a3 100644 --- a/src/core/config/remote/api/component-factory-api.ts +++ b/src/core/config/remote/api/component-factory-api.ts @@ -1,6 +1,6 @@ // SPDX-License-Identifier: Apache-2.0 -import {type ClusterReferenceName} from '../../../../types/index.js'; +import {type ClusterReferenceName, type portForwardConfig} from '../../../../types/index.js'; import {type NodeId} from '../../../../types/aliases.js'; import {type NamespaceName} from '../../../../types/namespace/namespace-name.js'; import {type DeploymentPhase} from '../../../../data/schema/model/remote/deployment-phase.js'; @@ -34,6 +34,7 @@ export interface ComponentFactoryApi { clusterReference: ClusterReferenceName, namespace: NamespaceName, phase: DeploymentPhase.REQUESTED | DeploymentPhase.STARTED, + portForwardConfigs?: portForwardConfig[], ): ConsensusNodeStateSchema; createConsensusNodeComponentsFromNodeIds( diff --git a/src/core/config/remote/api/components-data-wrapper-api.ts b/src/core/config/remote/api/components-data-wrapper-api.ts index 699595eb63..acc4b27ca7 100644 --- a/src/core/config/remote/api/components-data-wrapper-api.ts +++ b/src/core/config/remote/api/components-data-wrapper-api.ts @@ -5,11 +5,14 @@ import {type ComponentTypes} from '../enumerations/component-types.js'; import {type DeploymentPhase} from '../../../../data/schema/model/remote/deployment-phase.js'; import 
{type ClusterReferenceName, type ComponentId} from '../../../../types/index.js'; import {type DeploymentStateSchema} from '../../../../data/schema/model/remote/deployment-state-schema.js'; +import {type PodReference} from '../../../../integration/kube/resources/pod/pod-reference.js'; +import {type K8} from '../../../../integration/kube/k8.js'; +import {type SoloLogger} from '../../../logging/solo-logger.js'; export interface ComponentsDataWrapperApi { state: DeploymentStateSchema; - addNewComponent(component: BaseStateSchema, type: ComponentTypes): void; + addNewComponent(component: BaseStateSchema, type: ComponentTypes, isReplace?: boolean): void; changeNodePhase(componentId: ComponentId, phase: DeploymentPhase): void; @@ -17,6 +20,8 @@ export interface ComponentsDataWrapperApi { getComponent(type: ComponentTypes, componentId: ComponentId): T; + getComponentByType(type: ComponentTypes): T[]; + getComponentsByClusterReference( type: ComponentTypes, clusterReference: ClusterReferenceName, @@ -25,4 +30,17 @@ export interface ComponentsDataWrapperApi { getComponentById(type: ComponentTypes, id: number): T; getNewComponentId(componentType: ComponentTypes): number; + + managePortForward( + clusterReference: ClusterReferenceName, + podReference: PodReference, + podPort: number, + localPort: number, + k8Client: K8, + logger: SoloLogger, + componentType: ComponentTypes, + label: string, + reuse?: boolean, + nodeId?: number, + ): Promise; } diff --git a/src/core/config/remote/component-factory.ts b/src/core/config/remote/component-factory.ts index 8e482b22d5..5ec17d3ccd 100644 --- a/src/core/config/remote/component-factory.ts +++ b/src/core/config/remote/component-factory.ts @@ -5,7 +5,7 @@ import {DeploymentPhase} from '../../../data/schema/model/remote/deployment-phas import {type NodeId} from '../../../types/aliases.js'; import {ComponentStateMetadataSchema} from '../../../data/schema/model/remote/state/component-state-metadata-schema.js'; import {type NamespaceName} 
from '../../../types/namespace/namespace-name.js'; -import {type ClusterReferenceName, type ComponentId} from '../../../types/index.js'; +import {type ClusterReferenceName, type ComponentId, portForwardConfig} from '../../../types/index.js'; import {type RemoteConfigRuntimeStateApi} from '../../../business/runtime-state/api/remote-config-runtime-state-api.js'; import {inject, injectable} from 'tsyringe-neo'; import {patchInject} from '../../dependency-injection/container-helper.js'; @@ -129,12 +129,14 @@ export class ComponentFactory implements ComponentFactoryApi { clusterReference: ClusterReferenceName, namespace: NamespaceName, phase: DeploymentPhase.REQUESTED | DeploymentPhase.STARTED, + portForwardConfigs?: portForwardConfig[], ): ConsensusNodeStateSchema { const metadata: ComponentStateMetadataSchema = new ComponentStateMetadataSchema( nodeId, namespace.name, clusterReference, phase, + portForwardConfigs, ); return new ConsensusNodeStateSchema(metadata); @@ -144,9 +146,16 @@ export class ComponentFactory implements ComponentFactoryApi { nodeIds: NodeId[], clusterReference: ClusterReferenceName, namespace: NamespaceName, + portForwardConfigs?: portForwardConfig[], ): ConsensusNodeStateSchema[] { return nodeIds.map((nodeId: NodeId) => - this.createNewConsensusNodeComponent(nodeId, clusterReference, namespace, DeploymentPhase.REQUESTED), + this.createNewConsensusNodeComponent( + nodeId, + clusterReference, + namespace, + DeploymentPhase.REQUESTED, + portForwardConfigs, + ), ); } } diff --git a/src/core/config/remote/components-data-wrapper.ts b/src/core/config/remote/components-data-wrapper.ts index d592e5e41b..6d1a206b88 100644 --- a/src/core/config/remote/components-data-wrapper.ts +++ b/src/core/config/remote/components-data-wrapper.ts @@ -8,6 +8,10 @@ import {type DeploymentPhase} from '../../../data/schema/model/remote/deployment import {type ClusterReferenceName, type ComponentId} from '../../../types/index.js'; import {type ComponentsDataWrapperApi} from 
'./api/components-data-wrapper-api.js'; import {type DeploymentStateSchema} from '../../../data/schema/model/remote/deployment-state-schema.js'; +import {type PodReference} from '../../../integration/kube/resources/pod/pod-reference.js'; +import {type K8} from '../../../integration/kube/k8.js'; +import {type SoloLogger} from '../../logging/solo-logger.js'; +import * as constants from '../../constants.js'; export class ComponentsDataWrapper implements ComponentsDataWrapperApi { public constructor(public state: DeploymentStateSchema) {} @@ -15,7 +19,7 @@ export class ComponentsDataWrapper implements ComponentsDataWrapperApi { /* -------- Modifiers -------- */ /** Used to add new component to their respective group. */ - public addNewComponent(component: BaseStateSchema, type: ComponentTypes): void { + public addNewComponent(component: BaseStateSchema, type: ComponentTypes, isReplace?: boolean): void { const componentId: ComponentId = component.metadata.id; if (typeof componentId !== 'number' || componentId < 0) { @@ -27,7 +31,7 @@ export class ComponentsDataWrapper implements ComponentsDataWrapperApi { } const addComponentCallback: (components: BaseStateSchema[]) => void = (components): void => { - if (this.checkComponentExists(components, component)) { + if (this.checkComponentExists(components, component) && !isReplace) { throw new SoloError('Component exists', undefined, component); } components[componentId] = component; @@ -84,6 +88,18 @@ export class ComponentsDataWrapper implements ComponentsDataWrapperApi { return component; } + public getComponentByType(type: ComponentTypes): T[] { + let components: T[] = []; + + const getComponentsByTypeCallback: (comps: BaseStateSchema[]) => void = (comps): void => { + components = comps as T[]; + }; + + this.applyCallbackToComponentGroup(type, getComponentsByTypeCallback); + + return components; + } + public getComponentsByClusterReference( type: ComponentTypes, clusterReference: ClusterReferenceName, @@ -191,4 +207,84 @@ 
export class ComponentsDataWrapper implements ComponentsDataWrapperApi { return newComponentId; } + + /** + * Manages port forwarding for a component, checking if it's already enabled and persisting configuration + * @param clusterReference The cluster reference to forward to + * @param podReference The pod reference to forward to + * @param podPort The port on the pod to forward from + * @param localPort The local port to forward to (starting port if not available) + * @param k8Client The Kubernetes client to use for port forwarding + * @param logger Logger for messages + * @param componentType The component type for persistence + * @param label Label for the port forward + * @param reuse Whether to reuse existing port forward if available + * @param nodeId Optional node ID for finding component when cluster reference is not available + * @returns The local port number that was used for port forwarding + */ + public async managePortForward( + clusterReference: ClusterReferenceName, + podReference: PodReference, + podPort: number, + localPort: number, + k8Client: K8, + logger: SoloLogger, + componentType: ComponentTypes, + label: string, + reuse: boolean = false, + nodeId?: number, + ): Promise { + // found component by cluster reference or nodeId + let component: BaseStateSchema; + if (clusterReference) { + const schemeComponents: BaseStateSchema[] = this.getComponentsByClusterReference( + componentType, + clusterReference, + ); + component = schemeComponents[0]; + } else { + component = this.getComponentById(componentType, nodeId); + } + + if (component === undefined) { + // it is possible we are upgrading a chart and previous version has no clusterReference save in configMap + // so we will not be able to find component by clusterReference + reuse = true; + logger.showUser(`Port forward config not found for previous installed ${label}, reusing existing port forward`); + } else if (component.metadata.portForwardConfigs) { + for (const portForwardConfig of 
component.metadata.portForwardConfigs) { + if (portForwardConfig.podPort === podPort) { + logger.showUser(`${label} Port forward already enabled at ${portForwardConfig.localPort}`); + return portForwardConfig.localPort; + } + } + } + + // Enable port forwarding + const portForwardPortNumber: number = await k8Client + .pods() + .readByReference(podReference) + .portForward(localPort, podPort, true, reuse); + + logger.addMessageGroup(constants.PORT_FORWARDING_MESSAGE_GROUP, 'Port forwarding enabled'); + logger.addMessageGroupMessage( + constants.PORT_FORWARDING_MESSAGE_GROUP, + `${label} port forward enabled on localhost:${portForwardPortNumber}`, + ); + + if (component !== undefined) { + if (component.metadata.portForwardConfigs === undefined) { + component.metadata.portForwardConfigs = []; + } + + logger.info(`add port localPort=${portForwardPortNumber}, podPort=${podPort}`); + // Save port forward config to component + component.metadata.portForwardConfigs.push({ + podPort, + localPort: portForwardPortNumber, + }); + } + + return portForwardPortNumber; + } } diff --git a/src/data/schema/migration/impl/remote/remote-config-schema-definition.ts b/src/data/schema/migration/impl/remote/remote-config-schema-definition.ts index 1a4bd53a0b..695d154bd1 100644 --- a/src/data/schema/migration/impl/remote/remote-config-schema-definition.ts +++ b/src/data/schema/migration/impl/remote/remote-config-schema-definition.ts @@ -10,6 +10,7 @@ import {InjectTokens} from '../../../../../core/dependency-injection/inject-toke import {type ObjectMapper} from '../../../../mapper/api/object-mapper.js'; import {RemoteConfigV1Migration} from './remote-config-v1-migration.js'; import {inject, injectable} from 'tsyringe-neo'; +import {RemoteConfigV2Migration} from './remote-config-v2-migration.js'; @injectable() export class RemoteConfigSchemaDefinition @@ -33,6 +34,6 @@ export class RemoteConfigSchemaDefinition } public get migrations(): SchemaMigration[] { - return [new 
RemoteConfigV1Migration()]; + return [new RemoteConfigV1Migration(), new RemoteConfigV2Migration()]; } } diff --git a/src/data/schema/migration/impl/remote/remote-config-v2-migration.ts b/src/data/schema/migration/impl/remote/remote-config-v2-migration.ts new file mode 100644 index 0000000000..59e600f72d --- /dev/null +++ b/src/data/schema/migration/impl/remote/remote-config-v2-migration.ts @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: Apache-2.0 + +import {type SchemaMigration} from '../../api/schema-migration.js'; +import {VersionRange} from '../../../../../business/utils/version-range.js'; +import {Version} from '../../../../../business/utils/version.js'; + +import {IllegalArgumentError} from '../../../../../business/errors/illegal-argument-error.js'; +import {InvalidSchemaVersionError} from '../../api/invalid-schema-version-error.js'; + +export class RemoteConfigV2Migration implements SchemaMigration { + public get range(): VersionRange { + return VersionRange.fromIntegerVersion(1); + } + + public get version(): Version { + return new Version(2); + } + + public migrate(source: object): Promise { + if (!source) { + // We should never pass null or undefined to this method, if this happens we should throw an error + throw new IllegalArgumentError('source must not be null or undefined'); + } + + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const clone: any = structuredClone(source); + + if (clone.schemaVersion && clone.schemaVersion !== 1) { + throw new InvalidSchemaVersionError(clone.schemaVersion, 1); + } + + // Update metadata with lastUpdated information + if (!clone.metadata) { + clone.metadata = {}; + } + + clone.metadata = { + ...clone.metadata, + lastUpdatedAt: new Date(), + lastUpdatedBy: { + name: 'system', + hostname: 'migration', + }, + }; + + // Add portForwardConfigs to each component state metadata if it doesn't exist + const initializePortForwardConfigs = (componentArray: any[]) => { + if (!componentArray) { + return; + } + + for 
(const component of componentArray) { + if (component.metadata && !component.metadata.portForwardConfigs) { + component.metadata.portForwardConfigs = []; + } + } + }; + + // Initialize portForwardConfigs for all component types + if (clone.state) { + initializePortForwardConfigs(clone.state.consensusNodes); + initializePortForwardConfigs(clone.state.blockNodes); + initializePortForwardConfigs(clone.state.mirrorNodes); + initializePortForwardConfigs(clone.state.relayNodes); + initializePortForwardConfigs(clone.state.haProxies); + initializePortForwardConfigs(clone.state.envoyProxies); + initializePortForwardConfigs(clone.state.explorers); + } + + // Set the schema version to the new version + clone.schemaVersion = this.version.value; + + return Promise.resolve(clone); + } +} diff --git a/src/data/schema/model/remote/state/component-state-metadata-schema.ts b/src/data/schema/model/remote/state/component-state-metadata-schema.ts index a97cdfd944..f7873d8757 100644 --- a/src/data/schema/model/remote/state/component-state-metadata-schema.ts +++ b/src/data/schema/model/remote/state/component-state-metadata-schema.ts @@ -3,7 +3,12 @@ import {Exclude, Expose, Transform} from 'class-transformer'; import {Transformations} from '../../utils/transformations.js'; import {type DeploymentPhase} from '../deployment-phase.js'; -import {type ClusterReferenceName, type ComponentId, type NamespaceNameAsString} from '../../../../../types/index.js'; +import { + type ClusterReferenceName, + type ComponentId, + type NamespaceNameAsString, + portForwardConfig, +} from '../../../../../types/index.js'; @Exclude() export class ComponentStateMetadataSchema { @@ -20,15 +25,20 @@ export class ComponentStateMetadataSchema { @Transform(Transformations.DeploymentPhase) public phase: DeploymentPhase; + @Expose() + public portForwardConfigs: portForwardConfig[]; + public constructor( id?: ComponentId, namespace?: NamespaceNameAsString, cluster?: ClusterReferenceName, phase?: DeploymentPhase, + 
portForwardConfigs?: portForwardConfig[], ) { this.id = id; this.namespace = namespace; this.cluster = cluster; this.phase = phase; + this.portForwardConfigs = portForwardConfigs; } } diff --git a/src/integration/kube/k8-client/resources/pod/k8-client-pod.ts b/src/integration/kube/k8-client/resources/pod/k8-client-pod.ts index 09140470f5..4277337232 100644 --- a/src/integration/kube/k8-client/resources/pod/k8-client-pod.ts +++ b/src/integration/kube/k8-client/resources/pod/k8-client-pod.ts @@ -2,6 +2,7 @@ import {type Pod} from '../../../resources/pod/pod.js'; import {type ExtendedNetServer} from '../../../../../types/index.js'; +import {PortUtilities} from '../../../../../business/utils/port-utilities.js'; import {PodReference} from '../../../resources/pod/pod-reference.js'; import {SoloError} from '../../../../../core/errors/solo-error.js'; import {sleep} from '../../../../../core/helpers.js'; @@ -30,6 +31,7 @@ import {PodName} from '../../../resources/pod/pod-name.js'; import {K8ClientPodCondition} from './k8-client-pod-condition.js'; import {type PodCondition} from '../../../resources/pod/pod-condition.js'; import {ShellRunner} from '../../../../../core/shell-runner.js'; +import chalk from 'chalk'; import http from 'node:http'; export class K8ClientPod implements Pod { @@ -90,12 +92,31 @@ export class K8ClientPod implements Pod { } } - public async portForward(localPort: number, podPort: number, detach: boolean = false): Promise { - try { - this.logger.debug( - `Creating port-forwarder for ${this.podReference.name}:${podPort} -> ${constants.LOCAL_HOST}:${localPort}`, - ); + /** + * Forward a local port to a port on the pod + * @param localPort The local port to forward from + * @param podPort The pod port to forward to + * @param detach Whether to run the port forwarding in detached mode + * @param reuse - if true, reuse the port number from previous port forward operation + * @returns Promise resolving to the port forwarder server when not detached, + * or the 
port number (which may differ from localPort if it was in use) when detached + */ + public async portForward(localPort: number, podPort: number, detach: true, reuse?: boolean): Promise; + public async portForward( + localPort: number, + podPort: number, + detach?: false, + reuse?: boolean, + ): Promise; + public async portForward( + localPort: number, + podPort: number, + detach: boolean = false, + reuse: boolean = false, + ): Promise { + let availablePort: number = localPort; + try { // first use http.request(url[, options][, callback]) GET method against localhost:localPort to kill any pre-existing // port-forward that is no longer active. It doesn't matter what the response is. const url: string = `http://${constants.LOCAL_HOST}:${localPort}`; @@ -126,6 +147,72 @@ export class K8ClientPod implements Pod { }); this.logger.debug(`Returned from http request against http://${constants.LOCAL_HOST}:${localPort}`); + if (reuse) { + // use `ps -ef | grep "kubectl port-forward"`|grep ${this.podReference.name} + // to find previous port-forward port number + const shellCommand: string[] = [ + 'ps', + '-ef', + '|', + 'grep', + 'port-forward', + '|', + 'grep', + `${this.podReference.name}`, + ]; + const shellRunner: ShellRunner = new ShellRunner(); + let result: string[]; + try { + result = await shellRunner.run(shellCommand.join(' '), [], true, false); + } catch (error) { + this.logger.error(`Failed to execute shell command: ${shellCommand.join(' ')}`); + this.logger.error(`Error: ${error instanceof Error ? error.message : String(error)}`); + throw new SoloError( + `Shell command execution failed: ${shellCommand.join(' ')}. Error: ${error instanceof Error ? 
error.message : String(error)}`, + ); + } + this.logger.debug(`ps -ef port-forward command result is ${result}`); + + // if length of result is 1 then could not find previous port forward running, then we can use next available port + if (!result || result.length === 0) { + this.logger.warn(`Shell command returned no output: ${shellCommand.join(' ')}`); + } + if (result.length > 1) { + // extract local port number from command output + const splitArray: string[] = result[0].split(/\s+/).filter(Boolean); + + // The port number should be the last element in the command + // It might be in the format localPort:podPort + const lastElement: string = splitArray.at(-1); + if (lastElement === undefined) { + throw new SoloError(`Failed to extract port: lastElement is undefined in command output: ${result[0]}`); + } + const extractedString: string = lastElement.split(':')[0]; + this.logger.debug(`extractedString = ${extractedString}`); + const parsedPort: number = Number.parseInt(extractedString, 10); + if (Number.isNaN(parsedPort) || parsedPort <= 0 || parsedPort > 65_535) { + throw new SoloError(`Invalid port extracted: ${extractedString}.`); + } else { + availablePort = parsedPort; + this.logger.info(`Reuse already enabled port ${availablePort}`); + } + // port forward already enabled + return availablePort; + } + } + + // Find an available port starting from localPort with a 30-second timeout + availablePort = await PortUtilities.findAvailablePort(localPort, Duration.ofSeconds(30).toMillis(), this.logger); + + if (availablePort === localPort) { + this.logger.showUser(chalk.yellow(`Using requested port ${localPort}`)); + } else { + this.logger.showUser(chalk.yellow(`Using available port ${availablePort}`)); + } + this.logger.debug( + `Creating port-forwarder for ${this.podReference.name}:${podPort} -> ${constants.LOCAL_HOST}:${availablePort}`, + ); + // if detach is true, start a port-forwarder in detached mode if (detach) { this.logger.warn( @@ -133,12 +220,12 @@ export 
class K8ClientPod implements Pod { 'is connected to terminates.', ); await new ShellRunner().run( - `kubectl port-forward -n ${this.podReference.namespace.name} pods/${this.podReference.name} ${localPort}:${podPort}`, + `kubectl port-forward -n ${this.podReference.namespace.name} pods/${this.podReference.name} ${availablePort}:${podPort}`, [], false, true, ); - return undefined; // detached mode does not return a server instance + return availablePort; } const ns: NamespaceName = this.podReference.namespace; @@ -152,12 +239,12 @@ export class K8ClientPod implements Pod { })) as ExtendedNetServer; // add info for logging - server.info = `${this.podReference.name}:${podPort} -> ${constants.LOCAL_HOST}:${localPort}`; - server.localPort = localPort; + server.info = `${this.podReference.name}:${podPort} -> ${constants.LOCAL_HOST}:${availablePort}`; + server.localPort = availablePort; this.logger.debug(`Starting port-forwarder [${server.info}]`); - return server.listen(localPort, constants.LOCAL_HOST); + return server.listen(availablePort, constants.LOCAL_HOST); } catch (error) { - const message: string = `failed to start port-forwarder [${this.podReference.name}:${podPort} -> ${constants.LOCAL_HOST}:${localPort}]: ${error.message}`; + const message: string = `failed to start port-forwarder [${this.podReference.name}:${podPort} -> ${constants.LOCAL_HOST}:${availablePort}]: ${error.message}`; throw new SoloError(message, error); } } diff --git a/src/integration/kube/resources/pod/pod.ts b/src/integration/kube/resources/pod/pod.ts index 952d55aebb..61fa47d53b 100644 --- a/src/integration/kube/resources/pod/pod.ts +++ b/src/integration/kube/resources/pod/pod.ts @@ -64,10 +64,13 @@ export interface Pod { * -> localhost:localPort -> port-forward-tunnel -> kubernetes-pod:targetPort * @param localPort - the local port to forward to * @param podPort - the port on the pod to forward from - * @param detach - if true, the port forward will run in the background and return 
immediately - * @returns an instance of ExtendedNetServer + * @param detach - if true, the port forward will run in the background and return the port number + * @param reuse - if true, reuse the port number from previous port forward operation + * @returns Promise resolving to the port forwarder server when not detached, + * or the port number (which may differ from localPort if it was in use) when detached */ - portForward(localPort: number, podPort: number, detach?: boolean): Promise<ExtendedNetServer>; + portForward(localPort: number, podPort: number, detach: true, reuse?: boolean): Promise<number>; + portForward(localPort: number, podPort: number, detach?: false, reuse?: boolean): Promise<ExtendedNetServer>; /** * Stop the port forward diff --git a/src/types/index.ts index 8f4e095802..611e6aa873 100644 --- a/src/types/index.ts +++ b/src/types/index.ts @@ -124,6 +124,11 @@ export interface GossipEndpoint { port: number; } +export interface portForwardConfig { + localPort: number; + podPort: number; +} + export interface CommandDefinition { command: string; desc: string; diff --git a/test/unit/core/network/port-utilities.test.ts b/test/unit/core/network/port-utilities.test.ts new file mode 100644 index 0000000000..3e71366376 --- /dev/null +++ b/test/unit/core/network/port-utilities.test.ts @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: Apache-2.0 + +import {expect} from 'chai'; +import {describe, it} from 'mocha'; +import net from 'node:net'; +import {PortUtilities} from '../../../../src/business/utils/port-utilities.js'; + +// Mock logger for testing +const mockLogger = { + debug: (): void => {}, + error: (): void => {}, + info: (): void => {}, + warn: (): void => {}, + showUser: (): void => {}, + showUserError: (): void => {}, + setDevMode: (): void => {}, + nextTraceId: (): void => {}, + prepMeta: (meta?: object): object => meta || {}, + showList: (): void => {}, + showJSON: (): void => {}, + addMessageGroup: (): void => {}, + addMessageGroupMessage: (): void => {}, + 
showMessageGroup: (): void => {}, + getMessageGroupKeys: (): string[] => [], + showAllMessageGroups: (): void => {}, +}; + +describe('Port Utils', (): void => { + describe('findAvailablePort', (): void => { + it('should find the next available port when the initial port is in use', async (): Promise<void> => { + // Create a server to occupy a port + const server = net.createServer(); + const basePort = 8000; // Use a port that's likely to be available for testing + + // Start the server on the base port + await new Promise<void>(resolve => { + server.listen(basePort, '127.0.0.1', () => { + resolve(); + }); + }); + + try { + // Verify the port is actually in use + const isBasePortAvailable = await PortUtilities.isPortAvailable(basePort); + expect(isBasePortAvailable).to.be.false; + + // Call findAvailablePort with the occupied port + const availablePort = await PortUtilities.findAvailablePort(basePort, 5000, mockLogger); + + // Verify that the returned port is the next port (basePort + 1) + expect(availablePort).to.equal(basePort + 1); + + // Verify that the returned port is actually available + const isNextPortAvailable = await PortUtilities.isPortAvailable(basePort + 1); + expect(isNextPortAvailable).to.be.true; + } finally { + // Clean up: close the server + await new Promise<void>(resolve => { + server.close(() => { + resolve(); + }); + }); + } + }); + + it('should return the initial port if it is available', async (): Promise<void> => { + const basePort = 9000; // Use a different port for this test + + // Verify the port is available first + const isBasePortAvailable = await PortUtilities.isPortAvailable(basePort); + expect(isBasePortAvailable).to.be.true; + + // Call findAvailablePort with an available port + const availablePort = await PortUtilities.findAvailablePort(basePort, 5000, mockLogger); + + // Verify that the returned port is the same as the input port + expect(availablePort).to.equal(basePort); + }); + }); +}); diff --git 
a/test/unit/data/schema/migration/impl/remote/remote-config-schema.test.ts b/test/unit/data/schema/migration/impl/remote/remote-config-schema.test.ts index 52eca852e6..4dedd8d22c 100644 --- a/test/unit/data/schema/migration/impl/remote/remote-config-schema.test.ts +++ b/test/unit/data/schema/migration/impl/remote/remote-config-schema.test.ts @@ -38,7 +38,7 @@ describe('RemoteConfigSchema', () => { const schema: RemoteConfigSchemaDefinition = new RemoteConfigSchemaDefinition(objectMapper); const migrations: SchemaMigration[] = schema.migrations; expect(Array.isArray(migrations)).equal(true); - expect(migrations.length).equal(1); + expect(migrations.length).equal(2); expect(migrations[0]).instanceOf(RemoteConfigV1Migration); }); });