Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ require (
github.com/Masterminds/sprig/v3 v3.3.0
github.com/compose-spec/compose-go/v2 v2.10.2
github.com/go-viper/mapstructure/v2 v2.5.0
github.com/score-spec/score-go v1.13.0
github.com/score-spec/score-go v1.13.1-0.20260408135027-12a305ce4a35
github.com/spf13/cobra v1.10.2
github.com/spf13/pflag v1.0.10
github.com/stretchr/testify v1.11.1
Expand Down
4 changes: 2 additions & 2 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -78,8 +78,8 @@ github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6Ng
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY=
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ=
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU=
github.com/score-spec/score-go v1.13.0 h1:m4Df2U775DaEIdmfeYXULK9iS6Cb8eSXka9QD9udJcA=
github.com/score-spec/score-go v1.13.0/go.mod h1:3abE79XO6CEiAzZNzfnHQIm18TWrefL0ihmObCvRWO8=
github.com/score-spec/score-go v1.13.1-0.20260408135027-12a305ce4a35 h1:a7btdDwz4Aiy0TnUhKt+zpM7Ghro0u55ypeafc19i98=
github.com/score-spec/score-go v1.13.1-0.20260408135027-12a305ce4a35/go.mod h1:3abE79XO6CEiAzZNzfnHQIm18TWrefL0ihmObCvRWO8=
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=
Expand Down
71 changes: 71 additions & 0 deletions internal/command/generate.go
Original file line number Diff line number Diff line change
Expand Up @@ -166,6 +166,11 @@ arguments.
return fmt.Errorf("failed to convert '%s' to structure: %w", workloadName, err)
}

// Validate container before relationships (cycles, unknown refs, self-refs)
if err := validateContainerBefore(&out); err != nil {
return fmt.Errorf("validation errors in workload '%s': %w", workloadName, err)
}

// Gather container build contexts, these will be stored and added to the generated compose output later
containerBuildContexts := make(map[string]types.BuildConfig)
if v, _ := cmd.Flags().GetStringArray(generateCmdBuildFlag); len(v) > 0 {
Expand Down Expand Up @@ -537,3 +542,69 @@ func injectWaitService(p *types.Project) (string, bool) {
p.Services[newService.Name] = newService
return newService.Name, true
}

// validateContainerBefore validates before relationships in a workload:
//   - All container names referenced in before entries must exist
//   - A container may not reference itself in a before entry
//   - The before relationships must not contain cycles
//
// All problems found are collected and returned as a single joined error so
// the user can correct everything in one pass; nil is returned when valid.
func validateContainerBefore(workload *score.Workload) error {
	containerNames := make(map[string]struct{}, len(workload.Containers))
	for name := range workload.Containers {
		containerNames[name] = struct{}{}
	}

	errMsgs := []string{}

	// waitingFor maps a container name to the containers it references in its before field.
	waitingFor := make(map[string][]string)
	for containerName, container := range workload.Containers {
		for _, beforeElem := range container.Before {
			for _, dep := range beforeElem.Containers {
				if dep == containerName {
					errMsgs = append(errMsgs, fmt.Sprintf("container %q has a self-referencing before entry", containerName))
					continue
				}
				if _, exists := containerNames[dep]; !exists {
					errMsgs = append(errMsgs, fmt.Sprintf("container %q before refers to unknown container %q", containerName, dep))
					continue
				}
				waitingFor[containerName] = append(waitingFor[containerName], dep)
			}
		}
	}

	// DFS cycle detection using white/gray/black coloring. A gray node is on
	// the current DFS path, so reaching one again means a cycle was closed.
	const (
		white = 0 // not yet visited
		gray  = 1 // on the current DFS path
		black = 2 // fully explored, known cycle-free
	)
	color := make(map[string]int)
	// stack mirrors the current DFS path so cycle members can be reported.
	var stack []string
	var cyclePath []string
	var dfs func(node string) bool
	dfs = func(node string) bool {
		color[node] = gray
		stack = append(stack, node)
		for _, dep := range waitingFor[node] {
			if color[dep] == gray {
				// Closed a cycle: capture the path from dep around back to dep.
				for i, n := range stack {
					if n == dep {
						cyclePath = append(cyclePath, stack[i:]...)
						cyclePath = append(cyclePath, dep)
						break
					}
				}
				return true
			}
			if color[dep] == white {
				if dfs(dep) {
					return true
				}
			}
		}
		stack = stack[:len(stack)-1]
		color[node] = black
		return false
	}
	for name := range workload.Containers {
		if color[name] == white && dfs(name) {
			// Name the participating containers so the user knows what to fix,
			// instead of only reporting that some cycle exists.
			errMsgs = append(errMsgs, fmt.Sprintf("containers before relationships contain a cycle: %s", strings.Join(cyclePath, " -> ")))
			break
		}
	}

	if len(errMsgs) > 0 {
		return fmt.Errorf("%s", strings.Join(errMsgs, "; "))
	}
	return nil
}
71 changes: 71 additions & 0 deletions internal/command/generate_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -453,6 +453,77 @@ resources:
})
}

// TestInitAndGenerate_with_before_ordering checks that a workload whose init
// container must complete before the main container produces a compose file
// with a depends_on entry using the service_completed_successfully condition.
func TestInitAndGenerate_with_before_ordering(t *testing.T) {
	workDir := changeToTempDir(t)

	// Initialize the project state in the temporary directory.
	initOut, _, initErr := executeAndResetCommand(context.Background(), rootCmd, []string{"init"})
	assert.NoError(t, initErr)
	assert.Equal(t, "", initOut)

	// Workload with an init container that must finish before main starts.
	scoreSpec := []byte(`
apiVersion: score.dev/v1b1
metadata:
  name: example
containers:
  init:
    image: busybox
    before:
    - ready: complete
      containers:
      - main
  main:
    image: nginx
`)
	assert.NoError(t, os.WriteFile("score.yaml", scoreSpec, 0644))

	// Generate and verify the ordering constraint appears in the output.
	genOut, _, genErr := executeAndResetCommand(context.Background(), rootCmd, []string{"generate", "score.yaml"})
	assert.NoError(t, genErr)
	assert.Equal(t, "", genOut)

	composeRaw, readErr := os.ReadFile(filepath.Join(workDir, "compose.yaml"))
	assert.NoError(t, readErr)
	assert.Contains(t, string(composeRaw), "depends_on")
	assert.Contains(t, string(composeRaw), "service_completed_successfully")

	t.Run("validate compose spec", func(t *testing.T) {
		// Skip the docker round-trip when the environment has no docker.
		if os.Getenv("NO_DOCKER") != "" {
			t.Skip("NO_DOCKER is set")
			return
		}
		dockerBin, lookErr := exec.LookPath("docker")
		require.NoError(t, lookErr)
		validate := exec.Command(dockerBin, "compose", "-f", "compose.yaml", "config", "--quiet", "--dry-run")
		validate.Dir = workDir
		validate.Stdout = os.Stdout
		validate.Stderr = os.Stderr
		assert.NoError(t, validate.Run())
	})
}

// TestInitAndGenerate_with_before_cycle checks that generate fails with a
// cycle error when two containers each declare the other in their before list.
func TestInitAndGenerate_with_before_cycle(t *testing.T) {
	_ = changeToTempDir(t)

	initOut, _, initErr := executeAndResetCommand(context.Background(), rootCmd, []string{"init"})
	assert.NoError(t, initErr)
	assert.Equal(t, "", initOut)

	// app waits for backend and backend waits for app: an ordering cycle.
	scoreSpec := []byte(`
apiVersion: score.dev/v1b1
metadata:
  name: example
containers:
  app:
    image: busybox
    before:
    - containers:
      - backend
  backend:
    image: busybox
    before:
    - containers:
      - app
`)
	assert.NoError(t, os.WriteFile("score.yaml", scoreSpec, 0644))

	_, _, genErr := executeAndResetCommand(context.Background(), rootCmd, []string{"generate", "score.yaml"})
	assert.Error(t, genErr)
	assert.Contains(t, genErr.Error(), "cycle")
}

func TestGeneratePostgresResource(t *testing.T) {
td := changeToTempDir(t)
stdout, _, err := executeAndResetCommand(context.Background(), rootCmd, []string{"init"})
Expand Down
93 changes: 86 additions & 7 deletions internal/compose/convert.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,21 +66,44 @@ func ConvertSpec(state *project.State, spec *score.Workload) (*compose.Project,

// When multiple containers are specified we need to identify one container as the "main" container which will own
// the network and use the native workload name. All other containers in this workload will have the container
// name appended as a suffix. We use the natural sort order of the container names and pick the first one
// name appended as a suffix. We use the natural sort order of the container names and pick the first one that
// is not expected to exit (i.e. does not have all before entries with ready: complete).
containerNames := make([]string, 0, len(spec.Containers))
for name := range spec.Containers {
containerNames = append(containerNames, name)
}
sort.Strings(containerNames)

// Determine which container should own the network. Skip containers that are expected to exit
// (all their before entries specify ready: complete) since their network namespace goes away.
primaryContainer := containerNames[0] // default to first alphabetically
for _, name := range containerNames {
cSpec := spec.Containers[name]
if len(cSpec.Before) == 0 {
// Container has no before entries, it stays running — good candidate
primaryContainer = name
break
}
allComplete := true
for _, entry := range cSpec.Before {
if entry.Ready == nil || *entry.Ready != score.ContainerBeforeElemReadyComplete {
allComplete = false
break
}
}
if !allComplete {
primaryContainer = name
break
}
}

variablesSubstitutor := framework.Substituter{
Replacer: deferredSubstitutionFunction,
UnEscaper: func(s string) (string, error) {
return s, nil
},
}

var firstService string
for _, containerName := range containerNames {
cSpec := spec.Containers[containerName]

Expand Down Expand Up @@ -171,22 +194,78 @@ func ConvertSpec(state *project.State, spec *score.Workload) (*compose.Project,
svc.Image = ""
}

// if we are not the "first" service, then inherit the network from the first service
if firstService == "" {
firstService = svc.Name
// if we are not the primary container, then inherit the network from the primary service.
// However, skip network_mode for containers that are expected to exit (all their before
// entries have ready: complete) to avoid circular dependencies.
if containerName == primaryContainer {
// We name the containers as (workload name)-(container name) but we want the name for the main network
// interface for be (workload name). So we set the hostname itself. This means that workloads cannot have
// the same name within the project. But that's already enforced elsewhere.
svc.Hostname = workloadName
} else {
} else if !isInitContainer(spec.Containers[containerName]) {
svc.Ports = nil
svc.NetworkMode = "service:" + firstService
svc.NetworkMode = "service:" + workloadName + "-" + primaryContainer
}
composeProject.Services[svc.Name] = svc
}

// Invert before -> depends_on: if container A declares before [B], then service B depends_on A.
for _, containerName := range containerNames {
cSpec := spec.Containers[containerName]
for _, beforeElem := range cSpec.Before {
// Determine the compose condition from the ready field
condition := "service_started" // default when ready is not specified
if beforeElem.Ready != nil {
switch *beforeElem.Ready {
case score.ContainerBeforeElemReadyComplete:
condition = "service_completed_successfully"
case score.ContainerBeforeElemReadyHealthy:
condition = "service_healthy"
case score.ContainerBeforeElemReadyStarted:
condition = "service_started"
}
}

sourceServiceName := workloadName + "-" + containerName

for _, targetContainerName := range beforeElem.Containers {
targetServiceName := workloadName + "-" + targetContainerName

if condition == "service_healthy" && cSpec.ReadinessProbe == nil && cSpec.LivenessProbe == nil {
return nil, fmt.Errorf("containers.%s.before: ready 'healthy' requires a readiness or liveness probe to be defined", containerName)
}

// Add depends_on to the target service
svc := composeProject.Services[targetServiceName]
if svc.DependsOn == nil {
svc.DependsOn = make(compose.DependsOnConfig)
}
svc.DependsOn[sourceServiceName] = compose.ServiceDependency{
Condition: condition,
Required: true,
}
composeProject.Services[targetServiceName] = svc
}
}
}

return &composeProject, nil
}

// isInitContainer reports whether the container is expected to exit, which is
// the case when it has at least one before entry and every one of those
// entries specifies ready: complete.
func isInitContainer(c score.Container) bool {
	hasEntries := len(c.Before) > 0
	for _, b := range c.Before {
		// An unset or non-complete ready means the container keeps running.
		if b.Ready == nil || *b.Ready != score.ContainerBeforeElemReadyComplete {
			return false
		}
	}
	return hasEntries
}

// buildWorkloadAnnotations returns an annotation set for the workload service.
func buildWorkloadAnnotations(name string, spec *score.Workload) map[string]string {
var out map[string]string
Expand Down
Loading