Unverified Commit 2cf2f6eb authored by Steve Azzopardi

Merge remote-tracking branch 'security/master' into master

parents 2d8a8da7 08d9001a
......@@ -78,6 +78,7 @@ const (
BuildStageArchiveCache BuildStage = "archive_cache"
BuildStageUploadOnSuccessArtifacts BuildStage = "upload_artifacts_on_success"
BuildStageUploadOnFailureArtifacts BuildStage = "upload_artifacts_on_failure"
BuildStageCleanupFileVariables BuildStage = "cleanup_file_variables"
)
// staticBuildStages is a list of BuildStages which are executed on every build
......@@ -91,6 +92,7 @@ var staticBuildStages = []BuildStage{
BuildStageArchiveCache,
BuildStageUploadOnSuccessArtifacts,
BuildStageUploadOnFailureArtifacts,
BuildStageCleanupFileVariables,
}
const (
......@@ -382,6 +384,7 @@ func getPredefinedEnv(buildStage BuildStage) bool {
BuildStageArchiveCache: true,
BuildStageUploadOnFailureArtifacts: true,
BuildStageUploadOnSuccessArtifacts: true,
BuildStageCleanupFileVariables: true,
}
predefined, ok := env[buildStage]
......@@ -402,6 +405,7 @@ func GetStageDescription(stage BuildStage) string {
BuildStageArchiveCache: "Saving cache",
BuildStageUploadOnFailureArtifacts: "Uploading artifacts for failed job",
BuildStageUploadOnSuccessArtifacts: "Uploading artifacts for successful job",
BuildStageCleanupFileVariables: "Cleaning up file based variables",
}
description, ok := descriptions[stage]
......@@ -470,6 +474,8 @@ func (b *Build) executeScript(ctx context.Context, executor Executor) error {
endTime := time.Now()
b.executeUploadReferees(ctx, startTime, endTime)
b.removeFileBasedVariables(ctx, executor)
// Use the job's error as the most important one
if err != nil {
return err
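The call ordering above encodes a deliberate rule: referee upload and file-variable cleanup always run, but their failures never mask the job's own error. A minimal, self-contained sketch of that pattern — the `finishJob` helper is hypothetical, not the runner's actual signature:

```go
package main

import (
	"errors"

	"github.com/sirupsen/logrus"
)

// finishJob mirrors the rule above: cleanup runs unconditionally, its
// failure is only logged, and the job's own error is what gets returned.
func finishJob(jobErr error, cleanup func() error) error {
	if err := cleanup(); err != nil {
		logrus.WithError(err).Warning("cleanup failed")
	}
	return jobErr
}

func main() {
	err := finishJob(
		errors.New("script failed"),
		func() error { return errors.New("cleanup failed") },
	)
	logrus.Infoln(err) // logs "script failed", not the cleanup error
}
```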
......@@ -499,6 +505,13 @@ func (b *Build) createReferees(executor Executor) {
b.Referees = referees.CreateReferees(executor, b.Runner.Referees, b.Log())
}
func (b *Build) removeFileBasedVariables(ctx context.Context, executor Executor) {
err := b.executeStage(ctx, BuildStageCleanupFileVariables, executor)
if err != nil {
b.Log().WithError(err).Warning("Error while executing file based variables removal script")
}
}
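A minimal sketch of what this stage has to accomplish on disk, assuming only the layout exercised by the integration test later in this diff (file variables are written to `<build dir>.tmp/<VARIABLE_NAME>`); in the real runner the removal is performed by a generated shell script executed through the executor, not by Go code like this:

```go
// Package sketch illustrates the cleanup stage's on-disk effect; it is
// not part of the runner.
package sketch

import (
	"fmt"
	"os"
	"path/filepath"
)

// cleanupFileVariables removes the files backing file-type variables.
// The real runner generates an equivalent shell script and runs it as
// the cleanup_file_variables build stage.
func cleanupFileVariables(buildDir string, names []string) error {
	tmpDir := fmt.Sprintf("%s.tmp", buildDir)
	for _, name := range names {
		err := os.Remove(filepath.Join(tmpDir, name))
		if err != nil && !os.IsNotExist(err) {
			return err
		}
	}
	return nil
}
```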
func (b *Build) executeUploadReferees(ctx context.Context, startTime, endTime time.Time) {
if b.Referees == nil || b.ArtifactUploader == nil {
b.Log().Debug("Skipping referees execution")
......
......@@ -362,6 +362,7 @@ func TestJobFailure(t *testing.T) {
executor.On("Shell").Return(&ShellScriptInfo{Shell: "script-shell"})
executor.On("Run", matchBuildStage(BuildStagePrepare)).Return(nil).Once()
executor.On("Run", mock.Anything).Return(thrownErr).Times(2)
executor.On("Run", matchBuildStage(BuildStageCleanupFileVariables)).Return(nil).Once()
executor.On("Finish", thrownErr).Once()
RegisterExecutorProvider("build-run-job-failure", provider)
......@@ -446,6 +447,7 @@ func TestRunFailureRunsAfterScriptAndArtifactsOnFailure(t *testing.T) {
executor.On("Run", matchBuildStage("step_script")).Return(errors.New("build fail")).Once()
executor.On("Run", matchBuildStage(BuildStageAfterScript)).Return(nil).Once()
executor.On("Run", matchBuildStage(BuildStageUploadOnFailureArtifacts)).Return(nil).Once()
executor.On("Run", matchBuildStage(BuildStageCleanupFileVariables)).Return(nil).Once()
executor.On("Finish", errors.New("build fail")).Once()
RegisterExecutorProvider("build-run-run-failure", provider)
......@@ -477,6 +479,7 @@ func TestGetSourcesRunFailure(t *testing.T) {
executor.On("Run", matchBuildStage(BuildStagePrepare)).Return(nil).Once()
executor.On("Run", matchBuildStage(BuildStageGetSources)).Return(errors.New("build fail")).Times(3)
executor.On("Run", matchBuildStage(BuildStageUploadOnFailureArtifacts)).Return(nil).Once()
executor.On("Run", matchBuildStage(BuildStageCleanupFileVariables)).Return(nil).Once()
executor.On("Finish", errors.New("build fail")).Once()
build := registerExecutorWithSuccessfulBuild(t, provider, new(RunnerConfig))
......@@ -500,6 +503,7 @@ func TestArtifactDownloadRunFailure(t *testing.T) {
executor.On("Run", matchBuildStage(BuildStageRestoreCache)).Return(nil).Once()
executor.On("Run", matchBuildStage(BuildStageDownloadArtifacts)).Return(errors.New("build fail")).Times(3)
executor.On("Run", matchBuildStage(BuildStageUploadOnFailureArtifacts)).Return(nil).Once()
executor.On("Run", matchBuildStage(BuildStageCleanupFileVariables)).Return(nil).Once()
executor.On("Finish", errors.New("build fail")).Once()
build := registerExecutorWithSuccessfulBuild(t, provider, new(RunnerConfig))
......@@ -517,7 +521,7 @@ func TestArtifactUploadRunFailure(t *testing.T) {
executor.On("Cleanup").Once()
// Successful build script
executor.On("Shell").Return(&ShellScriptInfo{Shell: "script-shell"}).Times(8)
executor.On("Shell").Return(&ShellScriptInfo{Shell: "script-shell"}).Times(9)
executor.On("Run", matchBuildStage(BuildStagePrepare)).Return(nil).Once()
executor.On("Run", matchBuildStage(BuildStageGetSources)).Return(nil).Once()
executor.On("Run", matchBuildStage(BuildStageRestoreCache)).Return(nil).Once()
......@@ -526,6 +530,7 @@ func TestArtifactUploadRunFailure(t *testing.T) {
executor.On("Run", matchBuildStage(BuildStageAfterScript)).Return(nil).Once()
executor.On("Run", matchBuildStage(BuildStageArchiveCache)).Return(nil).Once()
executor.On("Run", matchBuildStage(BuildStageUploadOnSuccessArtifacts)).Return(errors.New("upload fail")).Once()
executor.On("Run", matchBuildStage(BuildStageCleanupFileVariables)).Return(nil).Once()
executor.On("Finish", errors.New("upload fail")).Once()
build := registerExecutorWithSuccessfulBuild(t, provider, new(RunnerConfig))
......@@ -555,6 +560,7 @@ func TestRestoreCacheRunFailure(t *testing.T) {
executor.On("Run", matchBuildStage(BuildStageGetSources)).Return(nil).Once()
executor.On("Run", matchBuildStage(BuildStageRestoreCache)).Return(errors.New("build fail")).Times(3)
executor.On("Run", matchBuildStage(BuildStageUploadOnFailureArtifacts)).Return(nil).Once()
executor.On("Run", matchBuildStage(BuildStageCleanupFileVariables)).Return(nil).Once()
executor.On("Finish", errors.New("build fail")).Once()
build := registerExecutorWithSuccessfulBuild(t, provider, new(RunnerConfig))
......@@ -1576,6 +1582,9 @@ func setupSuccessfulMockExecutor(
executor.On("Run", matchBuildStage(BuildStageUploadOnSuccessArtifacts)).
Return(nil).
Once()
executor.On("Run", matchBuildStage(BuildStageCleanupFileVariables)).
Return(nil).
Once()
return provider, assertFn
}
......
......@@ -251,6 +251,8 @@ type KubernetesConfig struct {
PodSecurityContext KubernetesPodSecurityContext `toml:"pod_security_context,omitempty" namespace:"pod-security-context" description:"A security context attached to each build pod"`
Volumes KubernetesVolumes `toml:"volumes"`
Services []Service `toml:"services,omitempty" json:"services" description:"Add service that is started with container"`
CapAdd []string `toml:"cap_add" json:"cap_add" long:"cap-add" env:"KUBERNETES_CAP_ADD" description:"Add Linux capabilities"`
CapDrop []string `toml:"cap_drop" json:"cap_drop" long:"cap-drop" env:"KUBERNETES_CAP_DROP" description:"Drop Linux capabilities"`
}
type KubernetesVolumes struct {
......
......@@ -122,6 +122,8 @@ The following keywords help to define the behavior of the Runner within Kubernet
pattern](https://docs.microsoft.com/en-us/azure/architecture/patterns/sidecar).
Read more about [using services](#using-services).
- `affinity`: Specify affinity rules that determine which node runs the build. Read more about [using affinity](#using-affinity).
- `cap_add`: Specify Linux capabilities that should be added to the job pod containers. [Read more about capabilities configuration in the Kubernetes executor](#capabilities-configuration).
- `cap_drop`: Specify Linux capabilities that should be dropped from the job pod containers. [Read more about capabilities configuration in the Kubernetes executor](#capabilities-configuration).
### Configuring executor Service Account
......@@ -504,6 +506,71 @@ concurrent = 1
]
```
## Capabilities configuration
[Kubernetes](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container)
allows you to configure the [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) that should be
added to or dropped from a container.
GitLab Runner supports configuration of capabilities with the `cap_add` and `cap_drop` keywords in the `[runners.kubernetes]`
section of the configuration file. By default, GitLab Runner provides
a [list of capabilities](#default-list-of-dropped-capabilities) that should be dropped.
Because the default list of dropped capabilities can intersect with user-defined capabilities, the following rules are
applied to determine the final list:
1. User-defined `cap_drop` has priority over user-defined `cap_add`. If
you define the same capability in both settings, only the one from `cap_drop` is passed
to the container.
1. User-defined `cap_add` has priority over the default list of dropped capabilities.
If you want to add a capability that is dropped by default, explicitly add it to
`cap_add`.
The final list of capabilities is added to all containers in the job's pod.
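For example, here is a minimal, self-contained sketch of how these precedence rules combine; the helper name `resolveCaps` is hypothetical (the runner's actual implementation is the Kubernetes executor's `getCapabilities` function, shown further down in this diff):

```go
package main

import "fmt"

// resolveCaps applies the documented precedence: the default drop list
// is seeded first, then user cap_add (which can re-enable a default
// drop), then user cap_drop, which always wins.
func resolveCaps(defaultDrop, capAdd, capDrop []string) (add, drop []string) {
	enabled := map[string]bool{}
	for _, c := range defaultDrop {
		enabled[c] = false
	}
	for _, c := range capAdd {
		enabled[c] = true
	}
	for _, c := range capDrop {
		enabled[c] = false
	}
	for c, on := range enabled {
		if on {
			add = append(add, c)
		} else {
			drop = append(drop, c)
		}
	}
	return add, drop
}

func main() {
	// default drop list = ["NET_RAW"],
	// cap_add = ["NET_RAW", "SYS_TIME"], cap_drop = ["SYS_TIME"]:
	add, drop := resolveCaps(
		[]string{"NET_RAW"},
		[]string{"NET_RAW", "SYS_TIME"},
		[]string{"SYS_TIME"},
	)
	fmt.Println(add, drop) // [NET_RAW] [SYS_TIME]
}
```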
A few notes:
- Remove the `CAP_` prefix from capability identifiers passed to the container configuration.
For example, to add or drop the `CAP_SYS_TIME` capability,
set the string `SYS_TIME` in `cap_add` or `cap_drop` in the configuration file.
- The owner of the Kubernetes cluster
[can define a PodSecurityPolicy](https://kubernetes.io/docs/concepts/policy/pod-security-policy/#capabilities),
where specific capabilities are allowed, restricted, or added by default.
These rules take precedence over any user-defined configuration.
Container runtimes can also define their own default list of capabilities, like those in
[Docker](https://github.com/moby/moby/blob/19.03/oci/defaults.go#L14-L32)
or [containerd](https://github.com/containerd/containerd/blob/v1.4.0/oci/spec.go#L93-L110).
### Default list of dropped capabilities
GitLab Runner tries to drop these capabilities by default. If any of them are required for the job
to be executed properly, you should explicitly add the capability with the `cap_add` setting:
<!-- kubernetes_default_cap_drop_list_start -->
- `NET_RAW`
<!-- kubernetes_default_cap_drop_list_end -->
### Example configuration
```toml
concurrent = 1
check_interval = 30
[[runners]]
name = "myRunner"
url = "gitlab.example.com"
executor = "kubernetes"
[runners.kubernetes]
# ...
cap_add = ["SYS_TIME", "IPC_LOCK"]
cap_drop = ["SYS_ADMIN"]
# ...
```
## Using Docker in your builds
There are a couple of caveats when using Docker in your builds while running on
......
......@@ -188,12 +188,16 @@ func (e *executor) getDockerImage(imageName string) (image *types.ImageInspect,
}
}
registryInfo := auth.ResolveConfigForImage(
registryInfo, err := auth.ResolveConfigForImage(
imageName,
e.Build.GetDockerAuthConfig(),
e.Shell().User,
e.Build.Credentials,
)
if err != nil {
return nil, err
}
if registryInfo != nil {
e.Println(fmt.Sprintf("Authenticating with credentials from %v", registryInfo.Source))
e.Debugln(fmt.Sprintf(
......
......@@ -60,6 +60,16 @@ var (
detectShellScript = shells.BashDetectShellScript
)
// GetDefaultCapDrop returns the default capabilities that should be dropped
// from a build container.
func GetDefaultCapDrop() []string {
return []string{
// Reasons for disabling NET_RAW by default were
// discussed in https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26833
"NET_RAW",
}
}
type commandTerminatedError struct {
exitCode int
}
......@@ -503,6 +513,11 @@ func (s *executor) buildContainer(
VolumeMounts: s.getVolumeMounts(),
SecurityContext: &api.SecurityContext{
Privileged: &privileged,
Capabilities: getCapabilities(
GetDefaultCapDrop(),
s.Config.Kubernetes.CapAdd,
s.Config.Kubernetes.CapDrop,
),
},
Stdin: true,
}
......@@ -769,7 +784,10 @@ func (s *executor) getVolumesForEmptyDirs() []api.Volume {
func (s *executor) setupCredentials() error {
s.Debugln("Setting up secrets")
authConfigs := auth.ResolveConfigs(s.Build.GetDockerAuthConfig(), s.Shell().User, s.Build.Credentials)
authConfigs, err := auth.ResolveConfigs(s.Build.GetDockerAuthConfig(), s.Shell().User, s.Build.Credentials)
if err != nil {
return err
}
if len(authConfigs) == 0 {
return nil
......
......@@ -2742,6 +2742,111 @@ func TestSetupBuildPod(t *testing.T) {
require.Equal(t, def.InitContainers, pod.Spec.InitContainers)
},
},
"support setting linux capabilities": {
RunnerConfig: common.RunnerConfig{
RunnerSettings: common.RunnerSettings{
Kubernetes: &common.KubernetesConfig{
Namespace: "default",
CapAdd: []string{"CAP_1", "CAP_2"},
CapDrop: []string{"CAP_3", "CAP_4"},
},
},
},
VerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {
require.NotEmpty(t, pod.Spec.Containers)
capabilities := pod.Spec.Containers[0].SecurityContext.Capabilities
require.NotNil(t, capabilities)
assert.Len(t, capabilities.Add, 2)
assert.Contains(t, capabilities.Add, api.Capability("CAP_1"))
assert.Contains(t, capabilities.Add, api.Capability("CAP_2"))
assert.Len(t, capabilities.Drop, 3)
assert.Contains(t, capabilities.Drop, api.Capability("CAP_3"))
assert.Contains(t, capabilities.Drop, api.Capability("CAP_4"))
assert.Contains(t, capabilities.Drop, api.Capability("NET_RAW"))
},
},
"setting linux capabilities overriding defaults": {
RunnerConfig: common.RunnerConfig{
RunnerSettings: common.RunnerSettings{
Kubernetes: &common.KubernetesConfig{
Namespace: "default",
CapAdd: []string{"NET_RAW", "CAP_2"},
},
},
},
VerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {
require.NotEmpty(t, pod.Spec.Containers)
capabilities := pod.Spec.Containers[0].SecurityContext.Capabilities
require.NotNil(t, capabilities)
assert.Len(t, capabilities.Add, 2)
assert.Contains(t, capabilities.Add, api.Capability("NET_RAW"))
assert.Contains(t, capabilities.Add, api.Capability("CAP_2"))
assert.Empty(t, capabilities.Drop)
},
},
"setting same linux capabilities, drop wins": {
RunnerConfig: common.RunnerConfig{
RunnerSettings: common.RunnerSettings{
Kubernetes: &common.KubernetesConfig{
Namespace: "default",
CapAdd: []string{"CAP_1"},
CapDrop: []string{"CAP_1"},
},
},
},
VerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {
require.NotEmpty(t, pod.Spec.Containers)
capabilities := pod.Spec.Containers[0].SecurityContext.Capabilities
require.NotNil(t, capabilities)
assert.Empty(t, capabilities.Add)
assert.Len(t, capabilities.Drop, 2)
assert.Contains(t, capabilities.Drop, api.Capability("NET_RAW"))
assert.Contains(t, capabilities.Drop, api.Capability("CAP_1"))
},
},
"support setting linux capabilities on all containers": {
RunnerConfig: common.RunnerConfig{
RunnerSettings: common.RunnerSettings{
Kubernetes: &common.KubernetesConfig{
Namespace: "default",
CapAdd: []string{"CAP_1"},
CapDrop: []string{"CAP_2"},
},
},
},
Options: &kubernetesOptions{
Services: common.Services{
{
Name: "test-service-0",
Command: []string{"application", "--debug"},
},
{
Name: "test-service-1",
Entrypoint: []string{"application", "--debug"},
},
},
},
VerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {
require.Len(t, pod.Spec.Containers, 4)
assertContainerCap := func(container api.Container) {
t.Run("container-"+container.Name, func(t *testing.T) {
capabilities := container.SecurityContext.Capabilities
require.NotNil(t, capabilities)
assert.Len(t, capabilities.Add, 1)
assert.Contains(t, capabilities.Add, api.Capability("CAP_1"))
assert.Len(t, capabilities.Drop, 2)
assert.Contains(t, capabilities.Drop, api.Capability("CAP_2"))
assert.Contains(t, capabilities.Drop, api.Capability("NET_RAW"))
})
}
assertContainerCap(pod.Spec.Containers[0])
assertContainerCap(pod.Spec.Containers[1])
assertContainerCap(pod.Spec.Containers[2])
assertContainerCap(pod.Spec.Containers[3])
},
},
}
for testName, test := range tests {
......
......@@ -276,3 +276,39 @@ func buildVariables(bv common.JobVariables) []api.EnvVar {
}
return e
}
func getCapabilities(defaultCapDrop []string, capAdd []string, capDrop []string) *api.Capabilities {
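// Resolution order: seed the map with the default drop list, let user
// cap_add override it, then let user cap_drop override everything, so a
// capability listed in both cap_add and cap_drop ends up dropped.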
enabled := make(map[string]bool)
for _, v := range defaultCapDrop {
enabled[v] = false
}
for _, v := range capAdd {
enabled[v] = true
}
for _, v := range capDrop {
enabled[v] = false
}
if len(enabled) < 1 {
return nil
}
return buildCapabilities(enabled)
}
func buildCapabilities(enabled map[string]bool) *api.Capabilities {
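// Split the resolved map into the Add and Drop lists expected by the
// container SecurityContext; map iteration order is not significant here.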
capabilities := new(api.Capabilities)
for c, add := range enabled {
if add {
capabilities.Add = append(capabilities.Add, api.Capability(c))
continue
}
capabilities.Drop = append(capabilities.Drop, api.Capability(c))
}
return capabilities
}
......@@ -479,3 +479,105 @@ func testVersionAndCodec() (version string, codec runtime.Codec) {
return
}
func TestGetCapabilities(t *testing.T) {
tests := map[string]struct {
defaultCapDrop []string
capAdd []string
capDrop []string
assertCapabilities func(t *testing.T, a *api.Capabilities)
}{
"no data provided": {
assertCapabilities: func(t *testing.T, a *api.Capabilities) {
assert.Nil(t, a)
},
},
"only default_cap_drop provided": {
defaultCapDrop: []string{"CAP_1", "CAP_2"},
assertCapabilities: func(t *testing.T, a *api.Capabilities) {
require.NotNil(t, a)
assert.Empty(t, a.Add)
assert.Len(t, a.Drop, 2)
assert.Contains(t, a.Drop, api.Capability("CAP_1"))
assert.Contains(t, a.Drop, api.Capability("CAP_2"))
},
},
"only custom cap_add provided": {
capAdd: []string{"CAP_1", "CAP_2"},
assertCapabilities: func(t *testing.T, a *api.Capabilities) {
require.NotNil(t, a)
assert.Len(t, a.Add, 2)
assert.Contains(t, a.Add, api.Capability("CAP_1"))
assert.Contains(t, a.Add, api.Capability("CAP_2"))
assert.Empty(t, a.Drop)
},
},
"only custom cap_drop provided": {
capDrop: []string{"CAP_1", "CAP_2"},
assertCapabilities: func(t *testing.T, a *api.Capabilities) {
require.NotNil(t, a)
assert.Empty(t, a.Add)
assert.Len(t, a.Drop, 2)
assert.Contains(t, a.Drop, api.Capability("CAP_1"))
assert.Contains(t, a.Drop, api.Capability("CAP_2"))
},
},
"default_cap_drop and custom cap_drop sums": {
defaultCapDrop: []string{"CAP_1", "CAP_2"},
capDrop: []string{"CAP_3", "CAP_4"},
assertCapabilities: func(t *testing.T, a *api.Capabilities) {
require.NotNil(t, a)
assert.Empty(t, a.Add)
assert.Len(t, a.Drop, 4)
assert.Contains(t, a.Drop, api.Capability("CAP_1"))
assert.Contains(t, a.Drop, api.Capability("CAP_2"))
assert.Contains(t, a.Drop, api.Capability("CAP_3"))
assert.Contains(t, a.Drop, api.Capability("CAP_4"))
},
},
"default_cap_drop and custom cap_drop duplicate": {
defaultCapDrop: []string{"CAP_1", "CAP_2"},
capDrop: []string{"CAP_2", "CAP_3"},
assertCapabilities: func(t *testing.T, a *api.Capabilities) {
require.NotNil(t, a)
assert.Empty(t, a.Add)
assert.Len(t, a.Drop, 3)
assert.Contains(t, a.Drop, api.Capability("CAP_1"))
assert.Contains(t, a.Drop, api.Capability("CAP_2"))
assert.Contains(t, a.Drop, api.Capability("CAP_3"))
},
},
"default_cap_drop and custom cap_add intersect": {
defaultCapDrop: []string{"CAP_1", "CAP_2"},
capAdd: []string{"CAP_2", "CAP_3"},
assertCapabilities: func(t *testing.T, a *api.Capabilities) {
require.NotNil(t, a)
assert.Len(t, a.Add, 2)
assert.Contains(t, a.Add, api.Capability("CAP_2"))
assert.Contains(t, a.Add, api.Capability("CAP_3"))
assert.Len(t, a.Drop, 1)
assert.Contains(t, a.Drop, api.Capability("CAP_1"))
},
},
"default_cap_drop and custom cap_add intersect and cap_drop forces": {
defaultCapDrop: []string{"CAP_1", "CAP_2"},
capAdd: []string{"CAP_2", "CAP_3"},
capDrop: []string{"CAP_2", "CAP_4"},
assertCapabilities: func(t *testing.T, a *api.Capabilities) {
require.NotNil(t, a)
assert.Len(t, a.Add, 1)
assert.Contains(t, a.Add, api.Capability("CAP_3"))
assert.Len(t, a.Drop, 3)
assert.Contains(t, a.Drop, api.Capability("CAP_1"))
assert.Contains(t, a.Drop, api.Capability("CAP_2"))
assert.Contains(t, a.Drop, api.Capability("CAP_4"))
},
},
}
for tn, tt := range tests {
t.Run(tn, func(t *testing.T) {
tt.assertCapabilities(t, getCapabilities(tt.defaultCapDrop, tt.capAdd, tt.capDrop))
})
}
}
......@@ -1306,3 +1306,52 @@ func TestBuildWithGitCleanFlags(t *testing.T) {
assert.Error(t, err, "cleanup_file does not exist")
})
}
func TestBuildFileVariablesRemoval(t *testing.T) {
getJobResponse := func(t *testing.T, jobResponseRequester func() (common.JobResponse, error)) common.JobResponse {
jobResponse, err := jobResponseRequester()
require.NoError(t, err)
return jobResponse
}
tests := map[string]struct {
jobResponse common.JobResponse
}{
"succeeded job": {
jobResponse: getJobResponse(t, common.GetSuccessfulBuild),
},
"failed job": {
jobResponse: getJobResponse(t, common.GetFailedBuild),
},
}
for tn, tt := range tests {
t.Run(tn, func(t *testing.T) {
shellstest.OnEachShell(t, func(t *testing.T, shell string) {
build, cleanup := newBuild(t, tt.jobResponse, shell)
defer cleanup()
testVariableName := "TEST_VARIABLE"
build.Variables = append(
build.Variables,
common.JobVariable{Key: testVariableName, Value: "test", File: true},
)
_ = buildtest.RunBuild(t, build)
tmpDir := fmt.Sprintf("%s.tmp", build.BuildDir)
variableFile := filepath.Join(tmpDir, testVariableName)
_, err := os.Stat(variableFile)
assert.Error(t, err)
assert.True(
t,
errors.Is(err, os.ErrNotExist),
`Expected os.Stat on the variable file to return the "does not exist" error`,
)
})
})
}
}
......@@ -15,6 +15,7 @@ import (
"github.com/docker/cli/cli/config/credentials"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/homedir"
"gitlab.com/gitlab-org/gitlab-runner/common"
)
......@@ -26,8 +27,9 @@ const (