Commit 78ba7344 authored by Kamil Trzcinski's avatar Kamil Trzcinski
Browse files

Reduce code complexity

parent b69bf8d8
......@@ -16,10 +16,7 @@ import (
"io/ioutil"
"os"
"os/exec"
"os/signal"
"strings"
"syscall"
"time"
)
type ExecCommand struct {
......@@ -64,139 +61,112 @@ func (c *ExecCommand) supportedOption(key string, _ interface{}) bool {
}
}
func (c *ExecCommand) parseYaml(job string, build *common.GetBuildResponse) error {
data, err := ioutil.ReadFile(".gitlab-ci.yml")
if err != nil {
return err
}
// parse gitlab-ci.yml
config := make(map[string]interface{})
err = yaml.Unmarshal(data, config)
if err != nil {
return err
}
// get job
jobConfig, ok := config[job].(map[interface{}]interface{})
if !ok {
return fmt.Errorf("no job named %q", job)
}
// buildCommands concatenates the global `before_script` section and the
// job's `script` section of .gitlab-ci.yml into a single command string.
//
// configBeforeScript may be nil (no global before_script defined);
// jobScript is required and a nil value is reported as an error.
func (c *ExecCommand) buildCommands(configBeforeScript, jobScript interface{}) (commands string, err error) {
	// A job without a script is invalid. Check this up front so the
	// "missing 'script'" error cannot be masked by a parse failure of
	// some other section.
	if jobScript == nil {
		err = fmt.Errorf("missing 'script' for job")
		return
	}

	beforeScript, err := c.getCommands(configBeforeScript)
	if err != nil {
		return
	}
	commands += beforeScript

	script, err := c.getCommands(jobScript)
	if err != nil {
		return
	}
	commands += script
	return
}
// parse variables
if variables, ok := config["variables"].(map[interface{}]interface{}); ok {
func (c *ExecCommand) buildVariables(configVariables interface{}) (buildVariables common.BuildVariables, err error) {
if variables, ok := configVariables.(map[interface{}]interface{}); ok {
for key, value := range variables {
if valueText, ok := value.(string); ok {
build.Variables = append(build.Variables, common.BuildVariable{
buildVariables = append(buildVariables, common.BuildVariable{
Key: key.(string),
Value: valueText,
Public: true,
})
} else {
return fmt.Errorf("invalid value for variable %q", key)
err = fmt.Errorf("invalid value for variable %q", key)
}
}
} else if config["variables"] != nil {
return errors.New("unsupported variables")
} else if configVariables != nil {
err = errors.New("unsupported variables")
}
return
}
build.Options = make(common.BuildOptions)
// buildOptions collects the supported option keys from the global config
// and the job config into a single BuildOptions map. Job-level options
// are applied after the global ones and therefore take precedence for
// duplicate keys.
func (c *ExecCommand) buildOptions(config map[string]interface{},
	jobConfig map[interface{}]interface{}) (options common.BuildOptions, err error) {
	options = make(common.BuildOptions)

	// Global options first.
	for key, value := range config {
		if c.supportedOption(key, value) {
			options[key] = value
		}
	}

	// Job options second; they overwrite global entries with the same key.
	for key, value := range jobConfig {
		// Guard the assertion: a non-string YAML key would otherwise panic.
		keyName, ok := key.(string)
		if !ok {
			err = fmt.Errorf("invalid option key: %v", key)
			return
		}
		if c.supportedOption(keyName, value) {
			options[keyName] = value
		}
	}
	return
}
func (c *ExecCommand) Execute(context *cli.Context) {
wd, err := os.Getwd()
func (c *ExecCommand) parseYaml(job string, build *common.GetBuildResponse) error {
data, err := ioutil.ReadFile(".gitlab-ci.yml")
if err != nil {
logrus.Fatalln(err)
}
switch len(context.Args()) {
case 1:
c.Job = context.Args().Get(0)
default:
cli.ShowSubcommandHelp(context)
os.Exit(1)
return
return err
}
c.Executor = context.Command.Name
build.Name = job
signals := make(chan os.Signal)
signal.Notify(signals, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
abortSignal := make(chan os.Signal)
doneSignal := make(chan int, 1)
// parse gitlab-ci.yml
config := make(map[string]interface{})
err = yaml.Unmarshal(data, config)
if err != nil {
return err
}
go func() {
interrupt := <-signals
// get job
jobConfig, ok := config[job].(map[interface{}]interface{})
if !ok {
return fmt.Errorf("no job named %q", job)
}
// request stop, but wait for force exit
for interrupt == syscall.SIGQUIT {
logrus.Warningln("Requested quit, waiting for builds to finish")
interrupt = <-signals
}
build.Commands, err = c.buildCommands(config["before_script"], jobConfig["script"])
if err != nil {
return err
}
logrus.Warningln("Requested exit:", interrupt)
build.Variables, err = c.buildVariables(config["variables"])
if err != nil {
return err
}
go func() {
for {
abortSignal <- interrupt
}
}()
select {
case newSignal := <-signals:
logrus.Fatalln("forced exit:", newSignal)
case <-time.After(common.ShutdownTimeout * time.Second):
logrus.Fatalln("shutdown timedout")
case <-doneSignal:
}
}()
build.Options, err = c.buildOptions(config, jobConfig)
if err != nil {
return err
}
// Add self-volume to docker
if c.RunnerSettings.Docker == nil {
c.RunnerSettings.Docker = &common.DockerConfig{}
if stage, ok := jobConfig["stage"].(string); ok {
build.Stage = stage
} else {
build.Stage = "test"
}
c.RunnerSettings.Docker.Volumes = append(c.RunnerSettings.Docker.Volumes, wd+":"+wd+":ro")
return nil
}
// Check if we have uncommitted changes
func (c *ExecCommand) createBuild(repoURL string, abortSignal chan os.Signal) (build *common.Build, err error) {
// Check if we have uncommitted changes
_, err = c.runCommand("git", "diff", "--quiet", "HEAD")
if err != nil {
logrus.Warningln("You most probably have uncommitted changes.")
......@@ -206,7 +176,7 @@ func (c *ExecCommand) Execute(context *cli.Context) {
// Parse Git settings
sha, err := c.runCommand("git", "rev-parse", "HEAD")
if err != nil {
logrus.Fatalln(err)
return
}
beforeSha, err := c.runCommand("git", "rev-parse", "HEAD~1")
......@@ -216,14 +186,14 @@ func (c *ExecCommand) Execute(context *cli.Context) {
refName, err := c.runCommand("git", "rev-parse", "--abbrev-ref", "HEAD")
if err != nil {
logrus.Fatalln(err)
return
}
newBuild := common.Build{
build = &common.Build{
GetBuildResponse: common.GetBuildResponse{
ID: 1,
ProjectID: 1,
RepoURL: wd,
RepoURL: repoURL,
Commands: "",
Sha: strings.TrimSpace(sha),
RefName: strings.TrimSpace(refName),
......@@ -241,15 +211,51 @@ func (c *ExecCommand) Execute(context *cli.Context) {
BuildAbort: abortSignal,
Network: nil,
}
return
}
func (c *ExecCommand) Execute(context *cli.Context) {
wd, err := os.Getwd()
if err != nil {
logrus.Fatalln(err)
}
switch len(context.Args()) {
case 1:
c.Job = context.Args().Get(0)
default:
cli.ShowSubcommandHelp(context)
os.Exit(1)
return
}
c.Executor = context.Command.Name
abortSignal := make(chan os.Signal)
doneSignal := make(chan int, 1)
go waitForInterrupts(nil, abortSignal, doneSignal)
// Add self-volume to docker
if c.RunnerSettings.Docker == nil {
c.RunnerSettings.Docker = &common.DockerConfig{}
}
c.RunnerSettings.Docker.Volumes = append(c.RunnerSettings.Docker.Volumes, wd+":"+wd+":ro")
// Create build
build, err := c.createBuild(wd, abortSignal)
if err != nil {
logrus.Fatalln(err)
}
err = c.parseYaml(c.Job, &newBuild.GetBuildResponse)
err = c.parseYaml(c.Job, &build.GetBuildResponse)
if err != nil {
logrus.Fatalln(err)
}
newBuild.AssignID()
build.AssignID()
err = newBuild.Run(&common.Config{})
err = build.Run(&common.Config{})
if err != nil {
logrus.Fatalln(err)
}
......
......@@ -222,6 +222,27 @@ func (mr *RunCommand) loadConfig() error {
}
mr.healthy = nil
mr.log().Println("Config loaded.")
return nil
}
// checkConfig reloads the configuration file if it was modified on disk
// since the last successful load. It returns nil when the file has not
// changed, and the load error otherwise.
func (mr *RunCommand) checkConfig() (err error) {
	info, err := os.Stat(mr.ConfigFile)
	if err != nil {
		return err
	}

	// Nothing to do when the file has not changed since the last load.
	if !mr.config.ModTime.Before(info.ModTime()) {
		return nil
	}

	err = mr.loadConfig()
	if err != nil {
		// Remember the mod time so the same broken file is not retried
		// on every poll. Logging is left to the caller (updateConfig),
		// which previously caused the error to be reported twice.
		mr.config.ModTime = info.ModTime()
		return err
	}
	return nil
}
......@@ -253,6 +274,51 @@ func (mr *RunCommand) Start(s service.Service) error {
return nil
}
// updateWorkers grows or shrinks the pool of build workers until
// *currentWorkers matches the configured concurrency limit. It returns
// the interrupt signal if one arrives while adjusting, nil otherwise.
func (mr *RunCommand) updateWorkers(currentWorkers, workerIndex *int, startWorker chan int, stopWorker chan bool) os.Signal {
	limit := mr.config.Concurrent

	// Too many workers: ask the excess ones to stop.
	for *currentWorkers > limit {
		select {
		case stopWorker <- true:
			*currentWorkers--
		case signaled := <-mr.interruptSignal:
			return signaled
		}
	}

	// Too few workers: start new ones, handing each a fresh index.
	for *currentWorkers < limit {
		select {
		case startWorker <- *workerIndex:
			*currentWorkers++
			*workerIndex++
		case signaled := <-mr.interruptSignal:
			return signaled
		}
	}
	return nil
}
// updateConfig waits for one of: the periodic reload interval (then
// reloads the config only if the file changed), an explicit reload
// signal (then reloads unconditionally), or an interrupt signal (which
// it returns to the caller). Returns nil unless interrupted.
func (mr *RunCommand) updateConfig() os.Signal {
	select {
	case <-time.After(common.ReloadConfigInterval * time.Second):
		if err := mr.checkConfig(); err != nil {
			mr.log().Errorln("Failed to load config", err)
		}

	case <-mr.reloadSignal:
		if err := mr.loadConfig(); err != nil {
			mr.log().Errorln("Failed to load config", err)
		}

	case signaled := <-mr.interruptSignal:
		return signaled
	}
	return nil
}
func (mr *RunCommand) Run() {
runners := make(chan *common.RunnerConfig)
go mr.feedRunners(runners)
......@@ -268,63 +334,15 @@ func (mr *RunCommand) Run() {
workerIndex := 0
var signaled os.Signal
finish_worker:
for {
buildLimit := mr.config.Concurrent
for currentWorkers > buildLimit {
select {
case stopWorker <- true:
case signaled = <-mr.interruptSignal:
break finish_worker
}
currentWorkers--
signaled = mr.updateWorkers(&currentWorkers, &workerIndex, startWorker, stopWorker)
if signaled != nil {
break
}
for currentWorkers < buildLimit {
select {
case startWorker <- workerIndex:
case signaled = <-mr.interruptSignal:
break finish_worker
}
currentWorkers++
workerIndex++
}
select {
case <-time.After(common.ReloadConfigInterval * time.Second):
info, err := os.Stat(mr.ConfigFile)
if err != nil {
mr.log().Errorln("Failed to stat config", err)
break
}
if !mr.config.ModTime.Before(info.ModTime()) {
break
}
err = mr.loadConfig()
if err != nil {
mr.log().Errorln("Failed to load config", err)
// don't reload the same file
mr.config.ModTime = info.ModTime()
break
}
mr.log().Println("Config reloaded.")
case <-mr.reloadSignal:
err := mr.loadConfig()
if err != nil {
mr.log().Errorln("Failed to load config", err)
break
}
mr.log().Println("Config reloaded.")
case signaled = <-mr.interruptSignal:
break finish_worker
signaled = mr.updateConfig()
if signaled != nil {
break
}
}
mr.finished = true
......
......@@ -29,6 +29,34 @@ type RegisterCommand struct {
common.RunnerConfig
}
// askOnce prints prompt (and the current *result as a default), reads one
// line from the user, and stores a non-empty trimmed answer into *result.
// It reports whether *result now holds an acceptable value: true when the
// user typed something, or when an empty answer is allowed (allowEmpty)
// or a previous default exists. Panics on a read error (including EOF).
func (s *RegisterCommand) askOnce(prompt string, result *string, allowEmpty bool) bool {
	println(prompt)
	if *result != "" {
		print("["+*result, "]: ")
	}

	// Lazily create the stdin reader on first use.
	if s.reader == nil {
		s.reader = bufio.NewReader(os.Stdin)
	}

	data, _, err := s.reader.ReadLine()
	if err != nil {
		panic(err)
	}

	if answer := strings.TrimSpace(string(data)); answer != "" {
		*result = answer
		return true
	}

	// Empty answer: accept it only if empties are allowed or a default
	// value is already present.
	return allowEmpty || *result != ""
}
func (s *RegisterCommand) ask(key, prompt string, allowEmptyOptional ...bool) string {
allowEmpty := len(allowEmptyOptional) > 0 && allowEmptyOptional[0]
......@@ -43,30 +71,12 @@ func (s *RegisterCommand) ask(key, prompt string, allowEmptyOptional ...bool) st
}
for {
println(prompt)
if result != "" {
print("["+result, "]: ")
}
if s.reader == nil {
s.reader = bufio.NewReader(os.Stdin)
}
data, _, err := s.reader.ReadLine()
if err != nil {
panic(err)
}
newResult := string(data)
newResult = strings.TrimSpace(newResult)
if newResult != "" {
return newResult
}
if allowEmpty || result != "" {
return result
if !s.askOnce(prompt, &result, allowEmpty) {
break
}
}
return result
}
func (s *RegisterCommand) askExecutor() {
......@@ -143,6 +153,37 @@ func (s *RegisterCommand) askRunner() {
}
}
// askExecutorOptions prompts for the executor-specific settings that
// match c.Executor, and clears the config sections that do not apply to
// the chosen executor so they are not persisted.
func (c *RegisterCommand) askExecutorOptions() {
	switch c.Executor {
	case "docker":
		c.askDocker()
		// Docker-only: drop the unrelated sections.
		c.SSH = nil
		c.Parallels = nil
		c.VirtualBox = nil

	case "docker-ssh":
		c.askDocker()
		c.askSSHLogin()
		c.Parallels = nil
		c.VirtualBox = nil

	case "ssh":
		c.askSSHServer()
		c.askSSHLogin()
		c.Docker = nil
		c.Parallels = nil
		c.VirtualBox = nil

	case "parallels":
		c.askParallels()
		c.askSSHServer()
		c.Docker = nil
		c.VirtualBox = nil

	// NOTE(review): this case label is capitalized unlike every other
	// executor name here ("docker", "ssh", "parallels") — verify the
	// executor really registers as "VirtualBox" and not "virtualbox".
	case "VirtualBox":
		c.askVirtualBox()
		c.askSSHLogin()
		c.Docker = nil
		c.Parallels = nil
	}
}
func (c *RegisterCommand) Execute(context *cli.Context) {
userModeWarning(true)
......@@ -155,6 +196,7 @@ func (c *RegisterCommand) Execute(context *cli.Context) {
if !c.LeaveRunner {
defer func() {
// De-register runner on panic
if r := recover(); r != nil {
if c.registered {
c.network.DeleteRunner(c.RunnerCredentials)
......@@ -181,35 +223,7 @@ func (c *RegisterCommand) Execute(context *cli.Context) {
log.Warningf("Specified limit (%d) larger then current concurrent limit (%d). Concurrent limit will not be enlarged.", c.Limit, c.config.Concurrent)
}
switch c.Executor {
case "docker":
c.askDocker()
c.SSH = nil
c.Parallels = nil
c.VirtualBox = nil
case "docker-ssh":
c.askDocker()
c.askSSHLogin()
c.Parallels = nil
c.VirtualBox = nil
case "ssh":
c.askSSHServer()
c.askSSHLogin()
c.Docker = nil
c.Parallels = nil
c.VirtualBox = nil
case "parallels":
c.askParallels()
c.askSSHServer()
c.Docker = nil
c.VirtualBox = nil
case "VirtualBox":
c.askVirtualBox()
c.askSSHLogin()
c.Docker = nil
c.Parallels = nil
}
c.askExecutorOptions()
c.addRunner(&c.RunnerConfig)
c.saveConfig()
......
......@@ -90,7 +90,28 @@ func runServiceStatus(displayName string, s service.Service, c *cli.Context) err
return nil
}
func RunServiceControl(c *cli.Context) {
// getServiceArguments builds the argument list forwarded to the service
// process from the CLI flags the user supplied. Flags that were not set
// are omitted; --user is skipped for user-mode services; --syslog is
// always appended.
func getServiceArguments(c *cli.Context, isUserService bool) (arguments []string) {
	if workingDirectory := c.String("working-directory"); workingDirectory != "" {
		arguments = append(arguments, "--working-directory", workingDirectory)
	}

	if configFile := c.String("config"); configFile != "" {
		arguments = append(arguments, "--config", configFile)
	}

	if serviceName := c.String("service"); serviceName != "" {
		arguments = append(arguments, "--service", serviceName)
	}

	// A system-level service may run as a specific user; user-mode
	// services already run as the invoking user.
	if !isUserService {
		if user := c.String("user"); user != "" {
			arguments = append(arguments, "--user", user)
		}
	}

	arguments = append(arguments, "--syslog")
	return
}