diff --git a/addon/adapter.go b/addon/adapter.go index 6f5d8f14..f9b89105 100644 --- a/addon/adapter.go +++ b/addon/adapter.go @@ -21,6 +21,14 @@ var ( Log = logr.WithName("addon") ) +// Environment. +const ( + EnvSharedDir = settings.EnvSharedPath + EnvCacheDir = settings.EnvCachePath + EnvToken = settings.EnvHubToken + EnvTask = settings.EnvTask +) + // Addon An addon adapter configured for a task execution. var Addon *Adapter diff --git a/addon/injector.go b/addon/injector.go new file mode 100644 index 00000000..1f002262 --- /dev/null +++ b/addon/injector.go @@ -0,0 +1,76 @@ +package addon + +import ( + "encoding/json" + "os" + "regexp" + "strings" + + "github.com/konveyor/tackle2-hub/api" + "github.com/konveyor/tackle2-hub/task" +) + +var ( + EnvRegex = regexp.MustCompile(`(\$\()([^)]+)(\))`) +) + +// EnvInjector inject key into extension metadata. +type EnvInjector struct { + env map[string]string + dict map[string]string +} + +// Inject inject into extension metadata. +func (r *EnvInjector) Inject(extension *api.Extension) { + r.buildEnv(extension) + mp := make(map[string]any) + b, _ := json.Marshal(extension.Metadata) + _ = json.Unmarshal(b, &mp) + mp = r.inject(mp).(map[string]any) + extension.Metadata = mp +} + +// buildEnv builds the extension `env`. +func (r *EnvInjector) buildEnv(extension *api.Extension) { + r.env = make(map[string]string) + for _, env := range extension.Container.Env { + key := task.ExtEnv(extension.Name, env.Name) + r.env[env.Name] = os.Getenv(key) + } +} + +// inject replaces both `dict` keys and `env` environment +// variables referenced in metadata. 
+func (r *EnvInjector) inject(in any) (out any) { + switch node := in.(type) { + case map[string]any: + for k, v := range node { + node[k] = r.inject(v) + } + out = node + case []any: + var injected []any + for _, n := range node { + injected = append( + injected, + r.inject(n)) + } + out = injected + case string: + for { + match := EnvRegex.FindStringSubmatch(node) + if len(match) < 3 { + break + } + node = strings.Replace( + node, + match[0], + r.env[match[2]], + -1) + } + out = node + default: + out = node + } + return +} diff --git a/addon/task.go b/addon/task.go index 0f3a91c6..ddb9142a 100644 --- a/addon/task.go +++ b/addon/task.go @@ -42,9 +42,48 @@ func (h *Task) Application() (r *api.Application, err error) { return } +// Addon returns the addon associated with the task. +// The extensions are filtered to include those specified in the task. +// inject: perform injection. +func (h *Task) Addon(inject bool) (r *api.Addon, err error) { + name := h.task.Addon + if name == "" { + err = &NotFound{} + return + } + r, err = h.richClient.Addon.Get(name) + if err != nil { + return + } + // filter + included := map[string]int{} + for _, name := range h.task.Extensions { + included[name] = 0 + } + var extensions []api.Extension + for i := range r.Extensions { + extension := r.Extensions[i] + if _, found := included[extension.Name]; found { + extensions = append( + extensions, + extension) + } + } + r.Extensions = extensions + // inject + if inject { + for i := range r.Extensions { + extension := &r.Extensions[i] + injector := EnvInjector{} + injector.Inject(extension) + } + } + return +} + // Data returns the addon data. -func (h *Task) Data() (d map[string]interface{}) { - d = h.task.Data.(map[string]interface{}) +func (h *Task) Data() (d api.Map) { + d = h.task.Data return } @@ -55,11 +94,6 @@ func (h *Task) DataWith(object interface{}) (err error) { return } -// Variant returns the task variant. 
-func (h *Task) Variant() string { - return h.task.Variant -} - // Started report addon started. func (h *Task) Started() { h.deleteReport() @@ -169,10 +203,8 @@ func (h *Task) AttachAt(f *api.File, activity int) { h.report.Attached, api.Attachment{ Activity: activity, - Ref: api.Ref{ - ID: f.ID, - Name: f.Name, - }, + ID: f.ID, + Name: f.Name, }) h.pushReport() return @@ -215,7 +247,7 @@ func (h *Task) Bucket() (b *binding.BucketContent) { } // Result report addon result. -func (h *Task) Result(object interface{}) { +func (h *Task) Result(object api.Map) { h.report.Result = object h.pushReport() Log.Info("Addon reported: result.") diff --git a/api/addon.go b/api/addon.go index 4f11a88a..d08d5cb1 100644 --- a/api/addon.go +++ b/api/addon.go @@ -2,10 +2,12 @@ package api import ( "context" + "encoding/json" "net/http" "github.com/gin-gonic/gin" crd "github.com/konveyor/tackle2-hub/k8s/api/tackle/v1alpha1" + core "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" k8s "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -57,8 +59,19 @@ func (h AddonHandler) Get(ctx *gin.Context) { return } } + extensions := &crd.ExtensionList{} + err = h.Client(ctx).List( + context.TODO(), + extensions, + &k8s.ListOptions{ + Namespace: Settings.Namespace, + }) + if err != nil { + _ = ctx.Error(err) + return + } r := Addon{} - r.With(addon) + r.With(addon, extensions.Items...) h.Respond(ctx, http.StatusOK, r) } @@ -94,12 +107,43 @@ func (h AddonHandler) List(ctx *gin.Context) { // Addon REST resource. type Addon struct { - Name string `json:"name"` - Image string `json:"image"` + Name string `json:"name"` + Container core.Container `json:"container"` + Extensions []Extension `json:"extensions,omitempty"` + Metadata any `json:"metadata,omitempty"` } // With model. 
-func (r *Addon) With(m *crd.Addon) { +func (r *Addon) With(m *crd.Addon, extensions ...crd.Extension) { r.Name = m.Name - r.Image = m.Spec.Image + r.Container = m.Spec.Container + if m.Spec.Metadata.Raw != nil { + _ = json.Unmarshal(m.Spec.Metadata.Raw, &r.Metadata) + } + for i := range extensions { + extension := Extension{} + extension.With(&extensions[i]) + r.Extensions = append( + r.Extensions, + extension) + } +} + +// Extension REST resource. +type Extension struct { + Name string `json:"name"` + Addon string `json:"addon"` + Capabilities []string `json:"capabilities,omitempty"` + Container core.Container `json:"container"` + Metadata any `json:"metadata,omitempty"` +} + +// With model. +func (r *Extension) With(m *crd.Extension) { + r.Name = m.Name + r.Addon = m.Spec.Addon + r.Container = m.Spec.Container + if m.Spec.Metadata.Raw != nil { + _ = json.Unmarshal(m.Spec.Metadata.Raw, &r.Metadata) + } } diff --git a/api/base.go b/api/base.go index 073cdda8..96ad1aba 100644 --- a/api/base.go +++ b/api/base.go @@ -461,19 +461,3 @@ func (r *Cursor) pageLimited() (b bool) { b = r.Index > int64(r.Limit) return } - -// StrMap returns a map[string]any. -// The YAML decoder can produce map[any]any which is not valid for json. -// Converts map[any]any to map[string]any as needed. -func StrMap(in any) (out any) { - out = in - if d, cast := in.(map[any]any); cast { - mp := make(map[string]any) - for k, v := range d { - s := fmt.Sprintf("%v", k) - mp[s] = StrMap(v) - } - out = mp - } - return -} diff --git a/api/context.go b/api/context.go index 0a144c7c..6b601d50 100644 --- a/api/context.go +++ b/api/context.go @@ -5,6 +5,7 @@ import ( "github.com/gin-gonic/gin" "github.com/konveyor/tackle2-hub/auth" + tasking "github.com/konveyor/tackle2-hub/task" "gorm.io/gorm" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -22,6 +23,8 @@ type Context struct { Client client.Client // Response Response Response + // Task manager. + TaskManager *tasking.Manager } // Response values. 
diff --git a/api/error.go b/api/error.go index 2425c234..65be1550 100644 --- a/api/error.go +++ b/api/error.go @@ -10,6 +10,7 @@ import ( "github.com/konveyor/tackle2-hub/api/filter" "github.com/konveyor/tackle2-hub/api/sort" "github.com/konveyor/tackle2-hub/model" + tasking "github.com/konveyor/tackle2-hub/task" "github.com/mattn/go-sqlite3" "gorm.io/gorm" ) @@ -173,6 +174,15 @@ func ErrorHandler() gin.HandlerFunc { return } + if errors.Is(err, &tasking.BadRequest{}) { + rtx.Respond( + http.StatusBadRequest, + gin.H{ + "error": err.Error(), + }) + return + } + rtx.Respond( http.StatusInternalServerError, gin.H{ diff --git a/api/task.go b/api/task.go index ef20cef6..72b0a1a8 100644 --- a/api/task.go +++ b/api/task.go @@ -1,7 +1,8 @@ package api import ( - "encoding/json" + "context" + "fmt" "io/ioutil" "net/http" "sort" @@ -10,12 +11,15 @@ import ( "time" "github.com/gin-gonic/gin" + crd "github.com/konveyor/tackle2-hub/k8s/api/tackle/v1alpha1" "github.com/konveyor/tackle2-hub/model" + "github.com/konveyor/tackle2-hub/tar" tasking "github.com/konveyor/tackle2-hub/task" "gorm.io/gorm" "gorm.io/gorm/clause" k8serr "k8s.io/apimachinery/pkg/api/errors" "k8s.io/utils/strings/slices" + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" ) // Routes @@ -23,6 +27,7 @@ const ( TasksRoot = "/tasks" TaskRoot = TasksRoot + "/:" + ID TaskReportRoot = TaskRoot + "/report" + TaskAttachedRoot = TaskRoot + "/attached" TaskBucketRoot = TaskRoot + "/bucket" TaskBucketContentRoot = TaskBucketRoot + "/*" + Wildcard TaskSubmitRoot = TaskRoot + "/submit" @@ -65,6 +70,8 @@ func (h TaskHandler) AddRoutes(e *gin.Engine) { routeGroup.POST(TaskReportRoot, h.CreateReport) routeGroup.PUT(TaskReportRoot, h.UpdateReport) routeGroup.DELETE(TaskReportRoot, h.DeleteReport) + // Attached + routeGroup.GET(TaskAttachedRoot, h.GetAttached) } // Get godoc @@ -138,33 +145,28 @@ func (h TaskHandler) List(ctx *gin.Context) { // @router /tasks [post] // @param task body api.Task true "Task data" func (h 
TaskHandler) Create(ctx *gin.Context) { - r := Task{} - err := h.Bind(ctx, &r) + r := &Task{} + err := h.Bind(ctx, r) if err != nil { _ = ctx.Error(err) return } - switch r.State { - case "": - r.State = tasking.Created - case tasking.Created, - tasking.Ready: - default: - h.Respond(ctx, - http.StatusBadRequest, - gin.H{ - "error": "state must be (''|Created|Ready)", - }) + err = h.findRefs(ctx, r) + if err != nil { + _ = ctx.Error(err) return } - m := r.Model() - m.CreateUser = h.BaseHandler.CurrentUser(ctx) - result := h.DB(ctx).Create(&m) - if result.Error != nil { - _ = ctx.Error(result.Error) + task := &tasking.Task{} + task.With(r.Model()) + task.CreateUser = h.BaseHandler.CurrentUser(ctx) + rtx := WithContext(ctx) + created, err := rtx.TaskManager.Create(h.DB(ctx), task) + if err != nil { + _ = ctx.Error(err) return } - r.With(m) + + r.With(created.Task) h.Respond(ctx, http.StatusCreated, r) } @@ -178,23 +180,10 @@ func (h TaskHandler) Create(ctx *gin.Context) { // @param id path int true "Task ID" func (h TaskHandler) Delete(ctx *gin.Context) { id := h.pk(ctx) - task := &model.Task{} - result := h.DB(ctx).First(task, id) - if result.Error != nil { - _ = ctx.Error(result.Error) - return - } - rt := tasking.Task{Task: task} - err := rt.Delete(h.Client(ctx)) + rtx := WithContext(ctx) + err := rtx.TaskManager.Delete(h.DB(ctx), id) if err != nil { - if !k8serr.IsNotFound(err) { - _ = ctx.Error(err) - return - } - } - result = h.DB(ctx).Delete(task) - if result.Error != nil { - _ = ctx.Error(result.Error) + _ = ctx.Error(err) return } @@ -217,26 +206,14 @@ func (h TaskHandler) Update(ctx *gin.Context) { if err != nil { return } - switch r.State { - case tasking.Created, - tasking.Ready: - default: - h.Respond(ctx, - http.StatusBadRequest, - gin.H{ - "error": "state must be (Created|Ready)", - }) - return - } - m := r.Model() - m.Reset() - db := h.DB(ctx).Model(m) - db = db.Where("id", id) - db = db.Where("state", tasking.Created) - db = h.omitted(db) - result := 
db.Updates(h.fields(m)) - if result.Error != nil { - _ = ctx.Error(result.Error) + r.ID = id + rtx := WithContext(ctx) + task := &tasking.Task{} + task.With(r.Model()) + task.UpdateUser = h.BaseHandler.CurrentUser(ctx) + err = rtx.TaskManager.Update(h.DB(ctx), task) + if err != nil { + _ = ctx.Error(err) return } @@ -255,6 +232,11 @@ func (h TaskHandler) Update(ctx *gin.Context) { func (h TaskHandler) Submit(ctx *gin.Context) { id := h.pk(ctx) r := &Task{} + err := h.findRefs(ctx, r) + if err != nil { + _ = ctx.Error(err) + return + } mod := func(withBody bool) (err error) { if !withBody { m := r.Model() @@ -267,7 +249,7 @@ func (h TaskHandler) Submit(ctx *gin.Context) { r.State = tasking.Ready return } - err := h.modBody(ctx, r, mod) + err = h.modBody(ctx, r, mod) if err != nil { _ = ctx.Error(err) return @@ -284,33 +266,8 @@ func (h TaskHandler) Submit(ctx *gin.Context) { // @param id path int true "Task ID" func (h TaskHandler) Cancel(ctx *gin.Context) { id := h.pk(ctx) - m := &model.Task{} - result := h.DB(ctx).First(m, id) - if result.Error != nil { - _ = ctx.Error(result.Error) - return - } - switch m.State { - case tasking.Succeeded, - tasking.Failed, - tasking.Canceled: - h.Respond(ctx, - http.StatusBadRequest, - gin.H{ - "error": "state must not be (Succeeded|Failed|Canceled)", - }) - return - } - db := h.DB(ctx).Model(m) - db = db.Where("id", id) - db = db.Where( - "state not IN ?", - []string{ - tasking.Succeeded, - tasking.Failed, - tasking.Canceled, - }) - err := db.Update("Canceled", true).Error + rtx := WithContext(ctx) + err := rtx.TaskManager.Cancel(h.DB(ctx), id) if err != nil { _ = ctx.Error(err) return @@ -432,7 +389,7 @@ func (h TaskHandler) CreateReport(ctx *gin.Context) { // @tags tasks // @accept json // @produce json -// @success 200 {object} api.TaskReport +// @success 204 // @router /tasks/{id}/report [put] // @param id path int true "Task ID" // @param task body api.TaskReport true "TaskReport data" @@ -448,13 +405,12 @@ func (h 
TaskHandler) UpdateReport(ctx *gin.Context) { m.UpdateUser = h.BaseHandler.CurrentUser(ctx) db := h.DB(ctx).Model(m) db = db.Where("taskid", id) - result := db.Updates(h.fields(m)) + result := db.Save(m) if result.Error != nil { _ = ctx.Error(result.Error) } - report.With(m) - h.Respond(ctx, http.StatusOK, report) + h.Status(ctx, http.StatusNoContent) } // DeleteReport godoc @@ -480,33 +436,131 @@ func (h TaskHandler) DeleteReport(ctx *gin.Context) { h.Status(ctx, http.StatusNoContent) } -// Fields omitted by: -// - Create -// - Update. -func (h *TaskHandler) omitted(db *gorm.DB) (out *gorm.DB) { - out = db.Omit([]string{ - "BucketID", - "Bucket", - "Image", - "Pod", - "Started", - "Terminated", - "Canceled", - "Error", - "Retries", - }...) +// GetAttached godoc +// @summary Get attached files. +// @description Get attached files. +// @description Returns a tarball with attached files. +// @tags tasks +// @produce octet-stream +// @success 200 +// @router /tasks/{id}/attached [get] +// @param id path int true "Task ID" +func (h TaskHandler) GetAttached(ctx *gin.Context) { + m := &model.Task{} + id := h.pk(ctx) + db := h.DB(ctx).Preload(clause.Associations) + err := db.First(m, id).Error + if err != nil { + _ = ctx.Error(err) + return + } + tarWriter := tar.NewWriter(ctx.Writer) + defer func() { + tarWriter.Close() + }() + r := Task{} + r.With(m) + var files []*model.File + for _, ref := range r.Attached { + file := &model.File{} + err = h.DB(ctx).First(file, ref.ID).Error + if err != nil { + _ = ctx.Error(err) + return + } + err = tarWriter.AssertFile(file.Path) + if err != nil { + _ = ctx.Error(err) + return + } + files = append(files, file) + } + ctx.Status(http.StatusOK) + for _, file := range files { + _ = tarWriter.AddFile( + file.Path, + fmt.Sprintf("%.3d-%s", file.ID, file.Name)) + } +} + +// findRefs find referenced resources. +// - addon +// - extensions +// - kind +// - priority +// The priority is defaulted to the kind as needed. 
+func (h *TaskHandler) findRefs(ctx *gin.Context, r *Task) (err error) { + client := h.Client(ctx) + if r.Addon != "" { + addon := &crd.Addon{} + name := r.Addon + err = client.Get( + context.TODO(), + k8sclient.ObjectKey{ + Name: name, + Namespace: Settings.Hub.Namespace, + }, + addon) + if err != nil { + if k8serr.IsNotFound(err) { + err = &BadRequestError{ + Reason: "Addon: " + name + " not found", + } + } + return + } + } + for _, name := range r.Extensions { + ext := &crd.Extension{} + err = client.Get( + context.TODO(), + k8sclient.ObjectKey{ + Name: name, + Namespace: Settings.Hub.Namespace, + }, + ext) + if err != nil { + if k8serr.IsNotFound(err) { + err = &BadRequestError{ + Reason: "Extension: " + name + " not found", + } + } + return + } + } + if r.Kind != "" { + kind := &crd.Task{} + name := r.Kind + err = client.Get( + context.TODO(), + k8sclient.ObjectKey{ + Name: name, + Namespace: Settings.Hub.Namespace, + }, + kind) + if err != nil { + if k8serr.IsNotFound(err) { + err = &BadRequestError{ + Reason: "Task: " + name + " not found", + } + } + return + } + if r.Priority == 0 { + r.Priority = kind.Spec.Priority + } + } return } // TTL time-to-live. -type TTL struct { - Created int `json:"created,omitempty"` - Pending int `json:"pending,omitempty"` - Postponed int `json:"postponed,omitempty"` - Running int `json:"running,omitempty"` - Succeeded int `json:"succeeded,omitempty"` - Failed int `json:"failed,omitempty"` -} +type TTL model.TTL + +// TaskPolicy scheduling policies. +type TaskPolicy model.TaskPolicy + +// Map unstructured object. +type Map model.Map // TaskError used in Task.Errors. type TaskError struct { @@ -514,27 +568,33 @@ type TaskError struct { Description string `json:"description"` } +// TaskEvent task event. +type TaskEvent model.TaskEvent + +// Attachment file attachment. +type Attachment model.Attachment + // Task REST resource. 
type Task struct { Resource `yaml:",inline"` - Name string `json:"name"` + Name string `json:"name,omitempty" yaml:",omitempty"` + Kind string `json:"kind,omitempty" yaml:",omitempty"` + Addon string `json:"addon,omitempty" yaml:",omitempty"` + Extensions []string `json:"extensions,omitempty" yaml:",omitempty"` + State string `json:"state,omitempty" yaml:",omitempty"` Locator string `json:"locator,omitempty" yaml:",omitempty"` Priority int `json:"priority,omitempty" yaml:",omitempty"` - Variant string `json:"variant,omitempty" yaml:",omitempty"` - Policy string `json:"policy,omitempty" yaml:",omitempty"` - TTL *TTL `json:"ttl,omitempty" yaml:",omitempty"` - Addon string `json:"addon,omitempty" binding:"required" yaml:",omitempty"` - Data interface{} `json:"data" swaggertype:"object" binding:"required"` + Policy TaskPolicy `json:"policy,omitempty" yaml:",omitempty"` + TTL TTL `json:"ttl,omitempty" yaml:",omitempty"` + Data Map `json:"data,omitempty" yaml:",omitempty"` Application *Ref `json:"application,omitempty" yaml:",omitempty"` - State string `json:"state"` - Image string `json:"image,omitempty" yaml:",omitempty"` + Actions []string `json:"actions,omitempty" yaml:",omitempty"` + Bucket *Ref `json:"bucket,omitempty" yaml:",omitempty"` Pod string `json:"pod,omitempty" yaml:",omitempty"` Retries int `json:"retries,omitempty" yaml:",omitempty"` Started *time.Time `json:"started,omitempty" yaml:",omitempty"` Terminated *time.Time `json:"terminated,omitempty" yaml:",omitempty"` - Canceled bool `json:"canceled,omitempty" yaml:",omitempty"` - Bucket *Ref `json:"bucket,omitempty" yaml:",omitempty"` - Purged bool `json:"purged,omitempty" yaml:",omitempty"` + Events []TaskEvent `json:"events,omitempty" yaml:",omitempty"` Errors []TaskError `json:"errors,omitempty" yaml:",omitempty"` Activity []string `json:"activity,omitempty" yaml:",omitempty"` Attached []Attachment `json:"attached" yaml:",omitempty"` @@ -544,33 +604,36 @@ type Task struct { func (r *Task) With(m 
*model.Task) { r.Resource.With(&m.Model) r.Name = m.Name - r.Image = m.Image + r.Kind = m.Kind r.Addon = m.Addon + r.Extensions = m.Extensions + r.State = m.State r.Locator = m.Locator r.Priority = m.Priority - r.Policy = m.Policy - r.Variant = m.Variant + r.Policy = TaskPolicy(m.Policy) + r.TTL = TTL(m.TTL) + r.Data = m.Data r.Application = r.refPtr(m.ApplicationID, m.Application) r.Bucket = r.refPtr(m.BucketID, m.Bucket) - r.State = m.State - r.Started = m.Started - r.Terminated = m.Terminated r.Pod = m.Pod r.Retries = m.Retries - r.Canceled = m.Canceled - _ = json.Unmarshal(m.Data, &r.Data) - if m.TTL != nil { - _ = json.Unmarshal(m.TTL, &r.TTL) + r.Started = m.Started + r.Terminated = m.Terminated + r.Events = nil + r.Errors = nil + r.Attached = nil + for _, event := range m.Events { + r.Events = append(r.Events, TaskEvent(event)) } - if m.Errors != nil { - _ = json.Unmarshal(m.Errors, &r.Errors) + for _, err := range m.Errors { + r.Errors = append(r.Errors, TaskError(err)) } if m.Report != nil { report := &TaskReport{} report.With(m.Report) r.Activity = report.Activity - r.Errors = append(report.Errors, r.Errors...) - r.Attached = report.Attached + r.Errors = append(r.Errors, report.Errors...) + r.Attached = append(r.Attached, report.Attached...) switch r.State { case tasking.Succeeded: switch report.Status { @@ -579,25 +642,30 @@ func (r *Task) With(m *model.Task) { } } } + for _, a := range m.Attached { + r.Attached = append(r.Attached, Attachment(a)) + } + if Settings.Hub.Task.Preemption.Enabled { + r.Policy.PreemptEnabled = true + } } // Model builds a model. 
func (r *Task) Model() (m *model.Task) { m = &model.Task{ Name: r.Name, + Kind: r.Kind, Addon: r.Addon, + Extensions: r.Extensions, + State: r.State, Locator: r.Locator, - Variant: r.Variant, Priority: r.Priority, - Policy: r.Policy, - State: r.State, + Policy: model.TaskPolicy(r.Policy), + TTL: model.TTL(r.TTL), + Data: r.Data, ApplicationID: r.idPtr(r.Application), } - m.Data, _ = json.Marshal(StrMap(r.Data)) m.ID = r.ID - if r.TTL != nil { - m.TTL, _ = json.Marshal(r.TTL) - } return } @@ -649,7 +717,7 @@ type TaskReport struct { Completed int `json:"completed,omitempty" yaml:",omitempty"` Activity []string `json:"activity,omitempty" yaml:",omitempty"` Attached []Attachment `json:"attached,omitempty" yaml:",omitempty"` - Result interface{} `json:"result,omitempty" yaml:",omitempty" swaggertype:"object"` + Result Map `json:"result,omitempty" yaml:",omitempty"` TaskID uint `json:"task"` } @@ -660,17 +728,15 @@ func (r *TaskReport) With(m *model.TaskReport) { r.Total = m.Total r.Completed = m.Completed r.TaskID = m.TaskID - if m.Activity != nil { - _ = json.Unmarshal(m.Activity, &r.Activity) - } - if m.Errors != nil { - _ = json.Unmarshal(m.Errors, &r.Errors) - } - if m.Attached != nil { - _ = json.Unmarshal(m.Attached, &r.Attached) + r.Activity = m.Activity + r.Result = m.Result + r.Errors = nil + r.Attached = nil + for _, err := range m.Errors { + r.Errors = append(r.Errors, TaskError(err)) } - if m.Result != nil { - _ = json.Unmarshal(m.Result, &r.Result) + for _, a := range m.Attached { + r.Attached = append(r.Attached, Attachment(a)) } } @@ -683,30 +749,16 @@ func (r *TaskReport) Model() (m *model.TaskReport) { Status: r.Status, Total: r.Total, Completed: r.Completed, + Activity: r.Activity, + Result: r.Result, TaskID: r.TaskID, } - if r.Activity != nil { - m.Activity, _ = json.Marshal(r.Activity) - } - if r.Result != nil { - m.Result, _ = json.Marshal(StrMap(r.Result)) - } - if r.Errors != nil { - m.Errors, _ = json.Marshal(r.Errors) + m.ID = r.ID + for _, err 
:= range r.Errors { + m.Errors = append(m.Errors, model.TaskError(err)) } - if r.Attached != nil { - m.Attached, _ = json.Marshal(r.Attached) + for _, at := range r.Attached { + m.Attached = append(m.Attached, model.Attachment(at)) } - m.ID = r.ID - return } - -// Attachment associates Files with a TaskReport. -type Attachment struct { - // Ref references an attached File. - Ref `yaml:",inline"` - // Activity index (1-based) association with an - // activity entry. Zero(0) indicates not associated. - Activity int `json:"activity,omitempty" yaml:",omitempty"` -} diff --git a/api/taskgroup.go b/api/taskgroup.go index 04d936b5..61347f81 100644 --- a/api/taskgroup.go +++ b/api/taskgroup.go @@ -1,14 +1,16 @@ package api import ( - "encoding/json" + "context" "net/http" "github.com/gin-gonic/gin" + crd "github.com/konveyor/tackle2-hub/k8s/api/tackle/v1alpha1" "github.com/konveyor/tackle2-hub/model" tasking "github.com/konveyor/tackle2-hub/task" "gorm.io/gorm/clause" k8serr "k8s.io/apimachinery/pkg/api/errors" + k8sclient "sigs.k8s.io/controller-runtime/pkg/client" ) // Routes @@ -110,19 +112,45 @@ func (h TaskGroupHandler) Create(ctx *gin.Context) { _ = ctx.Error(err) return } + err = h.findRefs(ctx, r) + if err != nil { + _ = ctx.Error(err) + return + } db := h.DB(ctx) + db = db.Omit(clause.Associations) m := r.Model() + m.CreateUser = h.BaseHandler.CurrentUser(ctx) switch r.State { case "": m.State = tasking.Created fallthrough case tasking.Created: - db = h.DB(ctx).Omit(clause.Associations) + result := db.Create(&m) + if result.Error != nil { + _ = ctx.Error(result.Error) + return + } case tasking.Ready: err := m.Propagate() if err != nil { return } + result := db.Create(&m) + if result.Error != nil { + _ = ctx.Error(result.Error) + return + } + rtx := WithContext(ctx) + for i := range m.Tasks { + task := &tasking.Task{} + task.With(&m.Tasks[i]) + task, err = rtx.TaskManager.Create(h.DB(ctx), task) + if err != nil { + _ = ctx.Error(err) + return + } + } default: 
h.Respond(ctx, http.StatusBadRequest, @@ -131,12 +159,6 @@ func (h TaskGroupHandler) Create(ctx *gin.Context) { }) return } - m.CreateUser = h.BaseHandler.CurrentUser(ctx) - result := db.Create(&m) - if result.Error != nil { - _ = ctx.Error(result.Error) - return - } r.With(m) @@ -165,20 +187,46 @@ func (h TaskGroupHandler) Update(ctx *gin.Context) { _ = ctx.Error(err) return } + err = h.findRefs(ctx, updated) + if err != nil { + _ = ctx.Error(err) + return + } + db := h.DB(ctx) + db = db.Omit( + clause.Associations, + "BucketID", + "Bucket") m := updated.Model() - m.ID = current.ID + m.ID = id m.UpdateUser = h.BaseHandler.CurrentUser(ctx) - db := h.DB(ctx).Model(m) - - omit := []string{"BucketID", "Bucket"} switch updated.State { case "", tasking.Created: - omit = append(omit, clause.Associations) + err = db.Save(m).Error + if err != nil { + _ = ctx.Error(err) + return + } case tasking.Ready: err := m.Propagate() if err != nil { return } + err = db.Save(m).Error + if err != nil { + _ = ctx.Error(err) + return + } + rtx := WithContext(ctx) + for i := range m.Tasks { + task := &tasking.Task{} + task.With(&m.Tasks[i]) + err = rtx.TaskManager.Update(h.DB(ctx), task) + if err != nil { + _ = ctx.Error(err) + return + } + } default: h.Respond(ctx, http.StatusBadRequest, @@ -187,13 +235,6 @@ func (h TaskGroupHandler) Update(ctx *gin.Context) { }) return } - db = db.Omit(omit...) 
- db = db.Where("state IN ?", []string{"", tasking.Created}) - err = db.Updates(h.fields(m)).Error - if err != nil { - _ = ctx.Error(err) - return - } h.Status(ctx, http.StatusNoContent) } @@ -208,31 +249,22 @@ func (h TaskGroupHandler) Update(ctx *gin.Context) { func (h TaskGroupHandler) Delete(ctx *gin.Context) { m := &model.TaskGroup{} id := h.pk(ctx) - db := h.DB(ctx).Preload(clause.Associations) + db := h.DB(ctx) + db = db.Omit(clause.Associations) err := db.First(m, id).Error if err != nil { _ = ctx.Error(err) return } - for _, task := range m.Tasks { - if task.Pod != "" { - rt := tasking.Task{Task: &task} - err := rt.Delete(h.Client(ctx)) - if err != nil { - if !k8serr.IsNotFound(err) { - _ = ctx.Error(err) - return - } - } - } - db := h.DB(ctx).Select(clause.Associations) - err = db.Delete(task).Error + rtx := WithContext(ctx) + for i := range m.Tasks { + task := &m.Tasks[i] + err = rtx.TaskManager.Delete(h.DB(ctx), task.ID) if err != nil { _ = ctx.Error(err) return } } - db = h.DB(ctx).Select(clause.Associations) err = db.Delete(m).Error if err != nil { _ = ctx.Error(err) @@ -254,6 +286,11 @@ func (h TaskGroupHandler) Delete(ctx *gin.Context) { func (h TaskGroupHandler) Submit(ctx *gin.Context) { id := h.pk(ctx) r := &TaskGroup{} + err := h.findRefs(ctx, r) + if err != nil { + _ = ctx.Error(err) + return + } mod := func(withBody bool) (err error) { if !withBody { m := r.Model() @@ -266,7 +303,7 @@ func (h TaskGroupHandler) Submit(ctx *gin.Context) { r.State = tasking.Ready return } - err := h.modBody(ctx, r, mod) + err = h.modBody(ctx, r, mod) if err != nil { _ = ctx.Error(err) return @@ -352,29 +389,113 @@ func (h TaskGroupHandler) BucketDelete(ctx *gin.Context) { h.bucketDelete(ctx, *m.BucketID) } +// findRefs find referenced resources. +// - addon +// - extensions +// - kind +// - priority +// The priority is defaulted to the kind as needed. 
+func (h *TaskGroupHandler) findRefs(ctx *gin.Context, r *TaskGroup) (err error) { + client := h.Client(ctx) + if r.Addon != "" { + addon := &crd.Addon{} + name := r.Addon + err = client.Get( + context.TODO(), + k8sclient.ObjectKey{ + Name: name, + Namespace: Settings.Hub.Namespace, + }, + addon) + if err != nil { + if k8serr.IsNotFound(err) { + err = &BadRequestError{ + Reason: "Addon: " + name + " not found", + } + } + return + } + } + for _, name := range r.Extensions { + ext := &crd.Extension{} + err = client.Get( + context.TODO(), + k8sclient.ObjectKey{ + Name: name, + Namespace: Settings.Hub.Namespace, + }, + ext) + if err != nil { + if k8serr.IsNotFound(err) { + err = &BadRequestError{ + Reason: "Extension: " + name + " not found", + } + } + return + } + } + if r.Kind != "" { + kind := &crd.Task{} + name := r.Kind + err = client.Get( + context.TODO(), + k8sclient.ObjectKey{ + Name: name, + Namespace: Settings.Hub.Namespace, + }, + kind) + if err != nil { + if k8serr.IsNotFound(err) { + err = &BadRequestError{ + Reason: "Task: " + name + " not found", + } + } + return + } + if r.Priority == 0 { + r.Priority = kind.Spec.Priority + } + } + return +} + // TaskGroup REST resource. 
type TaskGroup struct { - Resource `yaml:",inline"` - Name string `json:"name"` - Addon string `json:"addon"` - Data interface{} `json:"data" swaggertype:"object" binding:"required"` - Bucket *Ref `json:"bucket,omitempty"` - State string `json:"state"` - Tasks []Task `json:"tasks"` + Resource `yaml:",inline"` + Name string `json:"name"` + Kind string `json:"kind,omitempty" yaml:",omitempty"` + Addon string `json:"addon,omitempty" yaml:",omitempty"` + Extensions []string `json:"extensions,omitempty" yaml:",omitempty"` + State string `json:"state"` + Priority int `json:"priority,omitempty" yaml:",omitempty"` + Policy TaskPolicy `json:"policy,omitempty" yaml:",omitempty"` + Data Map `json:"data" swaggertype:"object" binding:"required"` + Bucket *Ref `json:"bucket,omitempty"` + Tasks []Task `json:"tasks"` } // With updates the resource with the model. func (r *TaskGroup) With(m *model.TaskGroup) { r.Resource.With(&m.Model) r.Name = m.Name + r.Kind = m.Kind r.Addon = m.Addon + r.Extensions = m.Extensions r.State = m.State + r.Priority = m.Priority + r.Policy = TaskPolicy(m.Policy) + r.Data = m.Data r.Bucket = r.refPtr(m.BucketID, m.Bucket) r.Tasks = []Task{} - _ = json.Unmarshal(m.Data, &r.Data) switch m.State { case "", tasking.Created: - _ = json.Unmarshal(m.List, &r.Tasks) + for _, task := range m.List { + member := Task{} + member.With(&task) + r.Tasks = append( + r.Tasks, + member) + } default: for _, task := range m.Tasks { member := Task{} @@ -389,13 +510,19 @@ func (r *TaskGroup) With(m *model.TaskGroup) { // Model builds a model. 
func (r *TaskGroup) Model() (m *model.TaskGroup) { m = &model.TaskGroup{ - Name: r.Name, - Addon: r.Addon, - State: r.State, + Name: r.Name, + Kind: r.Kind, + Addon: r.Addon, + Extensions: r.Extensions, + State: r.State, + Priority: r.Priority, + Policy: model.TaskPolicy(r.Policy), + Data: r.Data, } m.ID = r.ID - m.Data, _ = json.Marshal(StrMap(r.Data)) - m.List, _ = json.Marshal(r.Tasks) + for _, task := range r.Tasks { + m.List = append(m.List, *task.Model()) + } if r.Bucket != nil { m.BucketID = &r.Bucket.ID } diff --git a/binding/addon.go b/binding/addon.go new file mode 100644 index 00000000..c83af8cc --- /dev/null +++ b/binding/addon.go @@ -0,0 +1,25 @@ +package binding + +import ( + "github.com/konveyor/tackle2-hub/api" +) + +// Addon API. +type Addon struct { + client *Client +} + +// Get an Addon by name. +func (h *Addon) Get(name string) (r *api.Addon, err error) { + r = &api.Addon{} + path := Path(api.AddonRoot).Inject(Params{api.Name: name}) + err = h.client.Get(path, r) + return +} + +// List Addons. +func (h *Addon) List() (list []api.Addon, err error) { + list = []api.Addon{} + err = h.client.Get(api.AddonsRoot, &list) + return +} diff --git a/binding/richclient.go b/binding/richclient.go index 118a55d6..da2c4073 100644 --- a/binding/richclient.go +++ b/binding/richclient.go @@ -20,7 +20,7 @@ func init() { // The RichClient provides API integration. type RichClient struct { - // Resources APIs. + Addon Addon Application Application Archetype Archetype Assessment Assessment @@ -44,8 +44,7 @@ type RichClient struct { Task Task Ticket Ticket Tracker Tracker - - // A REST client. + // REST client. Client *Client } @@ -58,6 +57,9 @@ func New(baseUrl string) (r *RichClient) { // // Build RichClient. 
r = &RichClient{ + Addon: Addon{ + client: client, + }, Application: Application{ client: client, }, diff --git a/cmd/main.go b/cmd/main.go index 86980f8e..076b0ae7 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -199,6 +199,7 @@ func main() { rtx := api.WithContext(ctx) rtx.DB = db rtx.Client = client + rtx.TaskManager = &taskManager }) for _, h := range api.All() { h.AddRoutes(router) diff --git a/generated/crd/tackle.konveyor.io_addons.yaml b/generated/crd/tackle.konveyor.io_addons.yaml index ad2824fa..20e44294 100644 --- a/generated/crd/tackle.konveyor.io_addons.yaml +++ b/generated/crd/tackle.konveyor.io_addons.yaml @@ -1,10 +1,9 @@ - --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.5.0 + controller-gen.kubebuilder.io/version: v0.10.0 creationTimestamp: null name: addons.tackle.konveyor.io spec: @@ -28,113 +27,1241 @@ spec: openAPIV3Schema: properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: description: AddonSpec defines the desired state of Addon properties: - image: - description: Addon fqin. - type: string - imagePullPolicy: - default: IfNotPresent - description: ImagePullPolicy an optional image pull policy. - enum: - - IfNotPresent - - Always - - Never - type: string - resources: - description: Resource requirements. + container: + description: Container details. properties: - limits: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + args: + description: 'Arguments to the entrypoint. The container image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will be + unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The + container image''s ENTRYPOINT is used if this is not provided. 
+ Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: + i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether + the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be a + C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key will + take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of + ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: 'Container image name. 
More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in + response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as + a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period countdown + begins before the PreStop hook is executed. Regardless of + the outcome of the handler, the container will eventually + terminate within the Pod''s termination grace period (unless + delayed by finalizers). 
Other management of the container + blocks until the hook completes or until the termination + grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as + a LifecycleHandler and kept for the backward compatibility. 
+ There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object type: object - requests: - additionalProperties: - anyOf: - - type: integer - - type: string - pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ - x-kubernetes-int-or-string: true - description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is a beta field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number must + be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place + in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior is + defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to + terminate gracefully upon probe failure. The grace period + is the duration in seconds after the processes running in + the pod are sent a termination signal and the time when + the processes are forcibly halted with a kill signal. Set + this value longer than the expected cleanup time for your + process. If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides the value + provided by the pod spec. Value must be non-negative integer. + The value zero indicates stop immediately via the kill signal + (no opportunity to shut down). This is a beta field and + requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. 
spec.terminationGracePeriodSeconds is + used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer type: object - type: object - required: - - image - type: object - status: - description: AddonStatus defines the observed state of Addon - properties: - conditions: - description: Conditions. - properties: - conditions: - description: List of conditions. + name: + description: Name of the container specified as a DNS_LABEL. Each + container in a pod must have a unique name (DNS_LABEL). Cannot + be updated. + type: string + ports: + description: List of ports to expose from the container. Not specifying + a port here DOES NOT prevent that port from being exposed. Any + port which is listening on the default "0.0.0.0" address inside + a container will be accessible from the network. Modifying this + array with strategic merge patch may corrupt the data. For more + information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. items: - description: Condition + description: ContainerPort represents a network port in a single + container. properties: - category: - description: The condition category. + containerPort: + description: Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. type: string - durable: - description: The condition is durable - never un-staged. - type: boolean - items: - description: A list of items referenced in the `Message`. - items: - type: string - type: array - lastTransitionTime: - description: When the last status transition occurred. - format: date-time + hostPort: + description: Number of port to expose on the host. 
If specified, + this must be a valid port number, 0 < x < 65536. If HostNetwork + is specified, this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod must + have a unique name. Name for the port that can be referred + to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container + will be removed from service endpoints if the probe fails. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is a beta field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number must + be in the range 1 to 65535. 
+ format: int32 + type: integer + service: + description: "Service is the name of the service to place + in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior is + defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. 
Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to + terminate gracefully upon probe failure. The grace period + is the duration in seconds after the processes running in + the pod are sent a termination signal and the time when + the processes are forcibly halted with a kill signal. Set + this value longer than the expected cleanup time for your + process. If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides the value + provided by the pod spec. Value must be non-negative integer. + The value zero indicates stop immediately via the kill signal + (no opportunity to shut down). This is a beta field and + requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is + used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot + be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the + container should be run with. If set, the fields of SecurityContext + override the equivalent fields of PodSecurityContext. More info: + https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a + process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN Note that this field cannot be set + when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. 
+ Defaults to the default set of capabilities granted by the + container runtime. Note that this field cannot be set when + spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in + privileged containers are essentially equivalent to root + on the host. Defaults to false. Note that this field cannot + be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use + for the containers. The default is DefaultProcMount which + uses the container runtime defaults for readonly paths and + masked paths. This requires the ProcMountType feature flag + to be enabled. Note that this field cannot be set when spec.os.name + is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. Note that this field cannot be set when + spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in PodSecurityContext. 
If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. Note + that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must + be preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". 
+ type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a + profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile + should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is + alpha-level and will only be honored by components that + enable the WindowsHostProcessContainers feature flag. + Setting this field without the feature flag will result + in errors when validating the Pod. All of a Pod's containers + must have the same effective HostProcess value (it is + not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. 
If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod will + be restarted, just as if the livenessProbe failed. This can + be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. This + cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is a beta field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number must + be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place + in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). 
+ \n If this is not specified, the default behavior is + defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. 
+ properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to + terminate gracefully upon probe failure. The grace period + is the duration in seconds after the processes running in + the pod are sent a termination signal and the time when + the processes are forcibly halted with a kill signal. Set + this value longer than the expected cleanup time for your + process. If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides the value + provided by the pod spec. Value must be non-negative integer. + The value zero indicates stop immediately via the kill signal + (no opportunity to shut down). This is a beta field and + requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is + used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for + stdin in the container runtime. If this is not set, reads from + stdin in the container will always result in EOF. Default is + false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin + channel after it has been opened by a single attach. When stdin + is true the stdin stream will remain open across multiple attach + sessions. 
If stdinOnce is set to true, stdin is opened on container + start, is empty until the first client attaches to stdin, and + then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container + is restarted. If this flag is false, a container processes that + reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s + termination message will be written is mounted into the container''s + filesystem. Message written is intended to be brief final status, + such as an assertion failure message. Will be truncated by the + node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. Defaults to /dev/termination-log. + Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. + File will use the contents of terminationMessagePath to populate + the container status message on both success and failure. FallbackToLogsOnError + will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever + is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. 
+ type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. type: string - message: - description: The human readable description of the condition. + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. When not set, MountPropagationNone is used. This + field is beta in 1.10. type: string - reason: - description: The reason for the condition or transition. + name: + description: This must match the Name of a Volume. type: string - status: - description: The condition status [true,false]. + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). type: string - type: - description: The condition type. + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. type: string required: - - category - - lastTransitionTime - - status - - type + - mountPath + - name type: object type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might be + configured in the container image. 
Cannot be updated. + type: string + required: + - name type: object + metadata: + description: Metadata details. + type: object + x-kubernetes-preserve-unknown-fields: true + selector: + description: Selector + type: string + task: + description: Task (kind) compatibility. + type: string + required: + - container + type: object + status: + description: AddonStatus defines the observed state of Addon + properties: observedGeneration: description: The most recent generation observed by the controller. format: int64 type: integer - required: - - conditions type: object type: object served: true storage: true subresources: status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/generated/crd/tackle.konveyor.io_extensions.yaml b/generated/crd/tackle.konveyor.io_extensions.yaml new file mode 100644 index 00000000..fa5f746f --- /dev/null +++ b/generated/crd/tackle.konveyor.io_extensions.yaml @@ -0,0 +1,1268 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.10.0 + creationTimestamp: null + name: extensions.tackle.konveyor.io +spec: + group: tackle.konveyor.io + names: + kind: Extension + listKind: ExtensionList + plural: extensions + singular: extension + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ExtensionSpec defines the desired state of Extension + properties: + addon: + description: Addon compatibility. + type: string + container: + description: Container details. + properties: + args: + description: 'Arguments to the entrypoint. The container image''s + CMD is used if this is not provided. Variable references $(VAR_NAME) + are expanded using the container''s environment. If a variable + cannot be resolved, the reference in the input string will be + unchanged. Double $$ are reduced to a single $, which allows + for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + produce the string literal "$(VAR_NAME)". Escaped references + will never be expanded, regardless of whether the variable exists + or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The + container image''s ENTRYPOINT is used if this is not provided. + Variable references $(VAR_NAME) are expanded using the container''s + environment. If a variable cannot be resolved, the reference + in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: + i.e. "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether + the variable exists or not. 
Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. + Cannot be updated. + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must be a + C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in + the container and any service environment variables. If + a variable cannot be resolved, the reference in the input + string will be unchanged. Double $$ are reduced to a single + $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless + of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, `metadata.labels['''']`, + `metadata.annotations['''']`, spec.nodeName, + spec.serviceAccountName, status.hostIP, status.podIP, + status.podIPs.' 
+ properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the + specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the + exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables + in the container. The keys defined within a source must be a + C_IDENTIFIER. 
All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take + precedence. Values defined by an Env with a duplicate key will + take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of + ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + description: 'Container image name. More info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher level config management + to default or override container images in workload controllers + like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. + Defaults to Always if :latest tag is specified, or IfNotPresent + otherwise. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in + response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container + is created. If the handler fails, the container is terminated + and restarted according to its restart policy. Other management + of the container blocks until the hook completes. More info: + https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as + a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container + is terminated due to an API request or management event + such as liveness/startup probe failure, preemption, resource + contention, etc. The handler is not called if the container + crashes or exits. The Pod''s termination grace period countdown + begins before the PreStop hook is executed. Regardless of + the outcome of the handler, the container will eventually + terminate within the Pod''s termination grace period (unless + delayed by finalizers). Other management of the container + blocks until the hook completes or until the termination + grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute + inside the container, the working directory for + the command is root ('/') in the container's filesystem. + The command is simply exec'd, it is not run inside + a shell, so traditional shell instructions ('|', + etc) won't work. To use a shell, you need to explicitly + call out to that shell. Exit status of 0 is treated + as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to + the pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. + HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header + to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: Deprecated. TCPSocket is NOT supported as + a LifecycleHandler and kept for the backward compatibility. + There are no validation of this field and lifecycle + hooks will fail in runtime when tcp handler is specified. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access + on the container. Number must be in the range 1 + to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container + will be restarted if the probe fails. Cannot be updated. More + info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is a beta field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number must + be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place + in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior is + defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. 
+ properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. 
Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to + terminate gracefully upon probe failure. The grace period + is the duration in seconds after the processes running in + the pod are sent a termination signal and the time when + the processes are forcibly halted with a kill signal. Set + this value longer than the expected cleanup time for your + process. If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides the value + provided by the pod spec. Value must be non-negative integer. + The value zero indicates stop immediately via the kill signal + (no opportunity to shut down). This is a beta field and + requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is + used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each + container in a pod must have a unique name (DNS_LABEL). Cannot + be updated. + type: string + ports: + description: List of ports to expose from the container. Not specifying + a port here DOES NOT prevent that port from being exposed. Any + port which is listening on the default "0.0.0.0" address inside + a container will be accessible from the network. Modifying this + array with strategic merge patch may corrupt the data. For more + information See https://github.com/kubernetes/kubernetes/issues/108255. + Cannot be updated. + items: + description: ContainerPort represents a network port in a single + container. 
+ properties: + containerPort: + description: Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, + this must be a valid port number, 0 < x < 65536. If HostNetwork + is specified, this must match ContainerPort. Most containers + do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME + and unique within the pod. Each named port in a pod must + have a unique name. Name for the port that can be referred + to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container + will be removed from service endpoints if the probe fails. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. 
Defaults to + 3. Minimum value is 1. + format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is a beta field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number must + be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place + in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior is + defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to + terminate gracefully upon probe failure. The grace period + is the duration in seconds after the processes running in + the pod are sent a termination signal and the time when + the processes are forcibly halted with a kill signal. Set + this value longer than the expected cleanup time for your + process. If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides the value + provided by the pod spec. Value must be non-negative integer. + The value zero indicates stop immediately via the kill signal + (no opportunity to shut down). This is a beta field and + requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is + used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. 
More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + securityContext: + description: 'SecurityContext defines the security options the + container should be run with. If set, the fields of SecurityContext + override the equivalent fields of PodSecurityContext. More info: + https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a + process can gain more privileges than its parent process. + This bool directly controls if the no_new_privs flag will + be set on the container process. 
AllowPrivilegeEscalation + is true always when the container is: 1) run as Privileged + 2) has CAP_SYS_ADMIN Note that this field cannot be set + when spec.os.name is windows.' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. + Defaults to the default set of capabilities granted by the + container runtime. Note that this field cannot be set when + spec.os.name is windows. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities + type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in + privileged containers are essentially equivalent to root + on the host. Defaults to false. Note that this field cannot + be set when spec.os.name is windows. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use + for the containers. The default is DefaultProcMount which + uses the container runtime defaults for readonly paths and + masked paths. This requires the ProcMountType feature flag + to be enabled. Note that this field cannot be set when spec.os.name + is windows. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. + Default is false. Note that this field cannot be set when + spec.os.name is windows. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container + process. Uses runtime default if unset. May also be set + in PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. 
+ format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. If true, the Kubelet will validate the image at runtime + to ensure that it does not run as UID 0 (root) and fail + to start the container if it does. If unset or false, no + such validation will be performed. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container + process. Defaults to user specified in image metadata if + unspecified. May also be set in PodSecurityContext. If + set in both SecurityContext and PodSecurityContext, the + value specified in SecurityContext takes precedence. Note + that this field cannot be set when spec.os.name is windows. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. + If unspecified, the container runtime will allocate a random + SELinux context for each container. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. Note that this field cannot be set when + spec.os.name is windows. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: The seccomp options to use by this container. + If seccomp options are provided at both the pod & container + level, the container options override the pod options. 
Note + that this field cannot be set when spec.os.name is windows. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. The profile must + be preconfigured on the node to work. Must be a descending + path, relative to the kubelet's configured seccomp profile + location. Must only be set if type is "Localhost". + type: string + type: + description: "type indicates which kind of seccomp profile + will be applied. Valid options are: \n Localhost - a + profile defined in a file on the node should be used. + RuntimeDefault - the container runtime default profile + should be used. Unconfined - no profile should be applied." + type: string + required: + - type + type: object + windowsOptions: + description: The Windows specific settings applied to all + containers. If unspecified, the options from the PodSecurityContext + will be used. If set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext takes precedence. + Note that this field cannot be set when spec.os.name is + linux. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the GMSA credential spec named + by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. This field is + alpha-level and will only be honored by components that + enable the WindowsHostProcessContainers feature flag. + Setting this field without the feature flag will result + in errors when validating the Pod. 
All of a Pod's containers + must have the same effective HostProcess value (it is + not allowed to have a mix of HostProcess containers + and non-HostProcess containers). In addition, if HostProcess + is true then HostNetwork must also be set to true. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. Defaults to the user specified + in image metadata if unspecified. May also be set in + PodSecurityContext. If set in both SecurityContext and + PodSecurityContext, the value specified in SecurityContext + takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully + initialized. If specified, no other probes are executed until + this completes successfully. If this probe fails, the Pod will + be restarted, just as if the livenessProbe failed. This can + be used to provide different probe parameters at the beginning + of a Pod''s lifecycle, when it might take a long time to load + data or warm a cache, than during steady-state operation. This + cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside + the container, the working directory for the command is + root ('/') in the container's filesystem. The command + is simply exec'd, it is not run inside a shell, so traditional + shell instructions ('|', etc) won't work. To use a shell, + you need to explicitly call out to that shell. Exit + status of 0 is treated as live/healthy and non-zero + is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to + be considered failed after having succeeded. Defaults to + 3. Minimum value is 1. 
+ format: int32 + type: integer + grpc: + description: GRPC specifies an action involving a GRPC port. + This is a beta field and requires enabling GRPCContainerProbe + feature gate. + properties: + port: + description: Port number of the gRPC service. Number must + be in the range 1 to 65535. + format: int32 + type: integer + service: + description: "Service is the name of the service to place + in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + \n If this is not specified, the default behavior is + defined by gRPC." + type: string + required: + - port + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the + pod IP. You probably want to set "Host" in httpHeaders + instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP + allows repeated headers. + items: + description: HTTPHeader describes a custom header to + be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. + Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started + before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. + Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to + be considered successful after having failed. Defaults to + 1. Must be 1 for liveness and startup. Minimum value is + 1. + format: int32 + type: integer + tcpSocket: + description: TCPSocket specifies an action involving a TCP + port. + properties: + host: + description: 'Optional: Host name to connect to, defaults + to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the + container. Number must be in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + description: Optional duration in seconds the pod needs to + terminate gracefully upon probe failure. The grace period + is the duration in seconds after the processes running in + the pod are sent a termination signal and the time when + the processes are forcibly halted with a kill signal. Set + this value longer than the expected cleanup time for your + process. If this value is nil, the pod's terminationGracePeriodSeconds + will be used. Otherwise, this value overrides the value + provided by the pod spec. Value must be non-negative integer. + The value zero indicates stop immediately via the kill signal + (no opportunity to shut down). This is a beta field and + requires enabling ProbeTerminationGracePeriod feature gate. + Minimum value is 1. spec.terminationGracePeriodSeconds is + used if unset. + format: int64 + type: integer + timeoutSeconds: + description: 'Number of seconds after which the probe times + out. Defaults to 1 second. Minimum value is 1. 
More info: + https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for + stdin in the container runtime. If this is not set, reads from + stdin in the container will always result in EOF. Default is + false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin + channel after it has been opened by a single attach. When stdin + is true the stdin stream will remain open across multiple attach + sessions. If stdinOnce is set to true, stdin is opened on container + start, is empty until the first client attaches to stdin, and + then remains open and accepts data until the client disconnects, + at which time stdin is closed and remains closed until the container + is restarted. If this flag is false, a container processes that + reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s + termination message will be written is mounted into the container''s + filesystem. Message written is intended to be brief final status, + such as an assertion failure message. Will be truncated by the + node if greater than 4096 bytes. The total message length across + all containers will be limited to 12kb. Defaults to /dev/termination-log. + Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. + File will use the contents of terminationMessagePath to populate + the container status message on both success and failure. FallbackToLogsOnError + will use the last chunk of container log output if the termination + message file is empty and the container exited with an error. + The log output is limited to 2048 bytes or 80 lines, whichever + is smaller. Defaults to File. Cannot be updated. 
+ type: string + tty: + description: Whether this container should allocate a TTY for + itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be + used by the container. + items: + description: volumeDevice describes a mapping of a raw block + device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container + that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim + in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. + Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within + a container. + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are + propagated from the host to container and the other way + around. When not set, MountPropagationNone is used. This + field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. 
+ type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, + the container runtime's default will be used, which might be + configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + metadata: + description: Metadata details. + type: object + x-kubernetes-preserve-unknown-fields: true + selector: + description: Selector + type: string + required: + - addon + - container + type: object + status: + description: ExtensionStatus defines the observed state of Extension + properties: + observedGeneration: + description: The most recent generation observed by the controller. + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/generated/crd/tackle.konveyor.io_tasks.yaml b/generated/crd/tackle.konveyor.io_tasks.yaml new file mode 100644 index 00000000..e8dcb2cf --- /dev/null +++ b/generated/crd/tackle.konveyor.io_tasks.yaml @@ -0,0 +1,62 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.10.0 + creationTimestamp: null + name: tasks.tackle.konveyor.io +spec: + group: tackle.konveyor.io + names: + kind: Task + listKind: TaskList + plural: tasks + singular: task + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TaskSpec defines the desired state of Task + properties: + data: + description: Data object passed to the addon.. + type: object + x-kubernetes-preserve-unknown-fields: true + dependencies: + description: Dependencies + items: + type: string + type: array + priority: + description: Priority + type: integer + type: object + status: + description: TaskStatus defines the observed state of Task + properties: + observedGeneration: + description: The most recent generation observed by the controller. + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/go.mod b/go.mod index 721012ca..75022634 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.20 require ( github.com/Nerzal/gocloak/v10 v10.0.1 + github.com/PaesslerAG/gval v1.2.2 github.com/andygrunwald/go-jira v1.16.0 github.com/gin-gonic/gin v1.9.1 github.com/go-logr/logr v1.2.4 @@ -77,6 +78,7 @@ require ( github.com/prometheus/procfs v0.9.0 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect github.com/segmentio/ksuid v1.0.4 // indirect + github.com/shopspring/decimal v1.3.1 // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/trivago/tgo v1.0.7 // indirect diff --git a/go.sum b/go.sum index a0cda824..88f63ca3 100644 --- a/go.sum +++ b/go.sum @@ -4,6 +4,10 @@ github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= github.com/Nerzal/gocloak/v10 v10.0.1 h1:W9pyD4I6w57ceNmjJoS4mXezBAxpupj11ytxper2KA8= github.com/Nerzal/gocloak/v10 v10.0.1/go.mod 
h1:18jh1lwSHEJeSvmdH+08JyJU/XjPdNYLWEZ7paDB2k8= +github.com/PaesslerAG/gval v1.2.2 h1:Y7iBzhgE09IGTt5QgGQ2IdaYYYOU134YGHBThD+wm9E= +github.com/PaesslerAG/gval v1.2.2/go.mod h1:XRFLwvmkTEdYziLdaCeCa5ImcGVrfQbeNUbVR+C6xac= +github.com/PaesslerAG/jsonpath v0.1.0 h1:gADYeifvlqK3R3i2cR5B4DGgxLXIPb3TRTH1mGi0jPI= +github.com/PaesslerAG/jsonpath v0.1.0/go.mod h1:4BzmtoM/PI8fPO4aQGIusjGxGir2BzcV0grWtFzq1Y8= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/andygrunwald/go-jira v1.16.0 h1:PU7C7Fkk5L96JvPc6vDVIrd99vdPnYudHu4ju2c2ikQ= @@ -193,6 +197,8 @@ github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= diff --git a/hack/add/application.sh b/hack/add/application.sh index fd688b55..9a9f17df 100755 --- a/hack/add/application.sh +++ b/hack/add/application.sh @@ -20,6 +20,7 @@ identities: - id: 2 tags: - id: 1 + - id: 16 ' curl -X POST ${host}/applications \ diff --git a/hack/dashboard.sh b/hack/dashboard.sh new file mode 100755 index 00000000..7c7aaeda --- /dev/null +++ b/hack/dashboard.sh @@ -0,0 +1,66 @@ +#!/bin/bash + +pid=$$ +self=$(basename $0) +tmp=/tmp/${self}-${pid} + +usage() { + 
echo "Usage: ${self}" + echo " -u konveyor URL" + echo " -h help" +} + +while getopts "u:h" arg; do + case $arg in + h) + usage + exit 1 + ;; + u) + host=$OPTARG + ;; + esac +done + +if [ -z "${host}" ] +then + echo "-u required." + usage + exit 0 +fi + +code=$(curl -kSs -o ${tmp} -w "%{http_code}" ${host}/tasks) +if [ ! $? -eq 0 ] +then + exit $? +fi +case ${code} in + 200) + echo ${tmp} + echo "ID | Kind | State | Pty | Application" + echo "--- | ----------|---------------|-----|---------------" + readarray report <<< $(jq -c '.[]|"\(.id) \(.kind) \(.state) \(.priority) \(.application.id) \(.application.name)"' ${tmp}) + for r in "${report[@]}" + do + r=${r//\"/} + t=($r) + id=${t[0]} + kind=${t[1]} + state=${t[2]} + pty=${t[3]} + appId=${t[4]} + appName=${t[5]} + if [ "${pty}" = "null" ] + then + pty=0 + fi + printf "%-6s%-12s%-16s%-4s%4s|%-10s\n" ${id} ${kind} ${state} ${pty} ${appId} ${appName} + done + ;; + *) + echo "FAILED: ${code}." + cat ${tmp} + exit 1 +esac + +rm -f ${tmp} diff --git a/hack/update/task.sh b/hack/update/task.sh index 4bc93b10..4a74af4e 100755 --- a/hack/update/task.sh +++ b/hack/update/task.sh @@ -12,5 +12,6 @@ curl -X PUT ${host}/tasks/${id}/report -d \ "status": "Running", "total": 10, "completed": 9, - "activity": "reading /files/application/dog.java." + "activity": ["reading /files/application/dog.java."], + "attached": [{"id":4,"name":"Test","activity":9}] }' diff --git a/k8s/api/tackle/v1alpha1/addon.go b/k8s/api/tackle/v1alpha1/addon.go index b01f370e..deaf695e 100644 --- a/k8s/api/tackle/v1alpha1/addon.go +++ b/k8s/api/tackle/v1alpha1/addon.go @@ -19,18 +19,19 @@ package v1alpha1 import ( core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) // AddonSpec defines the desired state of Addon type AddonSpec struct { - // Addon fqin. - Image string `json:"image"` - // ImagePullPolicy an optional image pull policy. 
- // +kubebuilder:default=IfNotPresent - // +kubebuilder:validation:Enum=IfNotPresent;Always;Never - ImagePullPolicy core.PullPolicy `json:"imagePullPolicy,omitempty"` - // Resource requirements. - Resources core.ResourceRequirements `json:"resources,omitempty"` + // Task (kind) compatibility. + Task string `json:"task,omitempty"` + // Selector + Selector string `json:"selector,omitempty"` + // Container details. + Container core.Container `json:"container"` + // Metadata details. + Metadata runtime.RawExtension `json:"metadata,omitempty"` } // AddonStatus defines the observed state of Addon diff --git a/k8s/api/tackle/v1alpha1/extension.go b/k8s/api/tackle/v1alpha1/extension.go new file mode 100644 index 00000000..4076a66e --- /dev/null +++ b/k8s/api/tackle/v1alpha1/extension.go @@ -0,0 +1,66 @@ +/* +Copyright 2019 Red Hat Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// ExtensionSpec defines the desired state of Extension +type ExtensionSpec struct { + // Addon compatibility. + Addon string `json:"addon"` + // Container details. + Container core.Container `json:"container"` + // Selector + Selector string `json:"selector,omitempty"` + // Metadata details. 
+ Metadata runtime.RawExtension `json:"metadata,omitempty"` +} + +// ExtensionStatus defines the observed state of Extension +type ExtensionStatus struct { + // The most recent generation observed by the controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="READY",type=string,JSONPath=".status.conditions[?(@.type=='Ready')].status" +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +type Extension struct { + meta.TypeMeta `json:",inline"` + meta.ObjectMeta `json:"metadata,omitempty"` + Spec ExtensionSpec `json:"spec,omitempty"` + Status ExtensionStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ExtensionList struct { + meta.TypeMeta `json:",inline"` + meta.ListMeta `json:"metadata,omitempty"` + Items []Extension `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Extension{}, &ExtensionList{}) +} diff --git a/k8s/api/tackle/v1alpha1/task.go b/k8s/api/tackle/v1alpha1/task.go new file mode 100644 index 00000000..9ab65848 --- /dev/null +++ b/k8s/api/tackle/v1alpha1/task.go @@ -0,0 +1,73 @@ +/* +Copyright 2019 Red Hat Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// TaskSpec defines the desired state of Task +type TaskSpec struct { + // Priority + Priority int `json:"priority,omitempty"` + // Dependencies + Dependencies []string `json:"dependencies,omitempty"` + // Data object passed to the addon.. + Data runtime.RawExtension `json:"data,omitempty"` +} + +// TaskStatus defines the observed state of Task +type TaskStatus struct { + // The most recent generation observed by the controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +type Task struct { + meta.TypeMeta `json:",inline"` + meta.ObjectMeta `json:"metadata,omitempty"` + Spec TaskSpec `json:"spec,omitempty"` + Status TaskStatus `json:"status,omitempty"` +} + +// HasDep return true if the task has the dependency. +func (r *Task) HasDep(name string) (found bool) { + for i := range r.Spec.Dependencies { + n := r.Spec.Dependencies[i] + if n == name { + found = true + break + } + } + return +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type TaskList struct { + meta.TypeMeta `json:",inline"` + meta.ListMeta `json:"metadata,omitempty"` + Items []Task `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Task{}, &TaskList{}) +} diff --git a/k8s/api/tackle/v1alpha1/zz_generated.deepcopy.go b/k8s/api/tackle/v1alpha1/zz_generated.deepcopy.go index 821a9ba1..2ab85860 100644 --- a/k8s/api/tackle/v1alpha1/zz_generated.deepcopy.go +++ b/k8s/api/tackle/v1alpha1/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ limitations under the License. package v1alpha1 import ( - runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. @@ -87,7 +87,8 @@ func (in *AddonList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AddonSpec) DeepCopyInto(out *AddonSpec) { *out = *in - in.Resources.DeepCopyInto(&out.Resources) + in.Container.DeepCopyInto(&out.Container) + in.Metadata.DeepCopyInto(&out.Metadata) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddonSpec. @@ -115,6 +116,97 @@ func (in *AddonStatus) DeepCopy() *AddonStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Extension) DeepCopyInto(out *Extension) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extension. +func (in *Extension) DeepCopy() *Extension { + if in == nil { + return nil + } + out := new(Extension) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Extension) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtensionList) DeepCopyInto(out *ExtensionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Extension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionList. 
+func (in *ExtensionList) DeepCopy() *ExtensionList { + if in == nil { + return nil + } + out := new(ExtensionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExtensionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtensionSpec) DeepCopyInto(out *ExtensionSpec) { + *out = *in + in.Container.DeepCopyInto(&out.Container) + in.Metadata.DeepCopyInto(&out.Metadata) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionSpec. +func (in *ExtensionSpec) DeepCopy() *ExtensionSpec { + if in == nil { + return nil + } + out := new(ExtensionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtensionStatus) DeepCopyInto(out *ExtensionStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionStatus. +func (in *ExtensionStatus) DeepCopy() *ExtensionStatus { + if in == nil { + return nil + } + out := new(ExtensionStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Tackle) DeepCopyInto(out *Tackle) { *out = *in @@ -171,3 +263,98 @@ func (in *TackleList) DeepCopyObject() runtime.Object { } return nil } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Task) DeepCopyInto(out *Task) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Task. +func (in *Task) DeepCopy() *Task { + if in == nil { + return nil + } + out := new(Task) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Task) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskList) DeepCopyInto(out *TaskList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Task, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskList. +func (in *TaskList) DeepCopy() *TaskList { + if in == nil { + return nil + } + out := new(TaskList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TaskList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskSpec) DeepCopyInto(out *TaskSpec) { + *out = *in + if in.Dependencies != nil { + in, out := &in.Dependencies, &out.Dependencies + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.Data.DeepCopyInto(&out.Data) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskSpec. 
+func (in *TaskSpec) DeepCopy() *TaskSpec { + if in == nil { + return nil + } + out := new(TaskSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskStatus) DeepCopyInto(out *TaskStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskStatus. +func (in *TaskStatus) DeepCopy() *TaskStatus { + if in == nil { + return nil + } + out := new(TaskStatus) + in.DeepCopyInto(out) + return out +} diff --git a/k8s/client.go b/k8s/client.go index 3051d86c..ca5f7896 100644 --- a/k8s/client.go +++ b/k8s/client.go @@ -7,6 +7,7 @@ import ( "github.com/konveyor/tackle2-hub/settings" "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" + k8s "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" @@ -26,9 +27,15 @@ func NewClient() (newClient client.Client, err error) { client.Options{ Scheme: scheme.Scheme, }) - if err != nil { - err = liberr.Wrap(err) - } + err = liberr.Wrap(err) + return +} + +// NewClientSet builds new k8s client. 
+func NewClientSet() (newClient *k8s.Clientset, err error) { + cfg, _ := config.GetConfig() + newClient, err = k8s.NewForConfig(cfg) + err = liberr.Wrap(err) return } diff --git a/migration/pkg.go b/migration/pkg.go index 1bb72b55..0887c5f2 100644 --- a/migration/pkg.go +++ b/migration/pkg.go @@ -5,6 +5,7 @@ import ( v10 "github.com/konveyor/tackle2-hub/migration/v10" v11 "github.com/konveyor/tackle2-hub/migration/v11" v12 "github.com/konveyor/tackle2-hub/migration/v12" + v13 "github.com/konveyor/tackle2-hub/migration/v13" v2 "github.com/konveyor/tackle2-hub/migration/v2" v3 "github.com/konveyor/tackle2-hub/migration/v3" v4 "github.com/konveyor/tackle2-hub/migration/v4" @@ -52,5 +53,6 @@ func All() []Migration { v10.Migration{}, v11.Migration{}, v12.Migration{}, + v13.Migration{}, } } diff --git a/migration/v13/migrate.go b/migration/v13/migrate.go new file mode 100644 index 00000000..b4c0b4f4 --- /dev/null +++ b/migration/v13/migrate.go @@ -0,0 +1,29 @@ +package v13 + +import ( + liberr "github.com/jortel/go-utils/error" + "github.com/jortel/go-utils/logr" + "github.com/konveyor/tackle2-hub/migration/v13/model" + "gorm.io/gorm" +) + +var log = logr.WithName("migration|v13") + +type Migration struct{} + +func (r Migration) Apply(db *gorm.DB) (err error) { + err = db.Migrator().DropColumn(&model.Task{}, "Policy") + if err != nil { + err = liberr.Wrap(err) + return + } + err = db.AutoMigrate(r.Models()...) + if err != nil { + return + } + return +} + +func (r Migration) Models() []interface{} { + return model.All() +} diff --git a/migration/v13/model/analysis.go b/migration/v13/model/analysis.go new file mode 100644 index 00000000..6869a5bc --- /dev/null +++ b/migration/v13/model/analysis.go @@ -0,0 +1,156 @@ +package model + +import "gorm.io/gorm" + +// Analysis report. 
+type Analysis struct { + Model + Effort int + Archived bool `json:"archived"` + Summary JSON `gorm:"type:json"` + Issues []Issue `gorm:"constraint:OnDelete:CASCADE"` + Dependencies []TechDependency `gorm:"constraint:OnDelete:CASCADE"` + ApplicationID uint `gorm:"index;not null"` + Application *Application +} + +// TechDependency report dependency. +type TechDependency struct { + Model + Provider string `gorm:"uniqueIndex:depA"` + Name string `gorm:"uniqueIndex:depA"` + Version string `gorm:"uniqueIndex:depA"` + SHA string `gorm:"uniqueIndex:depA"` + Indirect bool + Labels JSON `gorm:"type:json"` + AnalysisID uint `gorm:"index;uniqueIndex:depA;not null"` + Analysis *Analysis +} + +// Issue report issue (violation). +type Issue struct { + Model + RuleSet string `gorm:"uniqueIndex:issueA;not null"` + Rule string `gorm:"uniqueIndex:issueA;not null"` + Name string `gorm:"index"` + Description string + Category string `gorm:"index;not null"` + Incidents []Incident `gorm:"foreignKey:IssueID;constraint:OnDelete:CASCADE"` + Links JSON `gorm:"type:json"` + Facts JSON `gorm:"type:json"` + Labels JSON `gorm:"type:json"` + Effort int `gorm:"index;not null"` + AnalysisID uint `gorm:"index;uniqueIndex:issueA;not null"` + Analysis *Analysis +} + +// Incident report an issue incident. +type Incident struct { + Model + File string `gorm:"index;not null"` + Line int + Message string + CodeSnip string + Facts JSON `gorm:"type:json"` + IssueID uint `gorm:"index;not null"` + Issue *Issue +} + +// Link URL link. +type Link struct { + URL string `json:"url"` + Title string `json:"title,omitempty"` +} + +// ArchivedIssue resource created when issues are archived. 
+type ArchivedIssue struct { + RuleSet string `json:"ruleSet"` + Rule string `json:"rule"` + Name string `json:"name,omitempty" yaml:",omitempty"` + Description string `json:"description,omitempty" yaml:",omitempty"` + Category string `json:"category"` + Effort int `json:"effort"` + Incidents int `json:"incidents"` +} + +// RuleSet - Analysis ruleset. +type RuleSet struct { + Model + UUID *string `gorm:"uniqueIndex"` + Kind string + Name string `gorm:"uniqueIndex;not null"` + Description string + Repository JSON `gorm:"type:json"` + IdentityID *uint `gorm:"index"` + Identity *Identity + Rules []Rule `gorm:"constraint:OnDelete:CASCADE"` + DependsOn []RuleSet `gorm:"many2many:RuleSetDependencies;constraint:OnDelete:CASCADE"` +} + +func (r *RuleSet) Builtin() bool { + return r.UUID != nil +} + +// BeforeUpdate hook to avoid cyclic dependencies. +func (r *RuleSet) BeforeUpdate(db *gorm.DB) (err error) { + seen := make(map[uint]bool) + var nextDeps []RuleSet + var nextRuleSetIDs []uint + for _, dep := range r.DependsOn { + nextRuleSetIDs = append(nextRuleSetIDs, dep.ID) + } + for len(nextRuleSetIDs) != 0 { + result := db.Preload("DependsOn").Where("ID IN ?", nextRuleSetIDs).Find(&nextDeps) + if result.Error != nil { + err = result.Error + return + } + nextRuleSetIDs = nextRuleSetIDs[:0] + for _, nextDep := range nextDeps { + for _, dep := range nextDep.DependsOn { + if seen[dep.ID] { + continue + } + if dep.ID == r.ID { + err = DependencyCyclicError{} + return + } + seen[dep.ID] = true + nextRuleSetIDs = append(nextRuleSetIDs, dep.ID) + } + } + } + + return +} + +// Rule - Analysis rule. +type Rule struct { + Model + Name string + Description string + Labels JSON `gorm:"type:json"` + RuleSetID uint `gorm:"uniqueIndex:RuleA;not null"` + RuleSet *RuleSet + FileID *uint `gorm:"uniqueIndex:RuleA" ref:"file"` + File *File +} + +// Target - analysis rule selector. 
+type Target struct { + Model + UUID *string `gorm:"uniqueIndex"` + Name string `gorm:"uniqueIndex;not null"` + Description string + Provider string + Choice bool + Labels JSON `gorm:"type:json"` + ImageID uint `gorm:"index" ref:"file"` + Image *File + RuleSetID *uint `gorm:"index"` + RuleSet *RuleSet +} + +func (r *Target) Builtin() bool { + return r.UUID != nil +} diff --git a/migration/v13/model/application.go b/migration/v13/model/application.go new file mode 100644 index 00000000..1bd92875 --- /dev/null +++ b/migration/v13/model/application.go @@ -0,0 +1,298 @@ +package model + +import ( + "fmt" + "sync" + "time" + + "gorm.io/gorm" +) + +type Application struct { + Model + BucketOwner + Name string `gorm:"index;unique;not null"` + Description string + Review *Review `gorm:"constraint:OnDelete:CASCADE"` + Repository JSON `gorm:"type:json"` + Binary string + Facts []Fact `gorm:"constraint:OnDelete:CASCADE"` + Comments string + Tasks []Task `gorm:"constraint:OnDelete:CASCADE"` + Tags []Tag `gorm:"many2many:ApplicationTags"` + Identities []Identity `gorm:"many2many:ApplicationIdentity;constraint:OnDelete:CASCADE"` + BusinessServiceID *uint `gorm:"index"` + BusinessService *BusinessService + OwnerID *uint `gorm:"index"` + Owner *Stakeholder `gorm:"foreignKey:OwnerID"` + Contributors []Stakeholder `gorm:"many2many:ApplicationContributors;constraint:OnDelete:CASCADE"` + Analyses []Analysis `gorm:"constraint:OnDelete:CASCADE"` + MigrationWaveID *uint `gorm:"index"` + MigrationWave *MigrationWave + Ticket *Ticket `gorm:"constraint:OnDelete:CASCADE"` + Assessments []Assessment `gorm:"constraint:OnDelete:CASCADE"` +} + +type Fact struct { + ApplicationID uint `gorm:"<-:create;primaryKey"` + Key string `gorm:"<-:create;primaryKey"` + Source string `gorm:"<-:create;primaryKey;not null"` + Value JSON `gorm:"type:json;not null"` + Application *Application +} + +// ApplicationTag represents a row in the join table for the +// many-to-many relationship between Applications and 
Tags. +type ApplicationTag struct { + ApplicationID uint `gorm:"primaryKey"` + TagID uint `gorm:"primaryKey"` + Source string `gorm:"primaryKey;not null"` + Application Application `gorm:"constraint:OnDelete:CASCADE"` + Tag Tag `gorm:"constraint:OnDelete:CASCADE"` +} + +// TableName must return "ApplicationTags" to ensure compatibility +// with the autogenerated join table name. +func (ApplicationTag) TableName() string { + return "ApplicationTags" +} + +// depMutex ensures Dependency.Create() is not executed concurrently. +var depMutex sync.Mutex + +type Dependency struct { + Model + ToID uint `gorm:"index"` + To *Application `gorm:"foreignKey:ToID;constraint:OnDelete:CASCADE"` + FromID uint `gorm:"index"` + From *Application `gorm:"foreignKey:FromID;constraint:OnDelete:CASCADE"` +} + +// Create a dependency synchronized using a mutex. +func (r *Dependency) Create(db *gorm.DB) (err error) { + depMutex.Lock() + defer depMutex.Unlock() + err = db.Create(r).Error + return +} + +// Validation Hook to avoid cyclic dependencies. +func (r *Dependency) BeforeCreate(db *gorm.DB) (err error) { + var nextDeps []*Dependency + var nextAppsIDs []uint + nextAppsIDs = append(nextAppsIDs, r.FromID) + for len(nextAppsIDs) != 0 { + db.Where("ToID IN ?", nextAppsIDs).Find(&nextDeps) + nextAppsIDs = nextAppsIDs[:0] // empty array, but keep capacity + for _, nextDep := range nextDeps { + if nextDep.FromID == r.ToID { + err = DependencyCyclicError{} + return + } + nextAppsIDs = append(nextAppsIDs, nextDep.FromID) + } + } + + return +} + +// Custom error type to allow API recognize Cyclic Dependency error and assign proper status code. 
+type DependencyCyclicError struct{} + +func (err DependencyCyclicError) Error() string { + return "cyclic dependencies are not allowed" +} + +type BusinessService struct { + Model + Name string `gorm:"index;unique;not null"` + Description string + Applications []Application `gorm:"constraint:OnDelete:SET NULL"` + StakeholderID *uint `gorm:"index"` + Stakeholder *Stakeholder +} + +type JobFunction struct { + Model + UUID *string `gorm:"uniqueIndex"` + Username string + Name string `gorm:"index;unique;not null"` + Stakeholders []Stakeholder `gorm:"constraint:OnDelete:SET NULL"` +} + +type Stakeholder struct { + Model + Name string `gorm:"not null;"` + Email string `gorm:"index;unique;not null"` + Groups []StakeholderGroup `gorm:"many2many:StakeholderGroupStakeholder;constraint:OnDelete:CASCADE"` + BusinessServices []BusinessService `gorm:"constraint:OnDelete:SET NULL"` + JobFunctionID *uint `gorm:"index"` + JobFunction *JobFunction + Owns []Application `gorm:"foreignKey:OwnerID;constraint:OnDelete:SET NULL"` + Contributes []Application `gorm:"many2many:ApplicationContributors;constraint:OnDelete:CASCADE"` + MigrationWaves []MigrationWave `gorm:"many2many:MigrationWaveStakeholders;constraint:OnDelete:CASCADE"` + Assessments []Assessment `gorm:"many2many:AssessmentStakeholders;constraint:OnDelete:CASCADE"` + Archetypes []Archetype `gorm:"many2many:ArchetypeStakeholders;constraint:OnDelete:CASCADE"` +} + +type StakeholderGroup struct { + Model + Name string `gorm:"index;unique;not null"` + Username string + Description string + Stakeholders []Stakeholder `gorm:"many2many:StakeholderGroupStakeholder;constraint:OnDelete:CASCADE"` + MigrationWaves []MigrationWave `gorm:"many2many:MigrationWaveStakeholderGroups;constraint:OnDelete:CASCADE"` + Assessments []Assessment `gorm:"many2many:AssessmentStakeholderGroups;constraint:OnDelete:CASCADE"` + Archetypes []Archetype `gorm:"many2many:ArchetypeStakeholderGroups;constraint:OnDelete:CASCADE"` +} + +type MigrationWave struct { + 
Model + Name string `gorm:"uniqueIndex:MigrationWaveA"` + StartDate time.Time `gorm:"uniqueIndex:MigrationWaveA"` + EndDate time.Time `gorm:"uniqueIndex:MigrationWaveA"` + Applications []Application `gorm:"constraint:OnDelete:SET NULL"` + Stakeholders []Stakeholder `gorm:"many2many:MigrationWaveStakeholders;constraint:OnDelete:CASCADE"` + StakeholderGroups []StakeholderGroup `gorm:"many2many:MigrationWaveStakeholderGroups;constraint:OnDelete:CASCADE"` +} + +type Archetype struct { + Model + Name string + Description string + Comments string + Review *Review `gorm:"constraint:OnDelete:CASCADE"` + Assessments []Assessment `gorm:"constraint:OnDelete:CASCADE"` + CriteriaTags []Tag `gorm:"many2many:ArchetypeCriteriaTags;constraint:OnDelete:CASCADE"` + Tags []Tag `gorm:"many2many:ArchetypeTags;constraint:OnDelete:CASCADE"` + Stakeholders []Stakeholder `gorm:"many2many:ArchetypeStakeholders;constraint:OnDelete:CASCADE"` + StakeholderGroups []StakeholderGroup `gorm:"many2many:ArchetypeStakeholderGroups;constraint:OnDelete:CASCADE"` +} + +type Tag struct { + Model + UUID *string `gorm:"uniqueIndex"` + Name string `gorm:"uniqueIndex:tagA;not null"` + Username string + CategoryID uint `gorm:"uniqueIndex:tagA;index;not null"` + Category TagCategory +} + +type TagCategory struct { + Model + UUID *string `gorm:"uniqueIndex"` + Name string `gorm:"index;unique;not null"` + Username string + Rank uint + Color string + Tags []Tag `gorm:"foreignKey:CategoryID;constraint:OnDelete:CASCADE"` +} + +type Ticket struct { + Model + // Kind of ticket in the external tracker. + Kind string `gorm:"not null"` + // Parent resource that this ticket should belong to in the tracker. (e.g. 
Jira project) + Parent string `gorm:"not null"` + // Custom fields to send to the tracker when creating the ticket + Fields JSON `gorm:"type:json"` + // Whether the last attempt to do something with the ticket reported an error + Error bool + // Error message, if any + Message string + // Whether the ticket was created in the external tracker + Created bool + // Reference id in external tracker + Reference string + // URL to ticket in external tracker + Link string + // Status of ticket in external tracker + Status string + LastUpdated time.Time + Application *Application + ApplicationID uint `gorm:"uniqueIndex:ticketA;not null"` + Tracker *Tracker + TrackerID uint `gorm:"uniqueIndex:ticketA;not null"` +} + +type Tracker struct { + Model + Name string `gorm:"index;unique;not null"` + URL string + Kind string + Identity *Identity + IdentityID uint + Connected bool + LastUpdated time.Time + Message string + Insecure bool + Tickets []Ticket +} + +type Import struct { + Model + Filename string + ApplicationName string + BusinessService string + Comments string + Dependency string + DependencyDirection string + Description string + ErrorMessage string + IsValid bool + RecordType1 string + ImportSummary ImportSummary + ImportSummaryID uint `gorm:"index"` + Processed bool + ImportTags []ImportTag `gorm:"constraint:OnDelete:CASCADE"` + BinaryGroup string + BinaryArtifact string + BinaryVersion string + BinaryPackaging string + RepositoryKind string + RepositoryURL string + RepositoryBranch string + RepositoryPath string + Owner string + Contributors string +} + +func (r *Import) AsMap() (m map[string]interface{}) { + m = make(map[string]interface{}) + m["filename"] = r.Filename + m["applicationName"] = r.ApplicationName + // "Application Name" is necessary in order for + // the UI to display the error report correctly. 
+ m["Application Name"] = r.ApplicationName + m["businessService"] = r.BusinessService + m["comments"] = r.Comments + m["dependency"] = r.Dependency + m["dependencyDirection"] = r.DependencyDirection + m["description"] = r.Description + m["errorMessage"] = r.ErrorMessage + m["isValid"] = r.IsValid + m["processed"] = r.Processed + m["recordType1"] = r.RecordType1 + for i, tag := range r.ImportTags { + m[fmt.Sprintf("category%v", i+1)] = tag.Category + m[fmt.Sprintf("tag%v", i+1)] = tag.Name + } + return +} + +type ImportSummary struct { + Model + Content []byte + Filename string + ImportStatus string + Imports []Import `gorm:"constraint:OnDelete:CASCADE"` + CreateEntities bool +} + +type ImportTag struct { + Model + Name string + Category string + ImportID uint `gorm:"index"` + Import *Import +} diff --git a/migration/v13/model/assessment.go b/migration/v13/model/assessment.go new file mode 100644 index 00000000..3a734e86 --- /dev/null +++ b/migration/v13/model/assessment.go @@ -0,0 +1,46 @@ +package model + +type Questionnaire struct { + Model + UUID *string `gorm:"uniqueIndex"` + Name string `gorm:"unique"` + Description string + Required bool + Sections JSON `gorm:"type:json"` + Thresholds JSON `gorm:"type:json"` + RiskMessages JSON `gorm:"type:json"` + Assessments []Assessment `gorm:"constraint:OnDelete:CASCADE"` +} + +// Builtin returns true if this is a Konveyor-provided questionnaire. 
+func (r *Questionnaire) Builtin() bool { + return r.UUID != nil +} + +type Assessment struct { + Model + ApplicationID *uint `gorm:"uniqueIndex:AssessmentA"` + Application *Application + ArchetypeID *uint `gorm:"uniqueIndex:AssessmentB"` + Archetype *Archetype + QuestionnaireID uint `gorm:"uniqueIndex:AssessmentA;uniqueIndex:AssessmentB"` + Questionnaire Questionnaire + Sections JSON `gorm:"type:json"` + Thresholds JSON `gorm:"type:json"` + RiskMessages JSON `gorm:"type:json"` + Stakeholders []Stakeholder `gorm:"many2many:AssessmentStakeholders;constraint:OnDelete:CASCADE"` + StakeholderGroups []StakeholderGroup `gorm:"many2many:AssessmentStakeholderGroups;constraint:OnDelete:CASCADE"` +} + +type Review struct { + Model + BusinessCriticality uint `gorm:"not null"` + EffortEstimate string `gorm:"not null"` + ProposedAction string `gorm:"not null"` + WorkPriority uint `gorm:"not null"` + Comments string + ApplicationID *uint `gorm:"uniqueIndex"` + Application *Application + ArchetypeID *uint `gorm:"uniqueIndex"` + Archetype *Archetype +} diff --git a/migration/v13/model/core.go b/migration/v13/model/core.go new file mode 100644 index 00000000..4d5165f4 --- /dev/null +++ b/migration/v13/model/core.go @@ -0,0 +1,366 @@ +package model + +import ( + "encoding/json" + "os" + "path" + "time" + + "github.com/google/uuid" + liberr "github.com/jortel/go-utils/error" + "github.com/konveyor/tackle2-hub/encryption" + "gorm.io/gorm" +) + +// Model Base model. +type Model struct { + ID uint `gorm:"<-:create;primaryKey"` + CreateTime time.Time `gorm:"<-:create;autoCreateTime"` + CreateUser string `gorm:"<-:create"` + UpdateUser string +} + +type Setting struct { + Model + Key string `gorm:"<-:create;uniqueIndex"` + Value JSON `gorm:"type:json"` +} + +// With updates the value of the Setting with the json representation +// of the `value` parameter. 
+func (r *Setting) With(value interface{}) (err error) { + r.Value, err = json.Marshal(value) + if err != nil { + err = liberr.Wrap(err) + } + return +} + +// As unmarshalls the value of the Setting into the `ptr` parameter. +func (r *Setting) As(ptr interface{}) (err error) { + err = json.Unmarshal(r.Value, ptr) + if err != nil { + err = liberr.Wrap(err) + } + return +} + +type Bucket struct { + Model + Path string `gorm:"<-:create;uniqueIndex"` + Expiration *time.Time +} + +func (m *Bucket) BeforeCreate(db *gorm.DB) (err error) { + if m.Path == "" { + uid := uuid.New() + m.Path = path.Join( + Settings.Hub.Bucket.Path, + uid.String()) + err = os.MkdirAll(m.Path, 0777) + if err != nil { + err = liberr.Wrap( + err, + "path", + m.Path) + } + } + return +} + +type BucketOwner struct { + BucketID *uint `gorm:"index" ref:"bucket"` + Bucket *Bucket +} + +func (m *BucketOwner) BeforeCreate(db *gorm.DB) (err error) { + if !m.HasBucket() { + b := &Bucket{} + err = db.Create(b).Error + m.SetBucket(&b.ID) + } + return +} + +func (m *BucketOwner) SetBucket(id *uint) { + m.BucketID = id + m.Bucket = nil +} + +func (m *BucketOwner) HasBucket() (b bool) { + return m.BucketID != nil +} + +type File struct { + Model + Name string + Path string `gorm:"<-:create;uniqueIndex"` + Expiration *time.Time +} + +func (m *File) BeforeCreate(db *gorm.DB) (err error) { + uid := uuid.New() + m.Path = path.Join( + Settings.Hub.Bucket.Path, + ".file", + uid.String()) + err = os.MkdirAll(path.Dir(m.Path), 0777) + if err != nil { + err = liberr.Wrap( + err, + "path", + m.Path) + } + return +} + +type Task struct { + Model + BucketOwner + Name string `gorm:"index"` + Kind string + Addon string `gorm:"index"` + Extensions []string `gorm:"type:json;serializer:json"` + State string `gorm:"index"` + Locator string `gorm:"index"` + Priority int + Policy TaskPolicy `gorm:"type:json;serializer:json"` + TTL TTL `gorm:"type:json;serializer:json"` + Data Map `gorm:"type:json;serializer:json"` + Started 
*time.Time + Terminated *time.Time + Errors []TaskError `gorm:"type:json;serializer:json"` + Events []TaskEvent `gorm:"type:json;serializer:json"` + Pod string `gorm:"index"` + Retries int + Attached []Attachment `gorm:"type:json;serializer:json" ref:"[]file"` + Report *TaskReport `gorm:"constraint:OnDelete:CASCADE"` + ApplicationID *uint + Application *Application + TaskGroupID *uint `gorm:"<-:create"` + TaskGroup *TaskGroup +} + +func (m *Task) BeforeCreate(db *gorm.DB) (err error) { + err = m.BucketOwner.BeforeCreate(db) + return +} + +// TaskEvent task event. +type TaskEvent struct { + Kind string `json:"kind"` + Count int `json:"count"` + Reason string `json:"reason,omitempty" yaml:",omitempty"` + Last time.Time `json:"last"` +} + +// Map alias. +type Map = map[string]any + +// TTL time-to-live. +type TTL struct { + Created int `json:"created,omitempty" yaml:",omitempty"` + Pending int `json:"pending,omitempty" yaml:",omitempty"` + Running int `json:"running,omitempty" yaml:",omitempty"` + Succeeded int `json:"succeeded,omitempty" yaml:",omitempty"` + Failed int `json:"failed,omitempty" yaml:",omitempty"` +} + +// Ref represents a FK. +type Ref struct { + ID uint `json:"id" binding:"required"` + Name string `json:"name,omitempty" yaml:",omitempty"` +} + +// TaskError used in Task.Errors. +type TaskError struct { + Severity string `json:"severity"` + Description string `json:"description"` +} + +// TaskPolicy scheduling policy. +type TaskPolicy struct { + Isolated bool `json:"isolated,omitempty" yaml:",omitempty"` + PreemptEnabled bool `json:"preemptEnabled,omitempty" yaml:"preemptEnabled,omitempty"` + PreemptExempt bool `json:"preemptExempt,omitempty" yaml:"preemptExempt,omitempty"` +} + +// Attachment file attachment. 
+type Attachment struct { + ID uint `json:"id" binding:"required"` + Name string `json:"name,omitempty" yaml:",omitempty"` + Activity int `json:"activity,omitempty" yaml:",omitempty"` +} + +type TaskReport struct { + Model + Status string + Total int + Completed int + Activity []string `gorm:"type:json;serializer:json"` + Errors []TaskError `gorm:"type:json;serializer:json"` + Attached []Attachment `gorm:"type:json;serializer:json" ref:"[]file"` + Result Map `gorm:"type:json;serializer:json"` + TaskID uint `gorm:"<-:create;uniqueIndex"` + Task *Task +} + +type TaskGroup struct { + Model + BucketOwner + Name string + Kind string + Addon string + Extensions []string `gorm:"type:json;serializer:json"` + State string + Priority int + Policy TaskPolicy `gorm:"type:json;serializer:json"` + Data Map `gorm:"type:json;serializer:json"` + List []Task `gorm:"type:json;serializer:json"` + Tasks []Task `gorm:"constraint:OnDelete:CASCADE"` +} + +// Propagate group data into the task. +func (m *TaskGroup) Propagate() (err error) { + for i := range m.Tasks { + task := &m.Tasks[i] + task.Kind = m.Kind + task.Addon = m.Addon + task.Extensions = m.Extensions + task.Priority = m.Priority + task.Policy = m.Policy + task.State = m.State + task.SetBucket(m.BucketID) + if m.Data != nil { + task.Data = m.merge(m.Data, task.Data) + } + } + + return +} + +// merge maps B into A. +// The B map is the authority. +func (m *TaskGroup) merge(a, b Map) (out Map) { + if a == nil { + a = Map{} + } + if b == nil { + b = Map{} + } + out = Map{} + // + // Merge-in elements found in B and in A. + for k, v := range a { + out[k] = v + if bv, found := b[k]; found { + out[k] = bv + if av, cast := v.(Map); cast { + if bv, cast := bv.(Map); cast { + out[k] = m.merge(av, bv) + } else { + out[k] = bv + } + } + } + } + // + // Add elements found only in B. + for k, v := range b { + if _, found := a[k]; !found { + out[k] = v + } + } + + return +} + +// Proxy configuration. 
+// kind = (http|https) +type Proxy struct { + Model + Enabled bool + Kind string `gorm:"uniqueIndex"` + Host string `gorm:"not null"` + Port int + Excluded JSON `gorm:"type:json"` + IdentityID *uint `gorm:"index"` + Identity *Identity +} + +// Identity represents and identity with a set of credentials. +type Identity struct { + Model + Kind string `gorm:"not null"` + Name string `gorm:"index;unique;not null"` + Description string + User string + Password string + Key string + Settings string + Proxies []Proxy `gorm:"constraint:OnDelete:SET NULL"` +} + +// Encrypt sensitive fields. +// The ref identity is used to determine when sensitive fields +// have changed and need to be (re)encrypted. +func (r *Identity) Encrypt(ref *Identity) (err error) { + passphrase := Settings.Encryption.Passphrase + aes := encryption.New(passphrase) + if r.Password != ref.Password { + if r.Password != "" { + r.Password, err = aes.Encrypt(r.Password) + if err != nil { + err = liberr.Wrap(err) + return + } + } + } + if r.Key != ref.Key { + if r.Key != "" { + r.Key, err = aes.Encrypt(r.Key) + if err != nil { + err = liberr.Wrap(err) + return + } + } + } + if r.Settings != ref.Settings { + if r.Settings != "" { + r.Settings, err = aes.Encrypt(r.Settings) + if err != nil { + err = liberr.Wrap(err) + return + } + } + } + return +} + +// Decrypt sensitive fields. 
+func (r *Identity) Decrypt() (err error) { + passphrase := Settings.Encryption.Passphrase + aes := encryption.New(passphrase) + if r.Password != "" { + r.Password, err = aes.Decrypt(r.Password) + if err != nil { + err = liberr.Wrap(err) + return + } + } + if r.Key != "" { + r.Key, err = aes.Decrypt(r.Key) + if err != nil { + err = liberr.Wrap(err) + return + } + } + if r.Settings != "" { + r.Settings, err = aes.Decrypt(r.Settings) + if err != nil { + err = liberr.Wrap(err) + return + } + } + return +} diff --git a/migration/v13/model/pkg.go b/migration/v13/model/pkg.go new file mode 100644 index 00000000..d9615512 --- /dev/null +++ b/migration/v13/model/pkg.go @@ -0,0 +1,55 @@ +package model + +import ( + "github.com/konveyor/tackle2-hub/settings" +) + +var ( + Settings = &settings.Settings +) + +// JSON field (data) type. +type JSON = []byte + +// All builds all models. +// Models are enumerated such that each are listed after +// all the other models on which they may depend. +func All() []interface{} { + return []interface{}{ + Application{}, + TechDependency{}, + Incident{}, + Analysis{}, + Issue{}, + Bucket{}, + BusinessService{}, + Dependency{}, + File{}, + Fact{}, + Identity{}, + Import{}, + ImportSummary{}, + ImportTag{}, + JobFunction{}, + MigrationWave{}, + Proxy{}, + Review{}, + Setting{}, + RuleSet{}, + Rule{}, + Stakeholder{}, + StakeholderGroup{}, + Tag{}, + TagCategory{}, + Target{}, + Task{}, + TaskGroup{}, + TaskReport{}, + Ticket{}, + Tracker{}, + ApplicationTag{}, + Questionnaire{}, + Assessment{}, + Archetype{}, + } +} diff --git a/model/pkg.go b/model/pkg.go index 0271d4aa..65992569 100644 --- a/model/pkg.go +++ b/model/pkg.go @@ -1,7 +1,7 @@ package model import ( - "github.com/konveyor/tackle2-hub/migration/v12/model" + "github.com/konveyor/tackle2-hub/migration/v13/model" ) // Field (data) types. 
@@ -47,6 +47,13 @@ type Ticket = model.Ticket type Tracker = model.Tracker type TTL = model.TTL +type Ref = model.Ref +type Map = model.Map + +type TaskError = model.TaskError +type TaskEvent = model.TaskEvent +type TaskPolicy = model.TaskPolicy +type Attachment = model.Attachment // Join tables type ApplicationTag = model.ApplicationTag diff --git a/model/serializer.go b/model/serializer.go new file mode 100644 index 00000000..1edc3899 --- /dev/null +++ b/model/serializer.go @@ -0,0 +1,144 @@ +package model + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "time" + + "gorm.io/gorm/schema" +) + +func init() { + schema.RegisterSerializer("json", jsonSerializer{}) +} + +type jsonSerializer struct { +} + +// Scan implements serializer. +func (r jsonSerializer) Scan(ctx context.Context, field *schema.Field, dst reflect.Value, dbValue any) (err error) { + fieldValue := reflect.New(field.FieldType) + if dbValue != nil { + var b []byte + switch v := dbValue.(type) { + case string: + b = []byte(v) + case []byte: + b = v + default: + return fmt.Errorf("json: failed to decode: %#v", dbValue) + } + if len(b) > 0 { + ptr := fieldValue.Interface() + err = json.Unmarshal(b, ptr) + } + } + v := fieldValue.Elem() + field.ReflectValueOf(ctx, dst).Set(v) + return +} + +// Value implements serializer. +func (r jsonSerializer) Value(_ context.Context, _ *schema.Field, _ reflect.Value, fieldValue any) (v any, err error) { + mp := r.jMap(fieldValue) + v, err = json.Marshal(mp) + return +} + +// jMap returns a map[string]any. +// The YAML decoder can produce map[any]any which is not valid for json. +// Converts map[any]any to map[string]any as needed. 
+func (r jsonSerializer) jMap(in any) (out any) { + defer func() { + r := recover() + if r != nil { + out = in + } + }() + if in == nil { + return + } + switch in.(type) { + case time.Time, *time.Time: + out = in + return + } + t := reflect.TypeOf(in) + v := reflect.ValueOf(in) + if t.Kind() == reflect.Ptr { + t = t.Elem() + v = v.Elem() + } + switch t.Kind() { + case reflect.Struct: + mp := make(map[string]any) + for i := 0; i < t.NumField(); i++ { + t := t.Field(i) + v := v.Field(i) + if !t.IsExported() { + continue + } + var object any + switch v.Kind() { + case reflect.Ptr: + if !v.IsNil() { + object = v.Elem().Interface() + } + default: + object = v.Interface() + } + object = r.jMap(object) + if t.Anonymous { + if m, cast := object.(map[string]any); cast { + for k, v := range m { + mp[k] = v + } + } + } else { + mp[t.Name] = object + } + } + out = mp + case reflect.Slice: + list := make([]any, 0) + for i := 0; i < v.Len(); i++ { + v := v.Index(i) + var object any + switch v.Kind() { + case reflect.Ptr: + if !v.IsNil() { + object = v.Elem().Interface() + } + default: + object = v.Interface() + } + object = r.jMap(object) + list = append(list, object) + } + out = list + case reflect.Map: + mp := make(map[string]any) + for _, k := range v.MapKeys() { + v := v.MapIndex(k) + var object any + switch v.Kind() { + case reflect.Ptr: + if !v.IsNil() { + object = v.Elem().Interface() + } + default: + object = v.Interface() + } + object = r.jMap(object) + key := fmt.Sprintf("%v", k.Interface()) + mp[key] = object + } + out = mp + default: + out = in + } + + return +} diff --git a/reaper/file.go b/reaper/file.go index b13767df..244d07c4 100644 --- a/reaper/file.go +++ b/reaper/file.go @@ -64,6 +64,7 @@ func (r *FileReaper) busy(file *model.File) (busy bool, err error) { var n int64 ref := RefCounter{DB: r.DB} for _, m := range []interface{}{ + &model.Task{}, &model.TaskReport{}, &model.RuleSet{}, &model.Rule{}, diff --git a/reaper/task.go b/reaper/task.go index 
df89da76..22ff6a76 100644 --- a/reaper/task.go +++ b/reaper/task.go @@ -1,10 +1,8 @@ package reaper import ( - "encoding/json" "time" - "github.com/konveyor/tackle2-hub/api" "github.com/konveyor/tackle2-hub/model" "github.com/konveyor/tackle2-hub/task" "gorm.io/gorm" @@ -61,12 +59,11 @@ func (r *TaskReaper) Run() { } for i := range list { m := &list[i] - ttl := r.TTL(m) switch m.State { case task.Created: mark := m.CreateTime - if ttl.Created > 0 { - d := time.Duration(ttl.Created) * Unit + if m.TTL.Created > 0 { + d := time.Duration(m.TTL.Created) * Unit if time.Since(mark) > d { r.delete(m) } @@ -78,32 +75,30 @@ func (r *TaskReaper) Run() { } case task.Pending: mark := m.CreateTime - if ttl.Pending > 0 { - d := time.Duration(ttl.Pending) * Unit + if m.TTL.Pending > 0 { + d := time.Duration(m.TTL.Pending) * Unit if time.Since(mark) > d { r.delete(m) } } - case task.Postponed: + case task.Running: mark := m.CreateTime - if ttl.Postponed > 0 { - d := time.Duration(ttl.Postponed) * Unit - if time.Since(mark) > d { - r.delete(m) - } + if m.Started != nil { + mark = *m.Started } - case task.Running: - mark := *m.Started - if ttl.Running > 0 { - d := time.Duration(ttl.Running) * Unit + if m.TTL.Running > 0 { + d := time.Duration(m.TTL.Running) * Unit if time.Since(mark) > d { r.delete(m) } } case task.Succeeded: - mark := *m.Terminated - if ttl.Succeeded > 0 { - d := time.Duration(ttl.Succeeded) * Unit + mark := m.CreateTime + if m.Terminated != nil { + mark = *m.Terminated + } + if m.TTL.Succeeded > 0 { + d := time.Duration(m.TTL.Succeeded) * Unit if time.Since(mark) > d { r.delete(m) } @@ -114,9 +109,12 @@ func (r *TaskReaper) Run() { } } case task.Failed: - mark := *m.Terminated - if ttl.Succeeded > 0 { - d := time.Duration(ttl.Failed) * Unit + mark := m.CreateTime + if m.Terminated != nil { + mark = *m.Terminated + } + if m.TTL.Failed > 0 { + d := time.Duration(m.TTL.Failed) * Unit if time.Since(mark) > d { r.delete(m) } @@ -149,6 +147,8 @@ func (r 
*TaskReaper) release(m *model.Task) { nChanged++ } if nChanged > 0 { + rt := task.Task{Task: m} + rt.Event(task.Released) err := r.DB.Save(m).Error if err != nil { Log.Error(err, "") @@ -172,15 +172,6 @@ func (r *TaskReaper) delete(m *model.Task) { } } -// TTL returns the task TTL. -func (r *TaskReaper) TTL(m *model.Task) (ttl api.TTL) { - if m.TTL != nil { - _ = json.Unmarshal(m.TTL, &ttl) - } - - return -} - // // diff --git a/settings/hub.go b/settings/hub.go index e6255a70..08daf258 100644 --- a/settings/hub.go +++ b/settings/hub.go @@ -3,30 +3,36 @@ package settings import ( "os" "strconv" + "time" ) const ( - EnvNamespace = "NAMESPACE" - EnvDbPath = "DB_PATH" - EnvDbSeedPath = "DB_SEED_PATH" - EnvBucketPath = "BUCKET_PATH" - EnvRwxSupported = "RWX_SUPPORTED" - EnvCachePath = "CACHE_PATH" - EnvCachePvc = "CACHE_PVC" - EnvPassphrase = "ENCRYPTION_PASSPHRASE" - EnvTaskReapCreated = "TASK_REAP_CREATED" - EnvTaskReapSucceeded = "TASK_REAP_SUCCEEDED" - EnvTaskReapFailed = "TASK_REAP_FAILED" - EnvTaskSA = "TASK_SA" - EnvTaskRetries = "TASK_RETRIES" - EnvFrequencyTask = "FREQUENCY_TASK" - EnvFrequencyReaper = "FREQUENCY_REAPER" - EnvDevelopment = "DEVELOPMENT" - EnvBucketTTL = "BUCKET_TTL" - EnvFileTTL = "FILE_TTL" - EnvAppName = "APP_NAME" - EnvDisconnected = "DISCONNECTED" - EnvAnalysisReportPath = "ANALYSIS_REPORT_PATH" + EnvNamespace = "NAMESPACE" + EnvDbPath = "DB_PATH" + EnvDbSeedPath = "DB_SEED_PATH" + EnvBucketPath = "BUCKET_PATH" + EnvRwxSupported = "RWX_SUPPORTED" + EnvCachePath = "CACHE_PATH" + EnvCachePvc = "CACHE_PVC" + EnvSharedPath = "SHARED_PATH" + EnvPassphrase = "ENCRYPTION_PASSPHRASE" + EnvTaskReapCreated = "TASK_REAP_CREATED" + EnvTaskReapSucceeded = "TASK_REAP_SUCCEEDED" + EnvTaskReapFailed = "TASK_REAP_FAILED" + EnvTaskSA = "TASK_SA" + EnvTaskRetries = "TASK_RETRIES" + EnvTaskPreemptEnabled = "TASK_PREEMPT_ENABLED" + EnvTaskPreemptDelayed = "TASK_PREEMPT_DELAYED" + EnvTaskPreemptPostponed = "TASK_PREEMPT_POSTPONED" + EnvTaskPreemptRate = 
"TASK_PREEMPT_RATE" + EnvFrequencyTask = "FREQUENCY_TASK" + EnvFrequencyReaper = "FREQUENCY_REAPER" + EnvDevelopment = "DEVELOPMENT" + EnvBucketTTL = "BUCKET_TTL" + EnvFileTTL = "FILE_TTL" + EnvAppName = "APP_NAME" + EnvDisconnected = "DISCONNECTED" + EnvAnalysisReportPath = "ANALYSIS_REPORT_PATH" ) type Hub struct { @@ -52,6 +58,10 @@ type Hub struct { Path string PVC string } + // Shared mount settings. + Shared struct { + Path string + } // Encryption settings. Encryption struct { Passphrase string @@ -65,6 +75,12 @@ type Hub struct { Succeeded int Failed int } + Preemption struct { // seconds. + Enabled bool + Delayed time.Duration + Postponed time.Duration + Rate int + } } // Frequency Frequency struct { @@ -115,6 +131,10 @@ func (r *Hub) Load() (err error) { if !found { r.Cache.Path = "/cache" } + r.Shared.Path, found = os.LookupEnv(EnvSharedPath) + if !found { + r.Shared.Path = "/shared" + } r.Encryption.Passphrase, found = os.LookupEnv(EnvPassphrase) if !found { r.Encryption.Passphrase = "tackle" @@ -165,6 +185,38 @@ func (r *Hub) Load() (err error) { } else { r.Frequency.Reaper = 1 // 1 minute. 
} + s, found = os.LookupEnv(EnvTaskPreemptEnabled) + if found { + b, _ := strconv.ParseBool(s) + r.Task.Preemption.Enabled = b + } + s, found = os.LookupEnv(EnvTaskPreemptDelayed) + if found { + n, _ := strconv.Atoi(s) + r.Task.Preemption.Delayed = time.Duration(n) * time.Second + } else { + r.Task.Preemption.Delayed = time.Minute + } + s, found = os.LookupEnv(EnvTaskPreemptPostponed) + if found { + n, _ := strconv.Atoi(s) + r.Task.Preemption.Postponed = time.Duration(n) * time.Second + } else { + r.Task.Preemption.Postponed = time.Minute + } + s, found = os.LookupEnv(EnvTaskPreemptRate) + if found { + n, _ := strconv.Atoi(s) + if n < 0 { + n = 0 + } + if n > 100 { + n = 100 + } + r.Task.Preemption.Rate = n + } else { + r.Task.Preemption.Rate = 10 + } s, found = os.LookupEnv(EnvDevelopment) if found { b, _ := strconv.ParseBool(s) diff --git a/task/error.go b/task/error.go new file mode 100644 index 00000000..42b7320f --- /dev/null +++ b/task/error.go @@ -0,0 +1,298 @@ +package task + +import ( + "errors" + "fmt" + "strconv" + "strings" + + k8serr "k8s.io/apimachinery/pkg/api/errors" +) + +// BadRequest report bad request. +type BadRequest struct { + Reason string +} + +func (e *BadRequest) Error() string { + return e.Reason +} + +func (e *BadRequest) Is(err error) (matched bool) { + var inst *BadRequest + matched = errors.As(err, &inst) + return +} + +// ActionTimeout report an action timeout. +type ActionTimeout struct { +} + +func (e *ActionTimeout) Error() string { + return "Requested (asynchronous) action timed out." +} + +func (e *ActionTimeout) Is(err error) (matched bool) { + var inst *ActionTimeout + matched = errors.As(err, &inst) + return +} + +// SoftErr returns true when the error isA SoftError. 
+func SoftErr(err error) (matched, retry bool) { + if err == nil { + return + } + naked := errors.Unwrap(err) + if naked == nil { + naked = err + } + if softErr, cast := naked.(SoftError); cast { + matched = true + retry = softErr.Retry() + } + return +} + +// SoftError used to report errors specific to one task +// rather than systemic issues. +type SoftError interface { + // Retry determines if the task should be + // retried or failed immediately. + Retry() (r bool) +} + +// KindNotFound used to report task (kind) referenced +// by a task but cannot be found. +type KindNotFound struct { + Name string +} + +func (e *KindNotFound) Error() string { + return fmt.Sprintf( + "Task (kind): '%s' not-found.", + e.Name) +} + +func (e *KindNotFound) Is(err error) (matched bool) { + var inst *KindNotFound + matched = errors.As(err, &inst) + return +} + +func (e *KindNotFound) Retry() (r bool) { + return +} + +// AddonNotFound used to report addon referenced +// by a task but cannot be found. +type AddonNotFound struct { + Name string +} + +func (e *AddonNotFound) Error() string { + return fmt.Sprintf( + "Addon: '%s' not-found.", + e.Name) +} + +func (e *AddonNotFound) Is(err error) (matched bool) { + var inst *AddonNotFound + matched = errors.As(err, &inst) + return +} + +func (e *AddonNotFound) Retry() (r bool) { + return +} + +// AddonNotSelected report that an addon has not been selected. +type AddonNotSelected struct { +} + +func (e *AddonNotSelected) Error() string { + return fmt.Sprintf("Addon not selected.") +} + +func (e *AddonNotSelected) Is(err error) (matched bool) { + var inst *AddonNotSelected + matched = errors.As(err, &inst) + return +} + +func (e *AddonNotSelected) Retry() (r bool) { + return +} + +// ExtensionNotFound used to report an extension referenced +// by a task but cannot be found. 
+type ExtensionNotFound struct { + Name string +} + +func (e *ExtensionNotFound) Error() string { + return fmt.Sprintf( + "Extension: '%s' not-found.", + e.Name) +} + +func (e *ExtensionNotFound) Is(err error) (matched bool) { + var inst *ExtensionNotFound + matched = errors.As(err, &inst) + return +} + +func (e *ExtensionNotFound) Retry() (r bool) { + return +} + +// ExtensionNotValid used to report extension referenced +// by a task not valid with addon. +type ExtensionNotValid struct { + Name string + Addon string +} + +func (e *ExtensionNotValid) Error() string { + return fmt.Sprintf( + "Extension: '%s' not-valid with addon '%s'.", + e.Name, + e.Addon) +} + +func (e *ExtensionNotValid) Is(err error) (matched bool) { + var inst *ExtensionNotValid + matched = errors.As(err, &inst) + return +} + +func (e *ExtensionNotValid) Retry() (r bool) { + return +} + +// SelectorNotValid reports selector errors. +type SelectorNotValid struct { + Selector string + Predicate string + Reason string +} + +func (e *SelectorNotValid) Error() string { + if e.Predicate != "" { + return fmt.Sprintf( + "Selector '%s' not valid. predicate '%s' not supported.", + e.Selector, + e.Predicate) + } + return fmt.Sprintf( + "Selector syntax '%s' not valid: '%s'.", + e.Selector, + e.Reason) +} + +func (e *SelectorNotValid) Is(err error) (matched bool) { + var inst *SelectorNotValid + matched = errors.As(err, &inst) + return +} + +func (e *SelectorNotValid) Retry() (r bool) { + return +} + +// PriorityNotFound report priority class not found. 
+type PriorityNotFound struct { + Name string + Value int +} + +func (e *PriorityNotFound) Error() string { + var d string + if e.Name != "" { + d = fmt.Sprintf("\"%s\"", e.Name) + } else { + d = strconv.Itoa(e.Value) + } + return fmt.Sprintf( + "PriorityClass %s not-found.", + d) +} + +func (e *PriorityNotFound) Is(err error) (matched bool) { + var inst *PriorityNotFound + matched = errors.As(err, &inst) + return +} + +func (e *PriorityNotFound) Retry() (r bool) { + return +} + +// PodRejected report pod rejected.. +type PodRejected struct { + Reason string +} + +func (e *PodRejected) Error() string { + return e.Reason +} + +func (e *PodRejected) Is(err error) (matched bool) { + var inst *PodRejected + matched = errors.As(err, &inst) + return +} + +// Match returns true when pod is rejected. +func (e *PodRejected) Match(err error) (matched bool) { + matched = k8serr.IsBadRequest(err) || + k8serr.IsForbidden(err) || + k8serr.IsInvalid(err) + if matched { + e.Reason = err.Error() + } + return +} + +func (e *PodRejected) Retry() (r bool) { + return +} + +// QuotaExceeded report quota exceeded. +type QuotaExceeded struct { + Reason string +} + +// Match returns true when the error is Forbidden due to quota exceeded. 
+func (e *QuotaExceeded) Match(err error) (matched bool) { + if k8serr.IsForbidden(err) { + matched = true + e.Reason = err.Error() + for _, s := range []string{"quota", "exceeded"} { + matched = strings.Contains(e.Reason, s) + if !matched { + break + } + } + part := strings.SplitN(e.Reason, ":", 2) + if len(part) > 1 { + e.Reason = part[1] + } + } + return +} + +func (e *QuotaExceeded) Error() string { + return e.Reason +} + +func (e *QuotaExceeded) Is(err error) (matched bool) { + var inst *QuotaExceeded + matched = errors.As(err, &inst) + return +} + +func (e *QuotaExceeded) Retry() (r bool) { + r = true + return +} diff --git a/task/injector.go b/task/injector.go new file mode 100644 index 00000000..8ae75614 --- /dev/null +++ b/task/injector.go @@ -0,0 +1,75 @@ +package task + +import ( + "regexp" + "strconv" + "strings" + + core "k8s.io/api/core/v1" +) + +var ( + SeqRegex = regexp.MustCompile(`(\${seq:)([0-9]+)}`) +) + +// Injector macro processor. +type Injector struct { + seq SeqInjector +} + +// Inject process macros. +func (r *Injector) Inject(container *core.Container) { + var injected []string + for i := range container.Command { + if i > 0 { + injected = append( + injected, + r.seq.inject(container.Command[i])) + } else { + injected = append( + injected, + container.Command[i]) + } + } + container.Command = injected + injected = nil + for i := range container.Args { + injected = append( + injected, + r.seq.inject(container.Args[i])) + } + container.Args = injected + for i := range container.Env { + env := &container.Env[i] + env.Value = r.seq.inject(env.Value) + } +} + +// SeqInjector provides ${seq:} sequence injection. +type SeqInjector struct { + portMap map[int]int +} + +// inject next integer. 
+func (r *SeqInjector) inject(in string) (out string) { + if r.portMap == nil { + r.portMap = make(map[int]int) + } + out = in + for { + match := SeqRegex.FindStringSubmatch(out) + if len(match) < 3 { + break + } + base, _ := strconv.Atoi(match[2]) + offset := r.portMap[base] + out = strings.Replace( + out, + match[0], + strconv.Itoa(base+offset), + -1) + offset++ + r.portMap[base] = offset + } + return +} diff --git a/task/manager.go b/task/manager.go index 96ca6d7f..27f13fbb 100644 --- a/task/manager.go +++ b/task/manager.go @@ -2,9 +2,11 @@ package task import ( "context" - "errors" "fmt" + "io" + "os" "path" + "sort" "strconv" "strings" "time" @@ -13,22 +15,31 @@ import ( liberr "github.com/jortel/go-utils/error" "github.com/jortel/go-utils/logr" "github.com/konveyor/tackle2-hub/auth" + k8s2 "github.com/konveyor/tackle2-hub/k8s" crd "github.com/konveyor/tackle2-hub/k8s/api/tackle/v1alpha1" "github.com/konveyor/tackle2-hub/metrics" "github.com/konveyor/tackle2-hub/model" "github.com/konveyor/tackle2-hub/settings" + "gopkg.in/yaml.v2" "gorm.io/gorm" core "k8s.io/api/core/v1" k8serr "k8s.io/apimachinery/pkg/api/errors" meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" k8s "sigs.k8s.io/controller-runtime/pkg/client" ) // States +// Some also used as events: +// - Postponed +// - QuotaBlocked +// - Preempted const ( Created = "Created" - Postponed = "Postponed" Ready = "Ready" + Postponed = "Postponed" + Preempted = "Preempted" Pending = "Pending" Running = "Running" Succeeded = "Succeeded" @@ -36,65 +47,42 @@ const ( Canceled = "Canceled" ) -// Policies +// Events +const ( + AddonSelected = "AddonSelected" + ExtSelected = "ExtensionSelected" + PodNotFound = "PodNotFound" + PodCreated = "PodCreated" + PodRunning = "PodRunning" + PodSucceeded = "PodSucceeded" + PodFailed = "PodFailed" + PodDeleted = "PodDeleted" + QuotaBlocked = "QuotaBlocked" + Escalated = "Escalated" + Released = "Released" +) + +// 
k8s labels. const ( - Isolated = "isolated" + TaskLabel = "task" + AppLabel = "app" + RoleLabel = "role" ) const ( Unit = time.Second ) +const ( + Shared = "shared" + Cache = "cache" +) + var ( Settings = &settings.Settings Log = logr.WithName("task-scheduler") ) -// AddonNotFound used to report addon referenced -// by a task but cannot be found. -type AddonNotFound struct { - Name string -} - -func (e *AddonNotFound) Error() (s string) { - return fmt.Sprintf("Addon: '%s' not-found.", e.Name) -} - -func (e *AddonNotFound) Is(err error) (matched bool) { - _, matched = err.(*AddonNotFound) - return -} - -// QuotaExceeded report quota exceeded. -type QuotaExceeded struct { - Reason string -} - -// Match returns true when the error is Forbidden due to quota exceeded. -func (e *QuotaExceeded) Match(err error) (matched bool) { - if k8serr.IsForbidden(err) { - matched = true - e.Reason = err.Error() - for _, s := range []string{"quota", "exceeded"} { - matched = strings.Contains(e.Reason, s) - if !matched { - break - } - } - } - return -} - -func (e *QuotaExceeded) Error() (s string) { - return e.Reason -} - -func (e *QuotaExceeded) Is(err error) (matched bool) { - var inst *QuotaExceeded - matched = errors.As(err, &inst) - return -} - // Manager provides task management. type Manager struct { // DB @@ -103,10 +91,16 @@ type Manager struct { Client k8s.Client // Addon token scopes. Scopes []string + // cluster resources. + cluster Cluster + // queue of actions. + queue chan func() } // Run the manager. 
func (m *Manager) Run(ctx context.Context) { + m.queue = make(chan func(), 100) + m.cluster.Client = m.Client auth.Validators = append( auth.Validators, &Validator{ @@ -120,202 +114,955 @@ func (m *Manager) Run(ctx context.Context) { case <-ctx.Done(): return default: - m.updateRunning() - m.startReady() - m.pause() + err := m.cluster.Refresh() + if err == nil { + m.runActions() + m.updateRunning() + m.startReady() + m.pause() + } else { + Log.Error(err, "") + m.pause() + } } } }() } +// Create a task. +func (m *Manager) Create(db *gorm.DB, requested *Task) (task *Task, err error) { + task = &Task{&model.Task{}} + switch requested.State { + case "": + requested.State = Created + fallthrough + case Created, + Ready: + task.CreateUser = requested.CreateUser + task.Name = requested.Name + task.Kind = requested.Kind + task.Addon = requested.Addon + task.Extensions = requested.Extensions + task.State = requested.State + task.Locator = requested.Locator + task.Priority = requested.Priority + task.Policy = requested.Policy + task.TTL = requested.TTL + task.Data = requested.Data + task.ApplicationID = requested.ApplicationID + default: + err = &BadRequest{ + Reason: "state must be (Created|Ready)", + } + return + } + err = db.Create(task).Error + if err != nil { + err = liberr.Wrap(err) + return + } + return +} + +// Update update task. 
+func (m *Manager) Update(db *gorm.DB, requested *Task) (err error) { + err = m.action(func() (err error) { + task := &Task{} + err = db.First(task, requested.ID).Error + if err != nil { + return + } + switch requested.State { + case Created, + Ready: + task.UpdateUser = requested.UpdateUser + task.Name = requested.Name + task.Kind = requested.Kind + task.Addon = requested.Addon + task.Extensions = requested.Extensions + task.State = requested.State + task.Locator = requested.Locator + task.Priority = requested.Priority + task.Policy = requested.Policy + task.TTL = requested.TTL + task.Data = requested.Data + task.ApplicationID = requested.ApplicationID + case Pending, + QuotaBlocked, + Postponed: + task.UpdateUser = requested.UpdateUser + task.Name = requested.Name + task.Data = requested.Data + task.Priority = requested.Priority + task.Policy = requested.Policy + task.TTL = requested.TTL + case Running, + Succeeded, + Failed, + Canceled: + err = &BadRequest{ + Reason: "state must not be (Running|Succeeded|Failed|Canceled)", + } + return + } + err = db.Save(task).Error + if err != nil { + err = liberr.Wrap(err) + return + } + return + }) + return +} + +// Delete a task. +func (m *Manager) Delete(db *gorm.DB, id uint) (err error) { + err = m.action(func() (err error) { + task := &Task{} + err = db.First(task, id).Error + if err != nil { + return + } + err = task.Delete(m.Client) + if err != nil { + return + } + err = db.Delete(task).Error + return + }) + return +} + +// Cancel a task. 
+func (m *Manager) Cancel(db *gorm.DB, id uint) (err error) { + err = m.action( + func() (err error) { + task := &Task{} + err = db.First(task, id).Error + if err != nil { + return + } + switch task.State { + case Succeeded, + Failed, + Canceled: + err = &BadRequest{ + Reason: "state must not be (Succeeded|Failed|Canceled)", + } + return + default: + } + pod, found := m.cluster.pods[path.Base(task.Pod)] + if found { + err = m.podSnapshot(task, pod) + if err != nil { + return + } + } + err = task.Cancel(m.Client) + if err != nil { + return + } + err = db.Save(task).Error + if err != nil { + err = liberr.Wrap(err) + return + } + return + }) + return +} + // Pause. func (m *Manager) pause() { d := Unit * time.Duration(Settings.Frequency.Task) time.Sleep(d) } -// startReady starts pending tasks. +// action executes an asynchronous action. +func (m *Manager) action(action func() error) (err error) { + d := time.Hour + ch := make(chan error) + m.queue <- func() { + defer close(ch) + select { + case ch <- action(): + default: + } + } + select { + case err = <-ch: + case <-time.After(d): + err = &ActionTimeout{} + } + return +} + +// runActions executes queued actions. +func (m *Manager) runActions() { + d := time.Millisecond * 10 + for { + select { + case action := <-m.queue: + action() + case <-time.After(d): + return + } + } +} + +// startReady starts ready tasks. 
func (m *Manager) startReady() { - list := []model.Task{} + var err error + defer func() { + Log.Error(err, "") + }() + fetched := []*model.Task{} db := m.DB.Order("priority DESC, id") result := db.Find( - &list, + &fetched, "state IN ?", []string{ Ready, Postponed, + QuotaBlocked, Pending, Running, }) - Log.Error(result.Error, "") if result.Error != nil { return } - for i := range list { - task := &list[i] - if Settings.Disconnected { - mark := time.Now() - task.State = Failed - task.Terminated = &mark - task.Error("Error", "Hub is disconnected.") - sErr := m.DB.Save(task).Error - Log.Error(sErr, "") + if len(fetched) == 0 { + return + } + var list []*Task + for _, task := range fetched { + list = append(list, &Task{task}) + } + list, err = m.disconnected(list) + if err != nil { + return + } + err = m.adjustPriority(list) + if err != nil { + return + } + list, err = m.selectAddons(list) + if err != nil { + return + } + err = m.postpone(list) + if err != nil { + return + } + err = m.createPod(list) + if err != nil { + return + } + err = m.preempt(list) + if err != nil { + return + } + return +} + +// disconnected fails tasks when hub is disconnected. +// The returned list is empty when disconnected. +func (m *Manager) disconnected(list []*Task) (kept []*Task, err error) { + if !Settings.Disconnected { + kept = list + return + } + for _, task := range list { + mark := time.Now() + task.State = Failed + task.Terminated = &mark + task.Error("Error", "Hub is disconnected.") + err = m.DB.Save(task).Error + if err != nil { + err = liberr.Wrap(err) + return + } + } + return +} + +// selectAddon selects addon as needed. +// The returned list has failed tasks removed. 
+func (m *Manager) selectAddons(list []*Task) (kept []*Task, err error) { + if len(list) == 0 { + return + } + mark := time.Now() + var addon *crd.Addon + for _, task := range list { + addon, err = m.selectAddon(task) + if err == nil { + err = m.selectExtensions(task, addon) + } + if err != nil { + matched, _ := SoftErr(err) + if matched { + task.Error("Error", err.Error()) + task.Terminated = &mark + task.State = Failed + err = m.DB.Save(task).Error + if err != nil { + err = liberr.Wrap(err) + return + } + err = nil + } + } else { + kept = append(kept, task) + } + } + return +} + +// selectAddon select an addon when not specified. +func (m *Manager) selectAddon(task *Task) (addon *crd.Addon, err error) { + if task.Addon != "" { + found := false + addon, found = m.cluster.addons[task.Addon] + if !found { + err = &AddonNotFound{task.Addon} + } + return + } + kind, found := m.cluster.tasks[task.Kind] + if !found { + err = &KindNotFound{task.Kind} + return + } + matched := false + var selected *crd.Addon + selector := NewSelector(m.DB, task) + for _, addon = range m.cluster.addons { + if addon.Spec.Task != kind.Name { continue } - if task.Canceled { - m.canceled(task) + matched, err = selector.Match(addon.Spec.Selector) + if err != nil { + return + } + if matched { + selected = addon + break + } + } + if selected == nil { + err = &AddonNotSelected{} + return + } + task.Addon = selected.Name + task.Event(AddonSelected, selected) + return +} + +// selectExtensions select extensions when not specified. 
+func (m *Manager) selectExtensions(task *Task, addon *crd.Addon) (err error) { + if len(task.Extensions) > 0 { + return + } + matched := false + selector := NewSelector(m.DB, task) + for name, extension := range m.cluster.extensions { + if extension.Spec.Addon != addon.Name { continue } - switch task.State { - case Ready, - Postponed: - ready := task - if m.postpone(ready, list) { - ready.State = Postponed - Log.Info("Task postponed.", "id", ready.ID) - err := m.DB.Save(ready).Error - Log.Error(err, "") + matched, err = selector.Match(extension.Spec.Selector) + if err != nil { + return + } + if matched { + task.Extensions = append(task.Extensions, name) + task.Event(ExtSelected, name) + } + } + return +} + +// postpone Postpones a task as needed based on rules. +// postpone order: +// - priority (lower) +// - Age (newer) +func (m *Manager) postpone(list []*Task) (err error) { + if len(list) == 0 { + return + } + sort.Slice( + list, + func(i, j int) bool { + it := list[i] + jt := list[j] + return it.Priority < jt.Priority || + (it.Priority == jt.Priority && + it.ID > jt.ID) + }) + postponed := map[uint]any{} + released := map[uint]any{} + ruleSet := []Rule{ + &RuleIsolated{}, + &RulePreempted{}, + &RuleUnique{ + matched: make(map[uint]uint), + }, + &RuleDeps{ + cluster: m.cluster, + }, + } + for _, task := range list { + if !task.StateIn(Ready, Postponed, QuotaBlocked) { + continue + } + ready := task + for _, other := range list { + if ready.ID == other.ID { continue } - rt := Task{ready} - started, err := rt.Run(m.Client) + for _, rule := range ruleSet { + matched, reason := rule.Match(ready, other) + if matched { + postponed[task.ID] = reason + continue + } + } + } + _, found := postponed[task.ID] + if !found { + if task.State == Postponed { + released[task.ID] = 0 + } + } + } + if len(postponed)+len(released) == 0 { + return + } + for _, task := range list { + updated := false + reason, found := postponed[task.ID] + if found { + task.State = Postponed + 
task.Event(Postponed, reason) + Log.Info( + "Task postponed.", + "id", + task.ID, + "reason", + reason) + updated = true + } + _, found = released[task.ID] + if found { + task.State = Ready + updated = true + } + if updated { + err = m.DB.Save(task).Error if err != nil { - Log.Error(err, "") - ready.Error("Error", err.Error()) - ready.State = Failed - err = m.DB.Save(ready).Error - Log.Error(err, "") - continue + err = liberr.Wrap(err) + return } - err = m.DB.Save(ready).Error + } + } + return +} + +// adjustPriority escalate as needed. +// To prevent priority inversion, the priority of a task's +// dependencies will be escalated provided the dependency has: +// - state of: (Ready|Pending|Postponed|QuotaBlocked) +// - priority (lower). +// When adjusted, Pending tasks pods deleted and made Ready again. +func (m *Manager) adjustPriority(list []*Task) (err error) { + if len(list) == 0 { + return + } + pE := Priority{cluster: m.cluster} + escalated := pE.Escalate(list) + for _, task := range escalated { + if task.State != Pending { + continue + } + err = task.Delete(m.Client) + if err != nil { + err = liberr.Wrap(err) + return + } + task.State = Ready + err = m.DB.Save(task).Error + if err != nil { + err = liberr.Wrap(err) + return + } + } + return +} + +// createPod creates a pod for the task. 
+func (m *Manager) createPod(list []*Task) (err error) { + sort.Slice( + list, + func(i, j int) bool { + it := list[i] + jt := list[j] + return it.Priority > jt.Priority || + (it.Priority == jt.Priority && + it.ID < jt.ID) + }) + for _, task := range list { + if !task.StateIn(Ready, QuotaBlocked) { + continue + } + ready := task + started := false + started, err = ready.Run(m.cluster) + if err != nil { Log.Error(err, "") - if started { - Log.Info("Task started.", "id", ready.ID) - if ready.Retries == 0 { - metrics.TasksInitiated.Inc() + return + } + err = m.DB.Save(ready).Error + if err != nil { + err = liberr.Wrap(err) + return + } + if started { + Log.Info("Task started.", "id", ready.ID) + if ready.Retries == 0 { + metrics.TasksInitiated.Inc() + } + } + } + return +} + +// preempt reschedules a Running task as needed. +// The `preempted` task must be: +// - state=Running. +// - lower priority. +// The `blocked` task must be: +// - higher priority +// - pod blocked by quota or pending for a defined period. +// Preempt order: +// - priority (lowest). +// - age (newest). +// Preempt limit: 10% each pass. +func (m *Manager) preempt(list []*Task) (err error) { + preemption := Settings.Hub.Task.Preemption + if len(list) == 0 { + return + } + mark := time.Now() + blocked := []*Task{} + running := []*Task{} + preempt := []Preempt{} + sort.Slice( + list, + func(i, j int) bool { + it := list[i] + jt := list[j] + return it.Priority > jt.Priority || + (it.Priority == jt.Priority && + it.ID < jt.ID) + }) + for _, task := range list { + switch task.State { + case Ready: + case QuotaBlocked: + enabled := preemption.Enabled || task.Policy.PreemptEnabled + if !enabled { + break + } + event, found := task.LastEvent(QuotaBlocked) + if found { + count := preemption.Delayed / time.Second + if event.Count > int(count) { + blocked = append(blocked, task) } } - default: - // Ignored. - // Other states included to support - // postpone rules. 
+ case Pending:
+ enabled := preemption.Enabled || task.Policy.PreemptEnabled
+ if !enabled {
+ break
+ }
+ event, found := task.LastEvent(PodCreated)
+ if found {
+ if mark.Sub(event.Last) > preemption.Delayed {
+ blocked = append(blocked, task)
+ }
+ }
+ case Running:
+ exempt := task.Policy.PreemptExempt
+ if !exempt {
+ running = append(running, task)
+ }
+ }
+ }
+ if len(blocked) == 0 {
+ return
+ }
+ for _, b := range blocked {
+ for _, p := range running {
+ if b.Priority > p.Priority {
+ preempt = append(
+ preempt,
+ Preempt{
+ task: p,
+ by: b,
+ })
+ }
+ }
+ }
+ sort.Slice(
+ preempt,
+ func(i, j int) bool {
+ it := preempt[i].task
+ jt := preempt[j].task
+ return it.Priority < jt.Priority ||
+ (it.Priority == jt.Priority &&
+ it.ID > jt.ID)
+ })
+ n := 0
+ for _, request := range preempt {
+ p := request.task
+ by := request.by
+ reason := fmt.Sprintf(
+ "Preempted:%d, by: %d",
+ p.ID,
+ by.ID)
+ _ = p.Delete(m.Client)
+ p.Pod = ""
+ p.State = Ready
+ p.Started = nil
+ p.Terminated = nil
+ p.Errors = nil
+ p.Event(Preempted, reason)
+ Log.Info(reason)
+ err = m.DB.Save(p).Error
+ if err != nil {
+ err = liberr.Wrap(err)
+ return
+ }
+ n++
+ // preempt x%.
+ if len(blocked)/n*100 > preemption.Rate {
+ break
 }
 }
+ return
 }

 // updateRunning tasks to reflect pod state.
func (m *Manager) updateRunning() { - list := []model.Task{} + var err error + defer func() { + Log.Error(err, "") + }() + fetched := []*model.Task{} db := m.DB.Order("priority DESC, id") result := db.Find( - &list, + &fetched, "state IN ?", []string{ Pending, Running, }) - Log.Error(result.Error, "") if result.Error != nil { + err = liberr.Wrap(result.Error) + return + } + if len(fetched) == 0 { return } - for _, running := range list { - if running.Canceled { - m.canceled(&running) + var list []*Task + for _, task := range fetched { + list = append(list, &Task{task}) + } + for _, task := range list { + if !task.StateIn(Running, Pending) { continue } - rt := Task{&running} - err := rt.Reflect(m.Client) - if err != nil { - Log.Error(err, "") + running := task + pod, found := running.Reflect(m.cluster) + if !found { continue } + if task.StateIn(Succeeded, Failed) { + err = m.podSnapshot(running, pod) + if err != nil { + Log.Error(err, "") + continue + } + err = running.Delete(m.Client) + if err != nil { + Log.Error(err, "") + continue + } + } err = m.DB.Save(&running).Error if err != nil { - Log.Error(result.Error, "") - continue + err = liberr.Wrap(err) + return } Log.V(1).Info("Task updated.", "id", running.ID) } } -// postpone Postpones a task as needed based on rules. -func (m *Manager) postpone(ready *model.Task, list []model.Task) (postponed bool) { - ruleSet := []Rule{ - &RuleIsolated{}, - &RuleUnique{}, +// podSnapshot attaches a pod description and logs. +// Includes: +// - pod YAML +// - pod Events +// - container Logs +func (m *Manager) podSnapshot(task *Task, pod *core.Pod) (err error) { + var files []*model.File + d, err := m.podYAML(pod) + if err != nil { + return } - for i := range list { - other := &list[i] - if ready.ID == other.ID { - continue + files = append(files, d) + logs, err := m.podLogs(pod) + if err != nil { + return + } + files = append(files, logs...) 
+ for _, f := range files { + task.attach(f) + } + Log.V(1).Info("Task pod snapshot attached.", "id", task.ID) + return +} + +// podYAML builds pod resource description. +func (m *Manager) podYAML(pod *core.Pod) (file *model.File, err error) { + events, err := m.podEvent(pod) + if err != nil { + return + } + file = &model.File{Name: "pod.yaml"} + err = m.DB.Create(file).Error + if err != nil { + err = liberr.Wrap(err) + return + } + f, err := os.Create(file.Path) + if err != nil { + err = liberr.Wrap(err) + return + } + defer func() { + _ = f.Close() + }() + type Pod struct { + core.Pod `yaml:",inline"` + Events []Event `yaml:",omitempty"` + } + d := Pod{ + Pod: *pod, + Events: events, + } + b, _ := yaml.Marshal(d) + _, _ = f.Write(b) + return +} + +// podEvent get pod events. +func (m *Manager) podEvent(pod *core.Pod) (events []Event, err error) { + clientSet, err := k8s2.NewClientSet() + if err != nil { + return + } + options := meta.ListOptions{ + FieldSelector: "involvedObject.name=" + pod.Name, + TypeMeta: meta.TypeMeta{ + Kind: "Pod", + }, + } + eventClient := clientSet.CoreV1().Events(Settings.Hub.Namespace) + eventList, err := eventClient.List(context.TODO(), options) + if err != nil { + err = liberr.Wrap(err) + return + } + for _, event := range eventList.Items { + duration := event.LastTimestamp.Sub(event.FirstTimestamp.Time) + events = append( + events, + Event{ + Type: event.Type, + Reason: event.Reason, + Age: duration.String(), + Reporter: event.ReportingController, + Message: event.Message, + }) + } + return +} + +// podLogs - get and store pod logs as a Files. 
+func (m *Manager) podLogs(pod *core.Pod) (files []*model.File, err error) { + for _, container := range pod.Spec.Containers { + f, nErr := m.containerLog(pod, container.Name) + if nErr == nil { + files = append(files, f) + } else { + err = nErr + return } - switch other.State { - case Running, - Pending: - for _, rule := range ruleSet { - if rule.Match(ready, other) { - postponed = true - return - } - } + } + return +} + +// containerLog - get container log and store in file. +func (m *Manager) containerLog(pod *core.Pod, container string) (file *model.File, err error) { + options := &core.PodLogOptions{ + Container: container, + } + clientSet, err := k8s2.NewClientSet() + if err != nil { + return + } + podClient := clientSet.CoreV1().Pods(Settings.Hub.Namespace) + req := podClient.GetLogs(pod.Name, options) + reader, err := req.Stream(context.TODO()) + if err != nil { + err = liberr.Wrap(err) + return + } + defer func() { + _ = reader.Close() + }() + file = &model.File{Name: container + ".log"} + err = m.DB.Create(file).Error + if err != nil { + err = liberr.Wrap(err) + return + } + f, err := os.Create(file.Path) + if err != nil { + err = liberr.Wrap(err) + return + } + defer func() { + _ = f.Close() + }() + _, err = io.Copy(f, reader) + if err != nil { + err = liberr.Wrap(err) + return + } + return +} + +// Task is an runtime task. +type Task struct { + // model. + *model.Task +} + +func (r *Task) With(m *model.Task) { + r.Task = m +} + +// StateIn returns true matches on of the specified states. +func (r *Task) StateIn(states ...string) (matched bool) { + for _, state := range states { + if r.State == state { + matched = true + break } } - return } -// The task has been canceled. -func (m *Manager) canceled(task *model.Task) { - rt := Task{task} - err := rt.Cancel(m.Client) - Log.Error(err, "") - if err != nil { +// Error appends an error. +func (r *Task) Error(severity, description string, x ...any) { + description = fmt.Sprintf(description, x...) 
+ r.Errors = append( + r.Errors, + model.TaskError{ + Severity: severity, + Description: description, + }) +} + +// Event appends an event. +// Duplicates result in count incremented and Last updated. +func (r *Task) Event(kind string, p ...any) { + mark := time.Now() + reason := "" + if len(p) > 0 { + switch x := p[0].(type) { + case string: + reason = fmt.Sprintf(x, p[1:]...) + case int: + reason = strconv.Itoa(x) + } + } + event, found := r.LastEvent(kind) + if found && event.Reason == reason { + event.Last = mark + event.Count++ return } - err = m.DB.Save(task).Error - Log.Error(err, "") - db := m.DB.Model(&model.TaskReport{}) - err = db.Delete("taskid", task.ID).Error - Log.Error(err, "") - return + event = &model.TaskEvent{ + Kind: kind, + Count: 1, + Reason: reason, + Last: mark, + } + r.Events = append(r.Events, *event) } -// Task is an runtime task. -type Task struct { - // model. - *model.Task +// LastEvent returns the last event of the specified kind. +func (r *Task) LastEvent(kind string) (event *model.TaskEvent, found bool) { + for i := len(r.Events) - 1; i >= 0; i-- { + event = &r.Events[i] + if kind == event.Kind { + found = true + break + } + } + return } // Run the specified task. 
-func (r *Task) Run(client k8s.Client) (started bool, err error) { +func (r *Task) Run(cluster Cluster) (started bool, err error) { mark := time.Now() + client := cluster.Client defer func() { if err == nil { return } - if errors.Is(err, &QuotaExceeded{}) { - Log.V(1).Info(err.Error()) - err = nil - return - } - if errors.Is(err, &AddonNotFound{}) { - r.Error("Error", err.Error()) - r.Terminated = &mark - r.State = Failed + matched, retry := SoftErr(err) + if matched { + if !retry { + r.Error("Error", err.Error()) + r.Terminated = &mark + r.State = Failed + } err = nil - return } }() - addon, err := r.findAddon(client, r.Addon) - if err != nil { + addon, found := cluster.addons[r.Addon] + if !found { + err = &AddonNotFound{Name: r.Addon} return } - owner, err := r.findTackle(client) + extensions, err := r.getExtensions(client) if err != nil { return } - r.Image = addon.Spec.Image - secret := r.secret(addon) + for _, extension := range extensions { + if r.Addon != extension.Spec.Addon { + err = &ExtensionNotValid{ + Name: extension.Name, + Addon: addon.Name, + } + return + } + } + secret := r.secret() err = client.Create(context.TODO(), &secret) if err != nil { err = liberr.Wrap(err) @@ -326,12 +1073,24 @@ func (r *Task) Run(client k8s.Client) (started bool, err error) { _ = client.Delete(context.TODO(), &secret) } }() - pod := r.pod(addon, owner, &secret) + pod := r.pod( + addon, + extensions, + cluster.tackle, + &secret) err = client.Create(context.TODO(), &pod) if err != nil { - qe := &QuotaExceeded{err.Error()} + qe := &QuotaExceeded{} if qe.Match(err) { + r.State = QuotaBlocked + r.Event(QuotaBlocked, qe.Reason) err = qe + return + } + pe := &PodRejected{} + if pe.Match(err) { + err = liberr.Wrap(pe) + return } err = liberr.Wrap(err) return @@ -360,58 +1119,31 @@ func (r *Task) Run(client k8s.Client) (started bool, err error) { r.Pod = path.Join( pod.Namespace, pod.Name) + r.Event(PodCreated, r.Pod) return } // Reflect finds the associated pod and updates the 
task state. -func (r *Task) Reflect(client k8s.Client) (err error) { - pod := &core.Pod{} - err = client.Get( - context.TODO(), - k8s.ObjectKey{ - Namespace: path.Dir(r.Pod), - Name: path.Base(r.Pod), - }, - pod) - if err != nil { - if k8serr.IsNotFound(err) { - r.Pod = "" - r.State = Ready - err = nil - } else { - err = liberr.Wrap(err) - } +func (r *Task) Reflect(cluster Cluster) (pod *core.Pod, found bool) { + pod, found = cluster.pods[path.Base(r.Pod)] + if !found { + r.State = Ready + r.Event(PodNotFound, r.Pod) + r.Terminated = nil + r.Started = nil + r.Pod = "" return } - mark := time.Now() - status := pod.Status - switch status.Phase { + client := cluster.Client + switch pod.Status.Phase { + case core.PodPending: + r.podPending(pod) case core.PodRunning: - r.State = Running + r.podRunning(pod, client) case core.PodSucceeded: - r.State = Succeeded - r.Terminated = &mark + r.podSucceeded(pod) case core.PodFailed: - r.Error( - "Error", - "Pod failed: %s", - pod.Status.ContainerStatuses[0].State.Terminated.Reason) - switch pod.Status.ContainerStatuses[0].State.Terminated.ExitCode { - case 137: // Killed. 
- if r.Retries < Settings.Hub.Task.Retries { - _ = client.Delete(context.TODO(), pod) - r.Pod = "" - r.State = Ready - r.Errors = nil - r.Retries++ - } else { - r.State = Failed - r.Terminated = &mark - } - default: - r.State = Failed - r.Terminated = &mark - } + r.podFailed(pod, client) } return @@ -425,7 +1157,7 @@ func (r *Task) Delete(client k8s.Client) (err error) { pod := &core.Pod{} pod.Namespace = path.Dir(r.Pod) pod.Name = path.Base(r.Pod) - err = client.Delete(context.TODO(), pod) + err = client.Delete(context.TODO(), pod, k8s.GracePeriodSeconds(0)) if err != nil { if !k8serr.IsNotFound(err) { err = liberr.Wrap(err) @@ -435,6 +1167,7 @@ func (r *Task) Delete(client k8s.Client) (err error) { } } r.Pod = "" + r.Event(PodDeleted, r.Pod) Log.Info( "Task pod deleted.", "id", @@ -446,6 +1179,27 @@ func (r *Task) Delete(client k8s.Client) (err error) { return } +// podPending handles pod pending. +func (r *Task) podPending(pod *core.Pod) { + var status []core.ContainerStatus + status = append( + status, + pod.Status.InitContainerStatuses...) + status = append( + status, + pod.Status.ContainerStatuses...) + for _, status := range status { + if status.Started == nil { + continue + } + if *status.Started { + r.Event(PodRunning) + r.State = Running + return + } + } +} + // Cancel the task. func (r *Task) Cancel(client k8s.Client) (err error) { err = r.Delete(client) @@ -453,6 +1207,7 @@ func (r *Task) Cancel(client k8s.Client) (err error) { return } r.State = Canceled + r.Event(Canceled) r.SetBucket(nil) Log.Info( "Task canceled.", @@ -461,51 +1216,109 @@ func (r *Task) Cancel(client k8s.Client) (err error) { return } -// findAddon by name. 
-func (r *Task) findAddon(client k8s.Client, name string) (addon *crd.Addon, err error) {
- addon = &crd.Addon{}
- err = client.Get(
- context.TODO(),
- k8s.ObjectKey{
- Namespace: Settings.Hub.Namespace,
- Name: name,
- },
- addon)
- if err != nil {
- if k8serr.IsNotFound(err) {
- err = &AddonNotFound{name}
- } else {
- err = liberr.Wrap(err)
+// podRunning handles pod running.
+func (r *Task) podRunning(pod *core.Pod, client k8s.Client) {
+ r.State = Running
+ r.Event(PodRunning)
+ addonStatus := pod.Status.ContainerStatuses[0]
+ if addonStatus.State.Terminated != nil {
+ switch addonStatus.State.Terminated.ExitCode {
+ case 0:
+ r.podSucceeded(pod)
+ default: // failed.
+ r.podFailed(pod, client)
+ return
 }
- return
 }
+}

- return
+// podSucceeded handles pod succeeded.
+func (r *Task) podSucceeded(pod *core.Pod) {
+ mark := time.Now()
+ r.State = Succeeded
+ r.Event(PodSucceeded)
+ r.Terminated = &mark
 }

-// findTackle returns the tackle CR.
-func (r *Task) findTackle(client k8s.Client) (owner *crd.Tackle, err error) {
- list := crd.TackleList{}
- err = client.List(
- context.TODO(),
- &list,
- &k8s.ListOptions{Namespace: Settings.Namespace})
- if err != nil {
- err = liberr.Wrap(err)
- return
+// podFailed handles pod failed.
+func (r *Task) podFailed(pod *core.Pod, client k8s.Client) {
+ mark := time.Now()
+ var statuses []core.ContainerStatus
+ statuses = append(
+ statuses,
+ pod.Status.InitContainerStatuses...)
+ statuses = append(
+ statuses,
+ pod.Status.ContainerStatuses...)
+ for _, status := range statuses {
+ if status.State.Terminated == nil {
+ continue
+ }
+ r.Event(
+ PodFailed,
+ status.State.Terminated.Reason)
+ switch status.State.Terminated.ExitCode {
+ case 0: // Succeeded.
+ case 137: // Killed.
+ if r.Retries < Settings.Hub.Task.Retries {
+ _ = client.Delete(context.TODO(), pod)
+ r.Pod = ""
+ r.State = Ready
+ r.Terminated = nil
+ r.Started = nil
+ r.Errors = nil
+ r.Retries++
+ return
+ }
+ fallthrough
+ default: // Error.
+ r.State = Failed + r.Terminated = &mark + r.Error( + "Error", + "Container (%s) failed: %s", + status.Name, + status.State.Terminated.Reason) + return + } } - if len(list.Items) == 0 { - err = liberr.New("Tackle CR not found.") - return +} + +// getExtensions by name. +func (r *Task) getExtensions(client k8s.Client) (extensions []crd.Extension, err error) { + for _, name := range r.Extensions { + extension := crd.Extension{} + err = client.Get( + context.TODO(), + k8s.ObjectKey{ + Namespace: Settings.Hub.Namespace, + Name: name, + }, + &extension) + if err != nil { + if k8serr.IsNotFound(err) { + err = &ExtensionNotFound{name} + } else { + err = liberr.Wrap(err) + } + return + } + extensions = append( + extensions, + extension) } - owner = &list.Items[0] return } // pod build the pod. -func (r *Task) pod(addon *crd.Addon, owner *crd.Tackle, secret *core.Secret) (pod core.Pod) { +func (r *Task) pod( + addon *crd.Addon, + extensions []crd.Extension, + owner *crd.Tackle, + secret *core.Secret) (pod core.Pod) { + // pod = core.Pod{ - Spec: r.specification(addon, secret), + Spec: r.specification(addon, extensions, secret), ObjectMeta: meta.ObjectMeta{ Namespace: Settings.Hub.Namespace, GenerateName: r.k8sName(), @@ -520,14 +1333,22 @@ func (r *Task) pod(addon *crd.Addon, owner *crd.Tackle, secret *core.Secret) (po Name: owner.Name, UID: owner.UID, }) - return } // specification builds a Pod specification. 
-func (r *Task) specification(addon *crd.Addon, secret *core.Secret) (specification core.PodSpec) { +func (r *Task) specification( + addon *crd.Addon, + extensions []crd.Extension, + secret *core.Secret) (specification core.PodSpec) { + shared := core.Volume{ + Name: Shared, + VolumeSource: core.VolumeSource{ + EmptyDir: &core.EmptyDirVolumeSource{}, + }, + } cache := core.Volume{ - Name: "cache", + Name: Cache, } if Settings.Cache.RWX { cache.VolumeSource = core.VolumeSource{ @@ -540,13 +1361,14 @@ func (r *Task) specification(addon *crd.Addon, secret *core.Secret) (specificati EmptyDir: &core.EmptyDirVolumeSource{}, } } + init, plain := r.containers(addon, extensions, secret) specification = core.PodSpec{ ServiceAccountName: Settings.Hub.Task.SA, RestartPolicy: core.RestartPolicyNever, - Containers: []core.Container{ - r.container(addon, secret), - }, + InitContainers: init, + Containers: plain, Volumes: []core.Volume{ + shared, cache, }, } @@ -554,56 +1376,90 @@ func (r *Task) specification(addon *crd.Addon, secret *core.Secret) (specificati return } -// container builds the pod container. -func (r *Task) container(addon *crd.Addon, secret *core.Secret) (container core.Container) { +// container builds the pod containers. 
+func (r *Task) containers( + addon *crd.Addon, + extensions []crd.Extension, + secret *core.Secret) (init []core.Container, plain []core.Container) { userid := int64(0) - policy := core.PullIfNotPresent - if addon.Spec.ImagePullPolicy != "" { - policy = addon.Spec.ImagePullPolicy - } - container = core.Container{ - Name: "main", - Image: r.Image, - ImagePullPolicy: policy, - Resources: addon.Spec.Resources, - Env: []core.EnvVar{ - { + token := &core.EnvVarSource{ + SecretKeyRef: &core.SecretKeySelector{ + Key: settings.EnvHubToken, + LocalObjectReference: core.LocalObjectReference{ + Name: secret.Name, + }, + }, + } + plain = append(plain, addon.Spec.Container) + plain[0].Name = "addon" + for i := range extensions { + extension := &extensions[i] + container := extension.Spec.Container + container.Name = extension.Name + plain = append( + plain, + container) + } + injector := Injector{} + for i := range plain { + container := &plain[i] + injector.Inject(container) + r.propagateEnv(&plain[0], container) + container.SecurityContext = &core.SecurityContext{ + RunAsUser: &userid, + } + container.VolumeMounts = append( + container.VolumeMounts, + core.VolumeMount{ + Name: Shared, + MountPath: Settings.Shared.Path, + }, + core.VolumeMount{ + Name: Cache, + MountPath: Settings.Cache.Path, + }) + container.Env = append( + container.Env, + core.EnvVar{ + Name: settings.EnvSharedPath, + Value: Settings.Shared.Path, + }, + core.EnvVar{ + Name: settings.EnvCachePath, + Value: Settings.Cache.Path, + }, + core.EnvVar{ Name: settings.EnvHubBaseURL, Value: Settings.Addon.Hub.URL, }, - { + core.EnvVar{ Name: settings.EnvTask, Value: strconv.Itoa(int(r.Task.ID)), }, - { - Name: settings.EnvHubToken, - ValueFrom: &core.EnvVarSource{ - SecretKeyRef: &core.SecretKeySelector{ - Key: settings.EnvHubToken, - LocalObjectReference: core.LocalObjectReference{ - Name: secret.Name, - }, - }, - }, - }, - }, - VolumeMounts: []core.VolumeMount{ - { - Name: "cache", - MountPath: 
Settings.Cache.Path, - }, - }, - SecurityContext: &core.SecurityContext{ - RunAsUser: &userid, - }, + core.EnvVar{ + Name: settings.EnvHubToken, + ValueFrom: token, + }) } - return } +// propagateEnv copies extension container Env.* to the addon container. +// Prefixed with EXTENSION_. +func (r *Task) propagateEnv(addon, extension *core.Container) { + for _, env := range extension.Env { + addon.Env = append( + addon.Env, + core.EnvVar{ + Name: ExtEnv(extension.Name, env.Name), + Value: env.Value, + }) + } +} + // secret builds the pod secret. -func (r *Task) secret(addon *crd.Addon) (secret core.Secret) { - user := "addon:" + addon.Name +func (r *Task) secret() (secret core.Secret) { + user := "addon:" + r.Addon token, _ := auth.Hub.NewToken( user, auth.AddonRole, @@ -632,8 +1488,275 @@ func (r *Task) k8sName() string { // labels builds k8s labels. func (r *Task) labels() map[string]string { return map[string]string{ - "task": strconv.Itoa(int(r.ID)), - "app": "tackle", - "role": "task", + TaskLabel: strconv.Itoa(int(r.ID)), + AppLabel: "tackle", + RoleLabel: "task", + } +} + +// attach file. +func (r *Task) attach(file *model.File) { + r.Attached = append( + r.Attached, + model.Attachment{ + ID: file.ID, + Name: file.Name, + }) +} + +// Event represents a pod event. +type Event struct { + Type string + Reason string + Age string + Reporter string + Message string +} + +// Priority escalator. +type Priority struct { + cluster Cluster +} + +// Escalate task dependencies as needed. 
+func (p *Priority) Escalate(ready []*Task) (escalated []*Task) { + sort.Slice( + ready, + func(i, j int) bool { + it := ready[i] + jt := ready[j] + return it.Priority > jt.Priority + }) + for _, task := range ready { + dependencies := p.graph(task, ready) + for _, d := range dependencies { + if !d.StateIn( + Ready, + Pending, + Postponed, + QuotaBlocked) { + continue + } + if d.Priority < task.Priority { + d.Priority = task.Priority + reason := fmt.Sprintf( + "Escalated:%d, by:%d", + d.ID, + task.ID) + d.Event(Escalated, reason) + Log.Info(reason) + escalated = append( + escalated, + d) + } + } + } + escalated = p.unique(escalated) + return +} + +// graph builds a dependency graph. +func (p *Priority) graph(task *Task, ready []*Task) (deps []*Task) { + kind, found := p.cluster.tasks[task.Kind] + if !found { + return + } + for _, d := range kind.Spec.Dependencies { + for _, r := range ready { + if r.ID == task.ID { + continue + } + if r.Kind != d { + continue + } + if r.ApplicationID == nil || task.ApplicationID == nil { + continue + } + if *r.ApplicationID != *task.ApplicationID { + continue + } + deps = append(deps, r) + deps = append(deps, p.graph(r, ready)...) + } + } + return +} + +// unique returns a unique list of tasks. 
+func (p *Priority) unique(in []*Task) (out []*Task) { + mp := make(map[uint]*Task) + for _, ptr := range in { + mp[ptr.ID] = ptr + } + for _, ptr := range mp { + out = append(out, ptr) + } + return +} + +type Cluster struct { + k8s.Client + tackle *crd.Tackle + addons map[string]*crd.Addon + extensions map[string]*crd.Extension + tasks map[string]*crd.Task + pods map[string]*core.Pod +} + +func (k *Cluster) Refresh() (err error) { + if Settings.Hub.Disconnected { + k.tackle = &crd.Tackle{} + k.addons = make(map[string]*crd.Addon) + k.extensions = make(map[string]*crd.Extension) + k.tasks = make(map[string]*crd.Task) + k.pods = make(map[string]*core.Pod) + return + } + err = k.getTackle() + if err != nil { + return + } + err = k.getAddons() + if err != nil { + return + } + err = k.getExtensions() + if err != nil { + return + } + err = k.getTasks() + if err != nil { + return } + err = k.getPods() + if err != nil { + return + } + return +} + +// getTackle +func (k *Cluster) getTackle() (err error) { + options := &k8s.ListOptions{Namespace: Settings.Namespace} + list := crd.TackleList{} + err = k.List( + context.TODO(), + &list, + options) + if err != nil { + err = liberr.Wrap(err) + return + } + for i := range list.Items { + r := &list.Items[i] + k.tackle = r + return + } + err = liberr.New("Tackle CR not found.") + return +} + +// getAddons +func (k *Cluster) getAddons() (err error) { + k.addons = make(map[string]*crd.Addon) + options := &k8s.ListOptions{Namespace: Settings.Namespace} + list := crd.AddonList{} + err = k.List( + context.TODO(), + &list, + options) + if err != nil { + err = liberr.Wrap(err) + return + } + for i := range list.Items { + r := &list.Items[i] + k.addons[r.Name] = r + } + return +} + +// getExtensions +func (k *Cluster) getExtensions() (err error) { + k.extensions = make(map[string]*crd.Extension) + options := &k8s.ListOptions{Namespace: Settings.Namespace} + list := crd.ExtensionList{} + err = k.List( + context.TODO(), + &list, + options) 
+ if err != nil { + err = liberr.Wrap(err) + return + } + for i := range list.Items { + r := &list.Items[i] + k.extensions[r.Name] = r + } + return +} + +// getTasks gets task (kinds). +func (k *Cluster) getTasks() (err error) { + k.tasks = make(map[string]*crd.Task) + options := &k8s.ListOptions{Namespace: Settings.Namespace} + list := crd.TaskList{} + err = k.List( + context.TODO(), + &list, + options) + if err != nil { + err = liberr.Wrap(err) + return + } + for i := range list.Items { + r := &list.Items[i] + k.tasks[r.Name] = r + } + return +} + +// getPods +func (k *Cluster) getPods() (err error) { + k.pods = make(map[string]*core.Pod) + selector := labels.NewSelector() + req, _ := labels.NewRequirement(TaskLabel, selection.Exists, []string{}) + selector = selector.Add(*req) + options := &k8s.ListOptions{ + Namespace: Settings.Namespace, + LabelSelector: selector, + } + list := core.PodList{} + err = k.List( + context.TODO(), + &list, + options) + if err != nil { + err = liberr.Wrap(err) + return + } + for i := range list.Items { + r := &list.Items[i] + k.pods[r.Name] = r + } + return +} + +// ExtEnv returns an environment variable name namespaced to an extension. +// Format: _EXT_<EXTENSION>_<ENVAR>. +func ExtEnv(extension string, envar string) (s string) { + s = strings.Join( + []string{ + "_EXT", + strings.ToUpper(extension), + envar, + }, + "_") + return +} + +// Preempt request. +type Preempt struct { + task *Task + by *Task }
-func (r *RuleUnique) Match(candidate, other *model.Task) (matched bool) { - if candidate.ApplicationID == nil || other.ApplicationID == nil { +func (r *RuleUnique) Match(ready, other *Task) (matched bool, reason string) { + if ready.ApplicationID == nil || other.ApplicationID == nil { + return + } + if *ready.ApplicationID != *other.ApplicationID { return } - if *candidate.ApplicationID != *other.ApplicationID { + if ready.Addon != other.Addon { return } - if candidate.Addon != other.Addon { + if _, found := r.matched[other.ID]; found { return } matched = true - Log.Info( - "Rule:Unique matched.", - "candidate", - candidate.ID, - "by", + r.matched[ready.ID] = other.ID + reason = fmt.Sprintf( + "Rule:Unique matched:%d, other:%d", + ready.ID, other.ID) - + Log.Info(reason) return } -// RuleIsolated policy. -type RuleIsolated struct { +// RuleDeps - Task kind dependencies. +type RuleDeps struct { + cluster Cluster } // Match determines the match. -func (r *RuleIsolated) Match(candidate, other *model.Task) (matched bool) { - matched = r.hasPolicy(candidate, Isolated) || r.hasPolicy(other, Isolated) - if matched { - Log.Info( - "Rule:Isolated matched.", - "candidate", - candidate.ID, - "by", - other.ID) +func (r *RuleDeps) Match(ready, other *Task) (matched bool, reason string) { + if ready.Kind == "" || other.Kind == "" { + return } - + if *ready.ApplicationID != *other.ApplicationID { + return + } + def, found := r.cluster.tasks[ready.Kind] + if !found { + return + } + matched = def.HasDep(other.Kind) + reason = fmt.Sprintf( + "Rule:Dependency matched:%d, other:%d", + ready.ID, + other.ID) + Log.Info(reason) return } -// Returns true if the task policy includes: isolated -func (r *RuleIsolated) hasPolicy(task *model.Task, name string) (matched bool) { - for _, p := range strings.Split(task.Policy, ";") { - p = strings.TrimSpace(p) - p = strings.ToLower(p) - if p == name { +// RulePreempted - preempted tasks postponed to prevent thrashing. 
+type RulePreempted struct { +} + +// Match determines the match. +// Postpone based on a duration after the last preempted event. +func (r *RulePreempted) Match(ready, _ *Task) (matched bool, reason string) { + preemption := Settings.Hub.Task.Preemption + if !preemption.Enabled { + return + } + mark := time.Now() + event, found := ready.LastEvent(Preempted) + if found { + if mark.Sub(event.Last) < preemption.Postponed { matched = true - break + reason = fmt.Sprintf( + "Rule:Preempted id:%d", + ready.ID) + Log.Info(reason) } } + return +} +// RuleIsolated policy. +type RuleIsolated struct { +} + +// Match determines the match. +func (r *RuleIsolated) Match(ready, other *Task) (matched bool, reason string) { + matched = ready.Policy.Isolated || other.Policy.Isolated + reason = fmt.Sprintf( + "Rule:Isolated matched:%d, other:%d", + ready.ID, + other.ID) + Log.Info(reason) return } diff --git a/task/selector.go b/task/selector.go new file mode 100644 index 00000000..125ff487 --- /dev/null +++ b/task/selector.go @@ -0,0 +1,220 @@ +package task + +import ( + "context" + "errors" + "fmt" + "regexp" + "sort" + "strconv" + "strings" + + gv "github.com/PaesslerAG/gval" + liberr "github.com/jortel/go-utils/error" + "github.com/konveyor/tackle2-hub/model" + "gorm.io/gorm" +) + +var ( + PredRegex = regexp.MustCompile(`(\w+)\:([\w/=]+)`) +) + +// NewSelector returns a selector. +func NewSelector(db *gorm.DB, task *Task) (selector *Selector) { + selector = &Selector{ + predicate: map[string]Predicate{ + "tag": &TagPredicate{ + db: db, + task: task, + }, + }, + } + return +} + +// Selector used to match addons and extensions. +type Selector struct { + predicate map[string]Predicate +} + +// Match evaluates the selector. 
+func (r *Selector) Match(selector string) (matched bool, err error) { + if selector == "" { + matched = true + return + } + params := make(map[string]string) + found := PredRegex.FindAllStringSubmatch(selector, -1) + for _, m := range found { + kind := m[1] + p, found := r.predicate[kind] + if found { + matched, err = p.Match(m[2]) + if err != nil { + return + } + params[m[0]] = strconv.FormatBool(matched) + } else { + err = &SelectorNotValid{ + Selector: selector, + Predicate: kind, + } + return + } + } + var keySet []string + for k := range params { + keySet = append(keySet, k) + } + sort.Slice( + keySet, + func(i, j int) bool { + return len(keySet[i]) > len(keySet[j]) + }) + matched = false + expression := selector + for _, ref := range keySet { + expression = strings.Replace( + expression, + ref, + params[ref], + -1) + } + p := r.parser() + v, err := p.Evaluate(expression, nil) + if err != nil { + err = &SelectorNotValid{ + Selector: selector, + Reason: err.Error(), + } + return + } + if b, cast := v.(bool); cast { + matched = b + } else { + err = &SelectorNotValid{ + Selector: selector, + Reason: "parser returned unexpected result.", + } + } + return +} + +// parser returns a parser. 
+func (r *Selector) parser() (p gv.Language) { + p = gv.NewLanguage( + gv.Ident(), + gv.Parentheses(), + gv.Constant("true", true), + gv.Constant("false", false), + gv.PrefixOperator( + "!", + func(c context.Context, v any) (b any, err error) { + switch x := v.(type) { + case bool: + b = !x + default: + err = &SelectorNotValid{ + Reason: fmt.Sprintf("%v not expected", x), + } + } + return + }), + gv.InfixShortCircuit( + "&&", + func(a any) (v any, b bool) { + v = false + b = a == false + return + }), + gv.InfixBoolOperator( + "&&", + func(a, b bool) (v any, err error) { + v = a && b + return + }), + gv.InfixShortCircuit( + "||", + func(a any) (v any, b bool) { + v = true + b = a == true + return + }), + gv.InfixBoolOperator( + "||", + func(a, b bool) (v any, err error) { + v = a || b + return + }), + ) + return +} + +type Predicate interface { + Match(ref string) (matched bool, err error) +} + +// TagPredicate evaluates application tag references. +type TagPredicate struct { + db *gorm.DB + task *Task +} + +// Match evaluates application tag references. +// The `ref` has format: category=tag. +// The tag is optional and behaves like a wildcard when not specified. 
+func (r *TagPredicate) Match(ref string) (matched bool, err error) { + category, name := r.parse(ref) + db := r.db.Session(&gorm.Session{}) + cat := &model.TagCategory{} + err = db.First(cat, "name=?", category).Error + if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + Log.Info( + "TagSelector: category not found.", + "name", + category) + err = nil + } else { + err = liberr.Wrap(err) + } + return + } + db = r.db.Session(&gorm.Session{}) + db = db.Preload("Tags") + application := &model.Application{} + err = db.First(application, r.task.ApplicationID).Error + if err != nil { + if errors.Is(err, gorm.ErrRecordNotFound) { + Log.Info( + "TagSelector: application not found.", + "id", + r.task.ApplicationID) + err = nil + } else { + err = liberr.Wrap(err) + } + return + } + for _, tag := range application.Tags { + if cat.ID != tag.CategoryID { + continue + } + if !(name == "" || tag.Name == name) { + continue + } + matched = true + break + } + return +} + +// parse tag ref. +func (r *TagPredicate) parse(s string) (category, name string) { + part := strings.SplitN(s, "=", 2) + category = part[0] + if len(part) > 1 { + name = part[1] + } + return +} diff --git a/task/task_test.go b/task/task_test.go new file mode 100644 index 00000000..34a2aef0 --- /dev/null +++ b/task/task_test.go @@ -0,0 +1,137 @@ +package task + +import ( + "testing" + + crd "github.com/konveyor/tackle2-hub/k8s/api/tackle/v1alpha1" + "github.com/konveyor/tackle2-hub/model" + "github.com/onsi/gomega" +) + +func TestPriorityEscalate(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + appId := uint(1) + appOther := uint(2) + + kinds := make(map[string]*crd.Task) + ready := []*Task{} + + a := crd.Task{} + a.Name = "a" + kinds[a.Name] = &a + + b := crd.Task{} + b.Name = "b" + b.Spec.Dependencies = []string{"a"} + kinds[b.Name] = &b + + c := crd.Task{} + c.Name = "c" + c.Spec.Dependencies = []string{"b"} + kinds[c.Name] = &c + + task := &Task{&model.Task{}} + task.ID = 1 + task.Kind = "c" + 
task.State = Ready + task.Priority = 10 + task.ApplicationID = &appId + ready = append(ready, task) + + task = &Task{&model.Task{}} + task.ID = 2 + task.Kind = "b" + task.State = Ready + task.ApplicationID = &appId + ready = append(ready, task) + + task = &Task{&model.Task{}} + task.ID = 3 + task.Kind = "a" + task.State = Ready + task.ApplicationID = &appId + ready = append(ready, task) + + task = &Task{&model.Task{}} + task.ID = 4 + task.State = Ready + task.ApplicationID = &appId + ready = append(ready, task) + + task = &Task{&model.Task{}} + task.ID = 5 + task.Kind = "b" + task.State = Ready + task.ApplicationID = &appOther + ready = append(ready, task) + + pE := Priority{ + cluster: Cluster{ + tasks: kinds, + }} + + escalated := pE.Escalate(ready) + g.Expect(len(escalated)).To(gomega.Equal(2)) + + escalated = pE.Escalate(nil) + g.Expect(len(escalated)).To(gomega.Equal(0)) +} + +func TestPriorityGraph(t *testing.T) { + g := gomega.NewGomegaWithT(t) + + appId := uint(1) + + kinds := make(map[string]*crd.Task) + ready := []*Task{} + + a := crd.Task{} + a.Name = "a" + kinds[a.Name] = &a + + b := crd.Task{} + b.Name = "b" + b.Spec.Dependencies = []string{"a"} + kinds[b.Name] = &b + + c := crd.Task{} + c.Name = "c" + c.Spec.Dependencies = []string{"b"} + kinds[c.Name] = &c + + task := &Task{&model.Task{}} + task.ID = 1 + task.Kind = "c" + task.State = Ready + task.Priority = 10 + task.ApplicationID = &appId + ready = append(ready, task) + + task = &Task{&model.Task{}} + task.ID = 2 + task.Kind = "b" + task.State = Ready + task.ApplicationID = &appId + ready = append(ready, task) + + task = &Task{&model.Task{}} + task.ID = 3 + task.Kind = "a" + task.State = Ready + task.ApplicationID = &appId + ready = append(ready, task) + + task = &Task{&model.Task{}} + task.ID = 4 + task.State = Ready + task.ApplicationID = &appId + ready = append(ready, task) + + pE := Priority{ + cluster: Cluster{ + tasks: kinds, + }} + deps := pE.graph(ready[0], ready) + 
g.Expect(len(deps)).To(gomega.Equal(2)) +} diff --git a/test/api/task/samples.go b/test/api/task/samples.go index 0663c62d..ae46d3fe 100644 --- a/test/api/task/samples.go +++ b/test/api/task/samples.go @@ -7,9 +7,9 @@ import ( // Set of valid resources for tests and reuse. var ( Windup = api.Task{ - Name: "Test windup task", - Addon: "windup", - Data: "{}", + Name: "Test", + Addon: "analyzer", + Data: api.Map{}, } Samples = []api.Task{Windup} )