// Copyright Contributors to the Open Cluster Management project

package v1alpha1

import (
	"fmt"
	"reflect"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"open-cluster-management.io/governance-policy-nucleus/api/v1beta1"
)

//+kubebuilder:object:generate=false

// ReflectiveResourceList implements v1beta1.ResourceList for the wrapped client.ObjectList, by
// using reflection. The wrapped list must have an Items field, with a slice of items which satisfy
// the client.Object interface - most types which satisfy client.ObjectList seem to follow this
// convention. Using this type is not recommended: implementing ResourceList by hand will likely
// lead to better performance.
type ReflectiveResourceList struct {
	ClientList client.ObjectList
}

// Run a compile-time check to ensure ReflectiveResourceList implements ResourceList.
var _ v1beta1.ResourceList = (*ReflectiveResourceList)(nil)

// Items returns the list of items in the list. Since this implementation uses reflection, it may
// have errors or not perform as well as a bespoke implementation for the underlying type. The
// returned Objects are in the same order that they are in the list.
func (l *ReflectiveResourceList) Items() ([]client.Object, error) {
	value := reflect.ValueOf(l.ClientList)
	if value.Kind() == reflect.Pointer {
		value = value.Elem()
	}

	if value.Kind() != reflect.Struct {
		return nil, &ReflectiveResourceListError{
			typeName: value.Type().PkgPath() + "." + value.Type().Name(),
			message:  "the underlying go Kind was not a struct",
		}
	}

	itemsField := value.FieldByName("Items")
	if !itemsField.IsValid() {
		return nil, &ReflectiveResourceListError{
			typeName: value.Type().PkgPath() + "." + value.Type().Name(),
			message:  "the underlying struct does not have a field called 'Items'",
		}
	}

	if itemsField.Kind() != reflect.Slice {
		return nil, &ReflectiveResourceListError{
			typeName: value.Type().PkgPath() + "." + value.Type().Name(),
			message:  "the 'Items' field in the underlying struct isn't a slice",
		}
	}

	items := make([]client.Object, itemsField.Len())

	for i := 0; i < itemsField.Len(); i++ {
		item, ok := itemsField.Index(i).Interface().(client.Object)
		if ok {
			items[i] = item

			continue
		}

		// Try a pointer receiver
		item, ok = itemsField.Index(i).Addr().Interface().(client.Object)
		if ok {
			items[i] = item

			continue
		}

		return nil, &ReflectiveResourceListError{
			typeName: value.Type().PkgPath() + "." + value.Type().Name(),
			message: "an item in the underlying struct's 'Items' slice could not be " +
				"type-asserted to a sigs.k8s.io/controller-runtime/pkg/client.Object",
		}
	}

	return items, nil
}

//nolint:ireturn // the ResourceList interface requires this interface return
func (l *ReflectiveResourceList) ObjectList() client.ObjectList {
	return l.ClientList
}

//+kubebuilder:object:generate=false

type ReflectiveResourceListError struct {
	typeName string
	message  string
}

func (e *ReflectiveResourceListError) Error() string {
	return fmt.Sprintf("unable to use %v as a nucleus ResourceList: %v", e.typeName, e.message)
}
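
// The function below is an illustrative usage sketch added for this review; it
// is not part of the upstream API. It wraps an arbitrary client.ObjectList
// (assumed to have already been populated by a List call) in a
// ReflectiveResourceList and collects the item names.
func exampleItemNames(list client.ObjectList) ([]string, error) {
	resList := &ReflectiveResourceList{ClientList: list}

	// Items() reflects over the wrapped list's 'Items' field; it returns an
	// error if the field is missing, is not a slice, or holds items that
	// cannot be asserted to client.Object.
	items, err := resList.Items()
	if err != nil {
		return nil, err
	}

	names := make([]string, len(items))
	for i, obj := range items {
		names[i] = obj.GetName()
	}

	return names, nil
}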
// Copyright Contributors to the Open Cluster Management project

package v1beta1

import (
	"sort"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// GetCondition returns the existing index and condition on the status matching the given type. If
// no condition of that type is found, it will return -1 as the index.
func (status PolicyCoreStatus) GetCondition(condType string) (int, metav1.Condition) {
	for i, cond := range status.Conditions {
		if cond.Type == condType {
			return i, cond
		}
	}

	return -1, metav1.Condition{}
}

// UpdateCondition modifies the specified condition in the status or adds it if not present,
// ensuring conditions remain sorted by Type. Returns true if the condition was updated or added.
func (status *PolicyCoreStatus) UpdateCondition(newCond metav1.Condition) (changed bool) {
	idx, existingCond := status.GetCondition(newCond.Type)
	if idx == -1 {
		if newCond.LastTransitionTime.IsZero() {
			newCond.LastTransitionTime = metav1.Now()
		}

		status.Conditions = append(status.Conditions, newCond)
		sort.SliceStable(status.Conditions, func(i, j int) bool {
			return status.Conditions[i].Type < status.Conditions[j].Type
		})

		return true
	} else if condSemanticallyChanged(newCond, existingCond) {
		if newCond.LastTransitionTime.IsZero() {
			newCond.LastTransitionTime = metav1.Now()
		}

		status.Conditions[idx] = newCond

		// Do not sort in this case, assume that they are in order.
		return true
	}

	return false
}

func condSemanticallyChanged(newCond, oldCond metav1.Condition) bool {
	return newCond.Message != oldCond.Message ||
		newCond.Reason != oldCond.Reason ||
		newCond.Status != oldCond.Status
}
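
// The helper below is an illustrative sketch added for this review; it is not
// part of the upstream API. It shows the intended UpdateCondition flow: build
// the new condition, let UpdateCondition decide whether anything semantically
// changed, and only write status or emit events when it returns true.
func exampleSetCompliance(status *PolicyCoreStatus, compliant bool) bool {
	cond := metav1.Condition{
		Type:    "Compliant",
		Status:  metav1.ConditionTrue,
		Reason:  "Found",
		Message: "the desired state was found",
	}

	if !compliant {
		cond.Status = metav1.ConditionFalse
		cond.Reason = "NotFound"
		cond.Message = "the desired state was missing"
	}

	// LastTransitionTime is left zero here; UpdateCondition fills it in when
	// the condition is added or semantically changed.
	return status.UpdateCondition(cond)
}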
// Copyright Contributors to the Open Cluster Management project

//+kubebuilder:object:generate=true
//+groupName=policy.open-cluster-management.io
//+kubebuilder:validation:Optional

// Package v1beta1 contains API Schema definitions for the policy v1beta1 API group
package v1beta1

import (
	"context"
	"encoding/json"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// PolicyCoreSpec defines fields that policies should implement to be part of the
// Open Cluster Management policy framework. The intention is for controllers
// to embed this struct in their *Spec definitions.
type PolicyCoreSpec struct {
	// Severity defines how serious the situation is when the policy is not
	// compliant. The severity might not change the behavior of the policy, but
	// may be read and used by other tools. Accepted values include: low,
	// medium, high, and critical.
	Severity Severity `json:"severity,omitempty"`

	// RemediationAction indicates what the policy controller should do when the
	// policy is not compliant. Accepted values include: inform and enforce.
	// Note that not all policy controllers will attempt to automatically
	// remediate a policy, even when set to "enforce".
	RemediationAction RemediationAction `json:"remediationAction,omitempty"`

	// NamespaceSelector indicates which namespaces on the cluster this policy
	// should apply to, when the policy applies to namespaced objects.
	NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"`
}

//+kubebuilder:validation:Enum=low;Low;medium;Medium;high;High;critical;Critical
type Severity string

//+kubebuilder:validation:Enum=Inform;inform;Enforce;enforce
type RemediationAction string

// IsEnforce is true when the policy controller can attempt to enforce the
// policy by remediating it automatically. Note that not all controllers will
// support automatic enforcement.
func (ra RemediationAction) IsEnforce() bool {
	return ra == "Enforce" || ra == "enforce"
}

// IsInform is true when the policy controller should only report whether the
// policy is compliant or not, and should not perform any actions to attempt
// remediation.
func (ra RemediationAction) IsInform() bool {
	return ra == "Inform" || ra == "inform"
}

type NamespaceSelector struct {
	*metav1.LabelSelector `json:",inline"`

	// Include is a list of filepath expressions for namespaces the policy should apply to.
	Include []NonEmptyString `json:"include,omitempty"`

	// Exclude is a list of filepath expressions for namespaces the policy should _not_ apply to.
	Exclude []NonEmptyString `json:"exclude,omitempty"`
}

// MarshalJSON returns the JSON encoding of the NamespaceSelector. The LabelSelector's matchLabels
// and matchExpressions will only be omitted from the encoding if the LabelSelector is nil; if
// either of them have been set but are empty, then they will be included in this JSON encoding.
func (sel NamespaceSelector) MarshalJSON() ([]byte, error) {
	if sel.LabelSelector == nil {
		return json.Marshal(struct {
			Include []NonEmptyString `json:"include,omitempty"`
			Exclude []NonEmptyString `json:"exclude,omitempty"`
		}{
			Include: sel.Include,
			Exclude: sel.Exclude,
		})
	}

	return json.Marshal(struct {
		MatchLabels      map[string]string                 `json:"matchLabels"`
		MatchExpressions []metav1.LabelSelectorRequirement `json:"matchExpressions"`
		Include          []NonEmptyString                  `json:"include,omitempty"`
		Exclude          []NonEmptyString                  `json:"exclude,omitempty"`
	}{
		MatchLabels:      sel.MatchLabels,
		MatchExpressions: sel.MatchExpressions,
		Include:          sel.Include,
		Exclude:          sel.Exclude,
	})
}

// GetNamespaces fetches all namespaces in the cluster and returns a list of the
// namespaces that match the NamespaceSelector. The client.Reader needs access
// for viewing namespaces, like the access given by this kubebuilder tag:
// `//+kubebuilder:rbac:groups=core,resources=namespaces,verbs=get;list;watch`
//
// NOTE: unlike Target, an empty NamespaceSelector will match zero namespaces.
func (sel NamespaceSelector) GetNamespaces(ctx context.Context, r client.Reader) ([]string, error) {
	if len(sel.Include) == 0 && sel.LabelSelector == nil {
		// A somewhat special case of no matches.
		return []string{}, nil
	}

	t := Target{
		LabelSelector: sel.LabelSelector,
		Include:       sel.Include,
		Exclude:       sel.Exclude,
	}

	matchingNamespaces, err := t.GetMatches(ctx, r, &namespaceResList{})
	if err != nil {
		return nil, err
	}

	names := make([]string, len(matchingNamespaces))
	for i, ns := range matchingNamespaces {
		names[i] = ns.GetName()
	}

	return names, nil
}

type namespaceResList struct {
	corev1.NamespaceList
}

// Run a compile-time check to ensure namespaceResList implements ResourceList.
var _ ResourceList = (*namespaceResList)(nil)

func (l *namespaceResList) Items() ([]client.Object, error) {
	items := make([]client.Object, len(l.NamespaceList.Items))
	for i := range l.NamespaceList.Items {
		items[i] = &l.NamespaceList.Items[i]
	}

	return items, nil
}

//nolint:ireturn // the ResourceList interface requires this interface return
func (l *namespaceResList) ObjectList() client.ObjectList {
	return &l.NamespaceList
}

//+kubebuilder:validation:MinLength=1
type NonEmptyString string

// PolicyCoreStatus defines fields that policies should implement as part of
// the Open Cluster Management policy framework. The intent is for controllers
// to embed this struct in their *Status definitions.
type PolicyCoreStatus struct {
	// ComplianceState indicates whether the policy is compliant or not.
	// Accepted values include: Compliant, NonCompliant, and UnknownCompliancy.
	ComplianceState ComplianceState `json:"compliant,omitempty"`

	// Conditions represent the latest available observations of the object's status. One of these
	// items should have Type=Compliant and a message detailing the current compliance.
	Conditions []metav1.Condition `json:"conditions,omitempty"`
}

//+kubebuilder:validation:Enum=Compliant;NonCompliant;UnknownCompliancy
type ComplianceState string

const (
	// Compliant indicates that the policy controller determined there were no
	// violations to the policy in the cluster.
	Compliant ComplianceState = "Compliant"

	// NonCompliant indicates that the policy controller found an issue in the
	// cluster that is considered a violation.
	NonCompliant ComplianceState = "NonCompliant"

	// UnknownCompliancy indicates that the policy controller could not determine
	// if the cluster has any violations or not.
	UnknownCompliancy ComplianceState = "UnknownCompliancy"
)

//+kubebuilder:object:root=true
//+kubebuilder:subresource:status

// PolicyCore is the Schema for the policycores API. This is not a real API, but
// is included so that an example CRD can be generated showing the validated
// fields and types.
type PolicyCore struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   PolicyCoreSpec   `json:"spec,omitempty"`
	Status PolicyCoreStatus `json:"status,omitempty"`
}

//+kubebuilder:object:generate=false

// PolicyLike is an interface that policies should implement so that they can
// benefit from some of the general tools in the nucleus. Here is a simple
// example implementation, which utilizes the core types of the nucleus:
//
//	import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
//	import nucleusv1beta1 "open-cluster-management.io/governance-policy-nucleus/api/v1beta1"
//
//	type FakePolicy struct {
//		metav1.TypeMeta   `json:",inline"`
//		metav1.ObjectMeta `json:"metadata,omitempty"`
//		Spec              nucleusv1beta1.PolicyCoreSpec   `json:"spec,omitempty"`
//		Status            nucleusv1beta1.PolicyCoreStatus `json:"status,omitempty"`
//	}
//
//	func (f FakePolicy) ComplianceState() nucleusv1beta1.ComplianceState {
//		return f.Status.ComplianceState
//	}
//
//	func (f FakePolicy) ComplianceMessage() string {
//		idx, compCond := f.Status.GetCondition("Compliant")
//		if idx == -1 {
//			return ""
//		}
//
//		return compCond.Message
//	}
//
//	func (f FakePolicy) Parent() metav1.OwnerReference {
//		if len(f.OwnerReferences) == 0 {
//			return metav1.OwnerReference{}
//		}
//
//		return f.OwnerReferences[0]
//	}
//
//	func (f FakePolicy) ParentNamespace() string {
//		return f.Namespace
//	}
type PolicyLike interface {
	client.Object

	// The ComplianceState (Compliant/NonCompliant) of the specific policy.
	ComplianceState() ComplianceState

	// A human-readable string describing the current state of the policy, and why it is either
	// Compliant or NonCompliant.
	ComplianceMessage() string

	// The "parent" object on this cluster for the specific policy. Generally a Policy, in the API
	// GroupVersion `policy.open-cluster-management.io/v1`. For namespaced kinds of policies, this
	// will usually be the owner of the policy. For cluster-scoped policies, this must be stored
	// some other way.
	Parent() metav1.OwnerReference

	// The namespace of the "parent" object.
	ParentNamespace() string
}
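
// The function below is an illustrative sketch added for this review; it is
// not part of the upstream API. It shows how a controller might use
// NamespaceSelector.GetNamespaces; the selector values here are assumptions
// for the example, and the Include/Exclude patterns use filepath.Match syntax.
func exampleSelectNamespaces(ctx context.Context, r client.Reader) ([]string, error) {
	sel := NamespaceSelector{
		LabelSelector: &metav1.LabelSelector{},
		Include:       []NonEmptyString{"*"},
		Exclude:       []NonEmptyString{"kube-*"},
	}

	// Requires namespace list access, e.g.:
	// //+kubebuilder:rbac:groups=core,resources=namespaces,verbs=get;list;watch
	return sel.GetNamespaces(ctx, r)
}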
// Copyright Contributors to the Open Cluster Management project

package v1beta1

import (
	"context"
	"fmt"
	"path/filepath"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/client-go/dynamic"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

type Target struct {
	*metav1.LabelSelector `json:",inline"`

	// Namespace is the namespace to restrict the Target to. Can be empty for non-namespaced
	// objects, or to look in all namespaces.
	Namespace string `json:"namespace,omitempty"`

	// Include is a list of filepath expressions to include objects by name.
	Include []NonEmptyString `json:"include,omitempty"`

	// Exclude is a list of filepath expressions to exclude objects by name.
	Exclude []NonEmptyString `json:"exclude,omitempty"`
}

//+kubebuilder:object:generate=false

// ResourceList is meant to wrap a concrete implementation of a client.ObjectList, giving access
// to the items in the list. The methods should be implemented on pointer types. For example, an
// implementation of this interface for ConfigMaps might look like:
//
//	import corev1 "k8s.io/api/core/v1"
//	import "sigs.k8s.io/controller-runtime/pkg/client"
//
//	type configMapResList struct {
//		corev1.ConfigMapList
//	}
//
//	func (l *configMapResList) Items() ([]client.Object, error) {
//		items := make([]client.Object, len(l.ConfigMapList.Items))
//		for i := range l.ConfigMapList.Items {
//			items[i] = &l.ConfigMapList.Items[i]
//		}
//
//		return items, nil
//	}
//
//	func (l *configMapResList) ObjectList() client.ObjectList {
//		return &l.ConfigMapList
//	}
type ResourceList interface {
	ObjectList() client.ObjectList
	Items() ([]client.Object, error)
}

// GetMatches returns a list of resources on the cluster, matched by the Target. The provided
// ResourceList should be backed by a client.ObjectList type which must be registered in the
// scheme of the client.Reader. The items in the provided ResourceList after this method is called
// will not necessarily equal the items matched by the Target. The items returned here will be in
// relatively the same order as they were in the list returned by the API.
//
// This method should be used preferentially to `GetMatchesDynamic` because it can leverage the
// Reader's cache.
//
// NOTE: unlike the NamespaceSelector, an empty Target will match *all* resources on the cluster.
func (t Target) GetMatches(ctx context.Context, r client.Reader, list ResourceList) ([]client.Object, error) {
	nonNilSel := t.LabelSelector
	if nonNilSel == nil { // override it to be empty if it is nil
		nonNilSel = &metav1.LabelSelector{}
	}

	labelSel, err := metav1.LabelSelectorAsSelector(nonNilSel)
	if err != nil {
		return nil, err
	}

	listOpts := client.ListOptions{
		LabelSelector: labelSel,
		Namespace:     t.Namespace,
	}

	if err := r.List(ctx, list.ObjectList(), &listOpts); err != nil {
		return nil, err
	}

	items, err := list.Items()
	if err != nil {
		return nil, err
	}

	return t.matchesByName(items)
}

// GetMatchesDynamic returns a list of resources on the cluster, matched by the Target. The kind of
// the resources is configured by the provided dynamic.ResourceInterface. If the Target specifies a
// namespace, this method will limit the namespace of the provided Interface if possible. If the
// provided Interface is already namespaced, the namespace of the Interface will be used (possibly
// overriding the namespace specified in the Target). The items returned here will be in relatively
// the same order as they were in the list returned by the API.
//
// NOTE: unlike the NamespaceSelector, an empty Target will match *all* resources on the cluster.
func (t Target) GetMatchesDynamic(
	ctx context.Context, iface dynamic.ResourceInterface,
) ([]*unstructured.Unstructured, error) {
	labelSel, err := metav1.LabelSelectorAsSelector(t.LabelSelector)
	if err != nil {
		return nil, err
	}

	if t.Namespace != "" {
		if namespaceableIface, ok := iface.(dynamic.NamespaceableResourceInterface); ok {
			iface = namespaceableIface.Namespace(t.Namespace)
		}
	}

	objs, err := iface.List(ctx, metav1.ListOptions{LabelSelector: labelSel.String()})
	if err != nil {
		return nil, err
	}

	matchedObjs := make([]*unstructured.Unstructured, 0)

	for _, obj := range objs.Items {
		obj := obj

		matched, err := t.match(obj.GetName())
		if err != nil {
			return matchedObjs, err
		}

		if matched {
			matchedObjs = append(matchedObjs, &obj)
		}
	}

	return matchedObjs, nil
}

// matchesByName filters a list of client.Objects by name, and returns ones that
// match the Include and Exclude lists in the Target. The only possible returned
// error is a wrapped filepath.ErrBadPattern.
func (t Target) matchesByName(items []client.Object) ([]client.Object, error) {
	matches := make([]client.Object, 0)

	for _, item := range items {
		matched, err := t.match(item.GetName())
		if err != nil {
			return nil, err
		}

		if matched {
			matches = append(matches, item)
		}
	}

	return matches, nil
}

// match returns whether the given name matches the Include and Exclude lists in
// the Target.
func (t Target) match(name string) (bool, error) {
	var err error

	include := len(t.Include) == 0 // include everything if empty/unset

	for _, includePattern := range t.Include {
		include, err = filepath.Match(string(includePattern), name)
		if err != nil {
			return false, fmt.Errorf("error parsing 'include' pattern '%s': %w", string(includePattern), err)
		}

		if include {
			break
		}
	}

	if !include {
		return false, nil
	}

	for _, excludePattern := range t.Exclude {
		exclude, err := filepath.Match(string(excludePattern), name)
		if err != nil {
			return false, fmt.Errorf("error parsing 'exclude' pattern '%s': %w", string(excludePattern), err)
		}

		if exclude {
			return false, nil
		}
	}

	return true, nil
}
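
// The function below is an illustrative sketch added for this review; it is
// not part of the upstream API. It shows Target matching through the dynamic
// client. The namespace and patterns are assumptions for the example;
// Include/Exclude use filepath.Match syntax, so "app-*" matches any name with
// the "app-" prefix.
func exampleTargetMatches(
	ctx context.Context, iface dynamic.ResourceInterface,
) ([]*unstructured.Unstructured, error) {
	t := Target{
		Namespace: "default",
		Include:   []NonEmptyString{"app-*"},
		Exclude:   []NonEmptyString{"app-secret"},
	}

	// If iface is namespaceable, GetMatchesDynamic will scope it to "default".
	return t.GetMatchesDynamic(ctx, iface)
}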
// Copyright Contributors to the Open Cluster Management project

package compliance

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	nucleusv1beta1 "open-cluster-management.io/governance-policy-nucleus/api/v1beta1"
)

// K8sEmitter is an emitter of Kubernetes events which the policy framework
// watches for in order to aggregate and report policy status.
type K8sEmitter struct {
	// Client is a Kubernetes client for the cluster where the compliance events
	// will be created.
	Client client.Client

	// Source contains optional information for where the event comes from.
	Source corev1.EventSource

	// Mutators modify the Event after the fields are initially set, but before
	// it is created on the cluster. They are run in the order they are defined.
	Mutators []func(corev1.Event) (corev1.Event, error)
}

// Emit creates the Kubernetes Event on the cluster. It returns an error if the
// API call fails.
func (e K8sEmitter) Emit(ctx context.Context, pl nucleusv1beta1.PolicyLike) error {
	_, err := e.EmitEvent(ctx, pl)

	return err
}

// EmitEvent creates the Kubernetes Event on the cluster. It returns the Event
// that it attempted to create (which may not have been created successfully),
// and an error if a mutator or the API call fails.
func (e K8sEmitter) EmitEvent(ctx context.Context, pol nucleusv1beta1.PolicyLike) (*corev1.Event, error) {
	plGVK := pol.GetObjectKind().GroupVersionKind()
	now := time.Now()

	// This event name matches the convention of recorders from client-go.
	name := fmt.Sprintf("%v.%x", pol.Parent().Name, now.UnixNano())

	// The reason must match a pattern looked for by the policy framework.
	var reason string
	if ns := pol.GetNamespace(); ns != "" {
		reason = "policy: " + ns + "/" + pol.GetName()
	} else {
		reason = "policy: " + pol.GetName()
	}

	// The message must begin with the compliance state, then should go into a
	// descriptive message.
	message := string(pol.ComplianceState()) + "; " + pol.ComplianceMessage()

	evType := "Normal"
	if pol.ComplianceState() != nucleusv1beta1.Compliant {
		evType = "Warning"
	}

	src := corev1.EventSource{
		Component: e.Source.Component,
		Host:      e.Source.Host,
	}

	// These fields are required for the event to function as expected.
	if src.Component == "" {
		src.Component = "policy-nucleus-default"
	}

	if src.Host == "" {
		src.Host = "policy-nucleus-default"
	}

	event := corev1.Event{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Event",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:        name,
			Namespace:   pol.ParentNamespace(),
			Labels:      pol.GetLabels(),
			Annotations: pol.GetAnnotations(),
		},
		InvolvedObject: corev1.ObjectReference{
			Kind:       pol.Parent().Kind,
			Namespace:  pol.ParentNamespace(),
			Name:       pol.Parent().Name,
			UID:        pol.Parent().UID,
			APIVersion: pol.Parent().APIVersion,
		},
		Reason:         reason,
		Message:        message,
		Source:         src,
		FirstTimestamp: metav1.NewTime(now),
		LastTimestamp:  metav1.NewTime(now),
		Count:          1,
		Type:           evType,
		EventTime:      metav1.NewMicroTime(now),
		Series:         nil,
		Action:         "ComplianceStateUpdate",
		Related: &corev1.ObjectReference{
			Kind:            plGVK.Kind,
			Namespace:       pol.GetNamespace(),
			Name:            pol.GetName(),
			UID:             pol.GetUID(),
			APIVersion:      plGVK.GroupVersion().String(),
			ResourceVersion: pol.GetResourceVersion(),
		},
		ReportingController: src.Component,
		ReportingInstance:   src.Host,
	}

	for _, mutatorFunc := range e.Mutators {
		var err error

		event, err = mutatorFunc(event)
		if err != nil {
			return nil, err
		}
	}

	err := e.Client.Create(ctx, &event)

	return &event, err
}
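
// The function below is an illustrative sketch added for this review; it is
// not part of the upstream API. It wires up a K8sEmitter with a mutator. The
// component name and annotation key are assumptions for the example.
func exampleEmit(ctx context.Context, c client.Client, pol nucleusv1beta1.PolicyLike) error {
	emitter := K8sEmitter{
		Client: c,
		Source: corev1.EventSource{Component: "example-policy-controller"},
		Mutators: []func(corev1.Event) (corev1.Event, error){
			func(ev corev1.Event) (corev1.Event, error) {
				// Mutators run after the event fields are set, but before the
				// event is created on the cluster.
				if ev.Annotations == nil {
					ev.Annotations = map[string]string{}
				}

				ev.Annotations["example.com/marker"] = "demo"

				return ev, nil
			},
		},
	}

	return emitter.Emit(ctx, pol)
}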
// Copyright Contributors to the Open Cluster Management project

package testutils

import (
	"regexp"
	"time"

	"github.com/onsi/ginkgo/v2"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// ObjNN returns a NamespacedName for the given Object.
func ObjNN(obj client.Object) types.NamespacedName {
	return types.NamespacedName{
		Namespace: obj.GetNamespace(),
		Name:      obj.GetName(),
	}
}

// EventFilter filters the given events. Any of the filter parameters can be passed an empty
// value to ignore that field when filtering. The msg parameter will be compiled into a regex if
// possible. The since parameter checks against the event's EventTime - but if the event does not
// specify an EventTime, it will not be filtered out.
func EventFilter(events []corev1.Event, evType, msg string, since time.Time) []corev1.Event {
	msgRegex, err := regexp.Compile(msg)
	if err != nil {
		msgRegex = regexp.MustCompile(regexp.QuoteMeta(msg))
	}

	ans := make([]corev1.Event, 0)

	for i := range events {
		if evType != "" && events[i].Type != evType {
			continue
		}

		if !msgRegex.MatchString(events[i].Message) {
			continue
		}

		if !events[i].EventTime.IsZero() && since.After(events[i].EventTime.Time) {
			continue
		}

		ans = append(ans, events[i])
	}

	return ans
}

// RegisterDebugMessage returns a pointer to a string which will be logged at the
// end of the test only if the test fails. This is particularly useful for logging
// information only once in an Eventually or Consistently function.
// Note: using a custom description message may be a better practice overall.
func RegisterDebugMessage() *string {
	var debugMsg string

	ginkgo.DeferCleanup(func() {
		if ginkgo.CurrentSpecReport().Failed() {
			ginkgo.GinkgoWriter.Println(debugMsg)
		}
	})

	return &debugMsg
}
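
// The function below is an illustrative sketch added for this review; it is
// not part of the upstream API. It keeps only Warning events whose message
// starts with "NonCompliant;" and whose EventTime (when set) is not before
// testStart.
func exampleRecentWarnings(events []corev1.Event, testStart time.Time) []corev1.Event {
	return EventFilter(events, "Warning", "^NonCompliant;", testStart)
}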
// Copyright Contributors to the Open Cluster Management project

package testutils

import (
	"context"
	"errors"
	"fmt"
	"os"
	"os/exec"
	"regexp"
	"sort"
	"strings"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	gomegaTypes "github.com/onsi/gomega/types"
	corev1 "k8s.io/api/core/v1"
	k8sErrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/clientcmd/api"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

var (
	ErrNoKubeconfig = errors.New("no known kubeconfig: cannot run kubectl")
	ErrKubectl      = errors.New("kubectl exited with error")
)

type Toolkit struct {
	client.Client
	Ctx                 context.Context //nolint:containedctx // this is for convenience
	RestConfig          *rest.Config
	KubeconfigPath      string
	EventuallyPoll      string
	EventuallyTimeout   string
	ConsistentlyPoll    string
	ConsistentlyTimeout string
}

// NewToolkitFromRest returns a toolkit using the given REST config. This is
// the preferred way to get a Toolkit instance, to avoid unset fields.
//
// The toolkit will use a new client built from the REST config and the global
// Scheme. The path to a kubeconfig can also be provided, which will be used
// for `.Kubectl` calls - if passed an empty string, a temporary kubeconfig
// will be created based on the REST config.
func NewToolkitFromRest(tkCfg *rest.Config, kubeconfigPath string) (Toolkit, error) {
	k8sClient, err := client.New(tkCfg, client.Options{Scheme: scheme.Scheme})
	if err != nil {
		return Toolkit{}, err
	}

	// Create a temporary kubeconfig if one is not provided.
	if kubeconfigPath == "" {
		f, err := os.CreateTemp("", "toolkit-kubeconfig-*")
		if err != nil {
			return Toolkit{}, err
		}

		contents, err := createKubeconfigFile(tkCfg)
		if err != nil {
			return Toolkit{}, err
		}

		_, err = f.Write(contents)
		if err != nil {
			return Toolkit{}, err
		}

		kubeconfigPath = f.Name()
	}

	return Toolkit{
		Client:              k8sClient,
		Ctx:                 context.Background(),
		RestConfig:          tkCfg,
		KubeconfigPath:      kubeconfigPath,
		EventuallyPoll:      "100ms",
		EventuallyTimeout:   "1s",
		ConsistentlyPoll:    "100ms",
		ConsistentlyTimeout: "1s",
	}, nil
}

func (tk Toolkit) WithEPoll(eventuallyPoll string) Toolkit {
	tk.EventuallyPoll = eventuallyPoll

	return tk
}

func (tk Toolkit) WithETimeout(eventuallyTimeout string) Toolkit {
	tk.EventuallyTimeout = eventuallyTimeout

	return tk
}

func (tk Toolkit) WithCPoll(consistentlyPoll string) Toolkit {
	tk.ConsistentlyPoll = consistentlyPoll

	return tk
}

func (tk Toolkit) WithCTimeout(consistentlyTimeout string) Toolkit {
	tk.ConsistentlyTimeout = consistentlyTimeout

	return tk
}

func (tk Toolkit) WithCtx(ctx context.Context) Toolkit {
	tk.Ctx = ctx

	return tk
}

// CleanlyCreate creates the given object, and registers a callback to delete the object which
// Ginkgo will call at the appropriate time. The error from the `Create` call is returned (so it
// can be checked) and the `Delete` callback handles 'NotFound' errors as a success.
func (tk Toolkit) CleanlyCreate(ctx context.Context, obj client.Object, opts ...client.CreateOption) error {
	createErr := tk.Create(ctx, obj, opts...)

	if createErr == nil {
		ginkgo.DeferCleanup(func() {
			ginkgo.GinkgoWriter.Printf("Deleting %v %v/%v\n",
				obj.GetObjectKind().GroupVersionKind().Kind, obj.GetNamespace(), obj.GetName())

			if err := tk.Delete(tk.Ctx, obj); err != nil {
				if !k8sErrors.IsNotFound(err) {
					// Use Fail in order to provide a custom message with useful information
					ginkgo.Fail(fmt.Sprintf("Expected success or 'NotFound' error, got %v", err), 1)
				}
			}
		})
	}

	return createErr
}

// Create uses the toolkit's client to save the object in the Kubernetes cluster.
// The only change in behavior is that it saves and restores the object's type
// information, which might otherwise be stripped during the API call.
func (tk Toolkit) Create(
	ctx context.Context, obj client.Object, opts ...client.CreateOption,
) error {
	savedGVK := obj.GetObjectKind().GroupVersionKind()
	err := tk.Client.Create(ctx, obj, opts...)
	obj.GetObjectKind().SetGroupVersionKind(savedGVK)

	return err
}

// Patch uses the toolkit's client to patch the object in the Kubernetes cluster.
// The only change in behavior is that it saves and restores the object's type
// information, which might otherwise be stripped during the API call.
func (tk Toolkit) Patch(
	ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption,
) error {
	savedGVK := obj.GetObjectKind().GroupVersionKind()
	err := tk.Client.Patch(ctx, obj, patch, opts...)
	obj.GetObjectKind().SetGroupVersionKind(savedGVK)

	return err
}

// Update uses the toolkit's client to update the object in the Kubernetes cluster.
// The only change in behavior is that it saves and restores the object's type
// information, which might otherwise be stripped during the API call.
func (tk Toolkit) Update(
	ctx context.Context, obj client.Object, opts ...client.UpdateOption,
) error {
	savedGVK := obj.GetObjectKind().GroupVersionKind()
	err := tk.Client.Update(ctx, obj, opts...)
	obj.GetObjectKind().SetGroupVersionKind(savedGVK)

	return err
}

// This regular expression is copied from
// https://github.com/open-cluster-management-io/governance-policy-framework-addon/blob/v0.13.0/controllers/statussync/policy_status_sync.go#L220
var compEventRegex = regexp.MustCompile(`(?i)^policy:\s*(?:([a-z0-9.-]+)\s*\/)?(.+)`) //nolint:gocritic // copy

// GetComplianceEvents queries the cluster and returns a sorted list of the Kubernetes
// compliance events for the given policy.
func (tk Toolkit) GetComplianceEvents(
	ctx context.Context, ns string, parentUID types.UID, templateName string,
) ([]corev1.Event, error) {
	list := &corev1.EventList{}

	err := tk.List(ctx, list, client.InNamespace(ns))
	if err != nil {
		return nil, err
	}

	events := make([]corev1.Event, 0)

	for i := range list.Items {
		event := list.Items[i]

		if event.InvolvedObject.UID != parentUID {
			continue
		}

		submatch := compEventRegex.FindStringSubmatch(event.Reason)
		if len(submatch) >= 3 && submatch[2] == templateName {
			events = append(events, event)
		}
	}

	sort.SliceStable(events, func(i, j int) bool {
		return events[i].Name < events[j].Name
	})

	return events, nil
}

// EC runs assertions on asynchronous behavior, both *E*ventually and *C*onsistently,
// using the polling and timeout settings of the toolkit. Its usage should feel familiar
// to gomega users: simply skip the `.Should(...)` call and put your matcher as the second
// parameter here.
func (tk Toolkit) EC(
	actualOrCtx interface{}, matcher gomegaTypes.GomegaMatcher, optionalDescription ...interface{},
) bool {
	ginkgo.GinkgoHelper()

	// Add where the failure occurred to the description
	eDesc := make([]interface{}, 1)
	cDesc := make([]interface{}, 1)

	//nolint:forcetypeassert // gomega makes the same unchecked assertions
	switch len(optionalDescription) {
	case 0:
		eDesc[0] = "Failed in Eventually"
		cDesc[0] = "Failed in Consistently"
	case 1:
		if origDescFunc, ok := optionalDescription[0].(func() string); ok {
			eDesc[0] = func() string { return "Failed in Eventually; " + origDescFunc() }
			cDesc[0] = func() string { return "Failed in Consistently; " + origDescFunc() }
		} else {
			eDesc[0] = "Failed in Eventually; " + optionalDescription[0].(string)
			cDesc[0] = "Failed in Consistently; " + optionalDescription[0].(string)
		}
	default:
		eDesc[0] = "Failed in Eventually; " + optionalDescription[0].(string)
		eDesc = append(eDesc, optionalDescription[1:]...) //nolint:makezero // appending is definitely correct

		cDesc[0] = "Failed in Consistently; " + optionalDescription[0].(string)
		cDesc = append(cDesc, optionalDescription[1:]...) //nolint:makezero // appending is definitely correct
	}

	gomega.Eventually(
		actualOrCtx, tk.EventuallyTimeout, tk.EventuallyPoll,
	).Should(matcher, eDesc...)

	return gomega.Consistently(
		actualOrCtx, tk.ConsistentlyTimeout, tk.ConsistentlyPoll,
	).Should(matcher, cDesc...)
}

func (tk *Toolkit) Kubectl(args ...string) (string, error) {
	addKubeconfig := true

	for _, arg := range args {
		if strings.HasPrefix(arg, "--kubeconfig") {
			addKubeconfig = false

			break
		}
	}

	if addKubeconfig {
		if tk.KubeconfigPath == "" {
			return "", ErrNoKubeconfig
		}

		args = append([]string{"--kubeconfig=" + tk.KubeconfigPath}, args...)
	}

	output, err := exec.Command("kubectl", args...).Output()

	var exitError *exec.ExitError

	if errors.As(err, &exitError) {
		if exitError.Stderr == nil {
			return string(output), err
		}

		return string(output), fmt.Errorf("%w: %s", ErrKubectl, exitError.Stderr)
	}

	return string(output), err
}

func createKubeconfigFile(cfg *rest.Config) ([]byte, error) {
	identifier := "toolkit"

	kubeconfig := api.NewConfig()

	cluster := api.NewCluster()
	cluster.Server = cfg.Host
	cluster.CertificateAuthorityData = cfg.CAData
	kubeconfig.Clusters[identifier] = cluster

	authInfo := api.NewAuthInfo()
	authInfo.ClientCertificateData = cfg.CertData
	authInfo.ClientKeyData = cfg.KeyData
	kubeconfig.AuthInfos[identifier] = authInfo

	apiContext := api.NewContext()
	apiContext.Cluster = identifier
	apiContext.AuthInfo = identifier
	kubeconfig.Contexts[identifier] = apiContext
	kubeconfig.CurrentContext = identifier

	return clientcmd.Write(*kubeconfig)
}
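
// The function below is an illustrative sketch added for this review; it is
// not part of the upstream API. It shows how a Ginkgo spec might combine
// CleanlyCreate, EC, and GetComplianceEvents; the template name "my-template"
// is an assumption for the example.
func exampleToolkitUsage(tk Toolkit, policy client.Object) {
	// The policy will be deleted automatically when the spec ends; 'NotFound'
	// on that deletion is treated as a success.
	gomega.Expect(tk.CleanlyCreate(tk.Ctx, policy)).To(gomega.Succeed())

	// Assert both Eventually and Consistently that a compliance event exists,
	// using a longer Eventually timeout than the toolkit default.
	tk.WithETimeout("30s").EC(func() ([]corev1.Event, error) {
		return tk.GetComplianceEvents(tk.Ctx, policy.GetNamespace(), policy.GetUID(), "my-template")
	}, gomega.Not(gomega.BeEmpty()))
}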
// Copyright Contributors to the Open Cluster Management project

package v1beta1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	nucleusv1beta1 "open-cluster-management.io/governance-policy-nucleus/api/v1beta1"
)

// FakePolicySpec defines the desired state of FakePolicy.
type FakePolicySpec struct {
	nucleusv1beta1.PolicyCoreSpec `json:",inline"`

	// TargetConfigMaps defines the ConfigMaps which should be examined by this policy
	TargetConfigMaps nucleusv1beta1.Target `json:"targetConfigMaps,omitempty"`

	// DesiredConfigMapName - if this name is not found, the policy will report a violation
	DesiredConfigMapName string `json:"desiredConfigMapName,omitempty"`

	// EventAnnotation - if provided, this value will be annotated on the compliance
	// events, under the "policy.open-cluster-management.io/test" key
	EventAnnotation string `json:"eventAnnotation,omitempty"`

	// TargetUsingReflection defines whether to use reflection to find the ConfigMaps
	TargetUsingReflection bool `json:"targetUsingReflection,omitempty"`
}

//+kubebuilder:validation:Optional

// FakePolicyStatus defines the observed state of FakePolicy.
type FakePolicyStatus struct {
	nucleusv1beta1.PolicyCoreStatus `json:",inline"`

	// SelectionComplete stores whether the selection has been completed
	SelectionComplete bool `json:"selectionComplete"`
}

//+kubebuilder:object:root=true
//+kubebuilder:subresource:status

// FakePolicy is the Schema for the fakepolicies API.
type FakePolicy struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   FakePolicySpec   `json:"spec,omitempty"`
	Status FakePolicyStatus `json:"status,omitempty"`
}

// Run a compile-time check to ensure FakePolicy implements PolicyLike.
var _ nucleusv1beta1.PolicyLike = (*FakePolicy)(nil)

func (f FakePolicy) ComplianceState() nucleusv1beta1.ComplianceState {
	return f.Status.ComplianceState
}

func (f FakePolicy) ComplianceMessage() string {
	idx, compCond := f.Status.GetCondition("Compliant")
	if idx == -1 {
		return ""
	}

	return compCond.Message
}

func (f FakePolicy) Parent() metav1.OwnerReference {
	if len(f.OwnerReferences) == 0 {
		return metav1.OwnerReference{}
	}

	return f.OwnerReferences[0]
}

func (f FakePolicy) ParentNamespace() string {
	return f.Namespace
}

//+kubebuilder:object:root=true

// FakePolicyList contains a list of FakePolicy.
type FakePolicyList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []FakePolicy `json:"items"`
}

func init() {
	SchemeBuilder.Register(&FakePolicy{}, &FakePolicyList{})
}
// Copyright Contributors to the Open Cluster Management project

package controllers

import (
	"context"
	"fmt"
	"slices"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"

	nucleusv1alpha1 "open-cluster-management.io/governance-policy-nucleus/api/v1alpha1"
	nucleusv1beta1 "open-cluster-management.io/governance-policy-nucleus/api/v1beta1"
	"open-cluster-management.io/governance-policy-nucleus/pkg/compliance"
	fakev1beta1 "open-cluster-management.io/governance-policy-nucleus/test/fakepolicy/api/v1beta1"
)

// FakePolicyReconciler reconciles a FakePolicy object.
// NOTE: it does not watch anything other than FakePolicies, so it will not react
// to other changes in the cluster - update something on the policy to make it
// re-reconcile.
type FakePolicyReconciler struct {
	client.Client
	Scheme        *runtime.Scheme
	DynamicClient *dynamic.DynamicClient
}

const mutatorAnno string = "policy.open-cluster-management.io/test"

// Usual RBAC for fakepolicy:
//+kubebuilder:rbac:groups=policy.open-cluster-management.io,resources=fakepolicies,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=policy.open-cluster-management.io,resources=fakepolicies/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=policy.open-cluster-management.io,resources=fakepolicies/finalizers,verbs=update

// Nucleus RBAC for namespaceSelector:
//+kubebuilder:rbac:groups=core,resources=namespaces,verbs=get;list;watch

// RBAC for this fakepolicy's capabilities:
//+kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch

func (r *FakePolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logr := log.FromContext(ctx)
	logr.Info("Starting a reconcile")

	policy := &fakev1beta1.FakePolicy{}
	if err := r.Get(ctx, req.NamespacedName, policy); err != nil {
		if errors.IsNotFound(err) {
			logr.Info("Request object not found, probably deleted")

			return ctrl.Result{}, nil
		}

		logr.Error(err, "Failed to get FakePolicy")

		return ctrl.Result{}, err
	}

	cmFound := r.doSelections(ctx, policy)

	policy.Status.SelectionComplete = true

	complianceCondition := metav1.Condition{
		Type:    "Compliant",
		Status:  metav1.ConditionTrue,
		Reason:  "Found",
		Message: "the desired configmap was found",
	}

	policy.Status.ComplianceState = nucleusv1beta1.Compliant

	if !cmFound {
		complianceCondition.Status = metav1.ConditionFalse
		complianceCondition.Reason = "NotFound"
		complianceCondition.Message = "the desired configmap was missing"
		policy.Status.ComplianceState = nucleusv1beta1.NonCompliant
	}

	changed := policy.Status.UpdateCondition(complianceCondition)
	if !changed {
		logr.Info("No change; no compliance event to emit")

		return ctrl.Result{}, nil
	}

	if err := r.Status().Update(ctx, policy); err != nil {
		logr.Error(err, "Failed to update status")

		return ctrl.Result{}, err
	}

	emitter := compliance.K8sEmitter{
		Client: r.Client,
	}

	if policy.Spec.EventAnnotation != "" {
		emitter.Mutators = []func(inpEv corev1.Event) (corev1.Event, error){
			func(inpEv corev1.Event) (corev1.Event, error) {
				if inpEv.Annotations == nil {
					inpEv.Annotations = make(map[string]string)
				}

				inpEv.Annotations[mutatorAnno] = policy.Spec.EventAnnotation

				return inpEv, nil
			},
		}

		// it's cheating a bit to put this here but it's helpful to test that
		// the events work both when this is and when this is not specified
		emitter.Source = corev1.EventSource{
			Component: policy.Spec.EventAnnotation,
			Host:      policy.Spec.EventAnnotation,
		}
	}

	ev, err := emitter.EmitEvent(ctx, policy)
	if ev != nil { // EmitEvent returns a nil Event if a mutator failed
		logr.Info("Event emitted", "eventName", ev.Name)
	}

	return ctrl.Result{}, err
}

func (r *FakePolicyReconciler) doSelections(
	ctx context.Context, policy *fakev1beta1.FakePolicy,
) (configMapFound bool) {
	logr := log.FromContext(ctx)

	nsCond := metav1.Condition{
		Type:   "NamespaceSelection",
		Status: metav1.ConditionTrue,
		Reason: "Done",
	}

	selectedNamespaces, err := policy.Spec.NamespaceSelector.GetNamespaces(ctx, r.Client)
	if err != nil {
		logr.Error(err, "Failed to GetNamespaces using NamespaceSelector",
			"selector", policy.Spec.NamespaceSelector)

		nsCond.Status = metav1.ConditionFalse
		nsCond.Reason = "ErrorSelecting"
		nsCond.Message = err.Error()
	} else {
		slices.Sort(selectedNamespaces)
		nsCond.Message = fmt.Sprintf("%v", selectedNamespaces)
	}

	policy.Status.UpdateCondition(nsCond)

	dynCond := metav1.Condition{
		Type:   "DynamicSelection",
		Status: metav1.ConditionTrue,
		Reason: "Done",
	}

	cmIface := r.DynamicClient.Resource(schema.GroupVersionResource{
		Group:    "",
		Version:  "v1",
		Resource: "configmaps",
	})

	dynamicMatchedCMs, err := policy.Spec.TargetConfigMaps.GetMatchesDynamic(ctx, cmIface)
	if err != nil {
		logr.Error(err, "Failed to GetMatchesDynamic for the TargetConfigMaps",
			"target", policy.Spec.TargetConfigMaps)

		dynCond.Status = metav1.ConditionFalse
		dynCond.Reason = "ErrorDynamicMatching"
		dynCond.Message = err.Error()
	} else {
		dynamicCMs := make([]string, len(dynamicMatchedCMs))
		for i, cm := range dynamicMatchedCMs {
			dynamicCMs[i] = cm.GetNamespace() + "/" + cm.GetName()
		}

		slices.Sort(dynamicCMs)
		dynCond.Message = fmt.Sprintf("%v", dynamicCMs)
	}

	policy.Status.UpdateCondition(dynCond)

	clientCond := metav1.Condition{
		Type:   "ClientSelection",
		Status: metav1.ConditionTrue,
		Reason: "Done",
	}

	var list nucleusv1beta1.ResourceList
	if policy.Spec.TargetUsingReflection {
		list = &nucleusv1alpha1.ReflectiveResourceList{ClientList: &corev1.ConfigMapList{}}
	} else {
		list = &configMapResList{}
	}

	clientMatchedCMs, err := policy.Spec.TargetConfigMaps.GetMatches(ctx, r.Client, list)
	if err != nil {
		logr.Error(err, "Failed to GetMatches for the TargetConfigMaps",
			"target", policy.Spec.TargetConfigMaps)

		clientCond.Status = metav1.ConditionFalse
		clientCond.Reason = "ErrorMatching"
		clientCond.Message = err.Error()
	} else {
		clientCMs := make([]string, len(clientMatchedCMs))
		for i, cm := range clientMatchedCMs { // note: ranging over dynamicMatchedCMs here was a bug
			clientCMs[i] = cm.GetNamespace() + "/" + cm.GetName()

			if cm.GetName() == policy.Spec.DesiredConfigMapName {
				configMapFound = true
			}
		}

		slices.Sort(clientCMs)
		clientCond.Message = fmt.Sprintf("%v", clientCMs)
	}

	policy.Status.UpdateCondition(clientCond)

	return configMapFound
}

type configMapResList struct {
	corev1.ConfigMapList
}

// Run a compile-time check to ensure configMapResList implements ResourceList.
var _ nucleusv1beta1.ResourceList = (*configMapResList)(nil)

func (l *configMapResList) Items() ([]client.Object, error) {
	items := make([]client.Object, len(l.ConfigMapList.Items))
	for i := range l.ConfigMapList.Items {
		items[i] = &l.ConfigMapList.Items[i]
	}

	return items, nil
}

//nolint:ireturn // the ResourceList interface requires this interface return
func (l *configMapResList) ObjectList() client.ObjectList {
	return &l.ConfigMapList
}

// SetupWithManager sets up the controller with the Manager.
func (r *FakePolicyReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&fakev1beta1.FakePolicy{}).
		Complete(r)
}
// Copyright Contributors to the Open Cluster Management project

package fakepolicy

import (
	"context"
	"flag"

	"github.com/go-logr/zapr"
	"github.com/stolostron/go-log-utils/zaputil"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/dynamic"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/healthz"
	"sigs.k8s.io/controller-runtime/pkg/metrics/server"

	fakev1beta1 "open-cluster-management.io/governance-policy-nucleus/test/fakepolicy/api/v1beta1"
	"open-cluster-management.io/governance-policy-nucleus/test/fakepolicy/controllers"
)

//nolint:gochecknoglobals // used in inits
var scheme = runtime.NewScheme()

func init() {
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))

	//+kubebuilder:scaffold:scheme

	utilruntime.Must(fakev1beta1.AddToScheme(scheme))
}

func Run(parentCtx context.Context, cfg *rest.Config) error {
	zflags := zaputil.NewFlagConfig()
	zflags.Bind(flag.CommandLine)
	klog.InitFlags(flag.CommandLine)

	var metricsAddr string
	var enableLeaderElection bool
	var probeAddr string

	flag.StringVar(&metricsAddr, "metrics-bind-address", "0",
		"The address the metric endpoint binds to. Disabled by default, but conventionally :8080")
	flag.StringVar(&probeAddr, "health-probe-bind-address", "0",
		"The address the probe endpoint binds to. Disabled by default, but conventionally :8081")
	flag.BoolVar(&enableLeaderElection, "leader-elect", false,
		"Enable leader election for controller manager. "+
			"Enabling this will ensure there is only one active controller manager.")
	flag.Parse()

	ctrlZap, err := zflags.BuildForCtrl()
	if err != nil {
		return err
	}

	ctrl.SetLogger(zapr.NewLogger(ctrlZap))
	setupLog := ctrl.Log.WithName("setup")

	klogZap, err := zaputil.BuildForKlog(zflags.GetConfig(), flag.CommandLine)
	if err != nil {
		return err
	}

	klog.SetLogger(zapr.NewLogger(klogZap).WithName("klog"))

	if cfg == nil {
		var err error

		cfg, err = ctrl.GetConfig()
		if err != nil {
			setupLog.Error(err, "unable to get Kubernetes config")

			return err
		}
	}

	mgr, err := ctrl.NewManager(cfg, ctrl.Options{
		Scheme:                 scheme,
		Metrics:                server.Options{BindAddress: metricsAddr},
		HealthProbeBindAddress: probeAddr,
		LeaderElection:         enableLeaderElection,
		LeaderElectionID:       "8b5e65ab.open-cluster-management.io",
	})
	if err != nil {
		setupLog.Error(err, "unable to start manager")

		return err
	}

	dynClient, err := dynamic.NewForConfig(cfg)
	if err != nil {
		setupLog.Error(err, "unable to create dynamic client")

		return err
	}

	if err = (&controllers.FakePolicyReconciler{
		Client:        mgr.GetClient(),
		Scheme:        mgr.GetScheme(),
		DynamicClient: dynClient,
	}).SetupWithManager(mgr); err != nil {
		setupLog.Error(err, "unable to create controller", "controller", "FakePolicy")

		return err
	}

	//+kubebuilder:scaffold:builder

	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
		setupLog.Error(err, "unable to set up health check")

		return err
	}

	if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
		setupLog.Error(err, "unable to set up ready check")

		return err
	}

	setupLog.Info("starting manager")

	managerCtx, cancel := context.WithCancel(parentCtx)

	go func() {
		// It would be nicer if this could take a parent context,
		// but this will work to cancel the manager on those signals.
		<-ctrl.SetupSignalHandler().Done()
		cancel()
	}()

	if err := mgr.Start(managerCtx); err != nil {
		setupLog.Error(err, "problem running manager")

		return err
	}

	return nil
}
// Copyright Contributors to the Open Cluster Management project

package utils

import (
	"embed"

	"github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/util/yaml"

	nucleusv1beta1 "open-cluster-management.io/governance-policy-nucleus/api/v1beta1"
	fakev1beta1 "open-cluster-management.io/governance-policy-nucleus/test/fakepolicy/api/v1beta1"
)

//go:embed testdata/*
var testfiles embed.FS

// FromTestdata unmarshals the given YAML file in testdata/ into an
// unstructured.Unstructured.
func FromTestdata(name string) unstructured.Unstructured {
	objYAML, err := testfiles.ReadFile("testdata/" + name)
	gomega.ExpectWithOffset(1, err).ToNot(gomega.HaveOccurred())

	m := make(map[string]interface{})
	gomega.ExpectWithOffset(1, yaml.UnmarshalStrict(objYAML, &m)).To(gomega.Succeed())

	return unstructured.Unstructured{Object: m}
}

func SampleFakePolicy() fakev1beta1.FakePolicy {
	return fakev1beta1.FakePolicy{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "policy.open-cluster-management.io/v1beta1",
			Kind:       "FakePolicy",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "fakepolicy-sample",
			Namespace: "default",
		},
		Spec: fakev1beta1.FakePolicySpec{
			PolicyCoreSpec: nucleusv1beta1.PolicyCoreSpec{
				Severity:          "low",
				RemediationAction: "inform",
				NamespaceSelector: nucleusv1beta1.NamespaceSelector{
					LabelSelector: &metav1.LabelSelector{},
					Include:       []nucleusv1beta1.NonEmptyString{"*"},
					Exclude:       []nucleusv1beta1.NonEmptyString{"kube-*"},
				},
			},
			TargetConfigMaps: nucleusv1beta1.Target{
				LabelSelector: &metav1.LabelSelector{
					MatchExpressions: []metav1.LabelSelectorRequirement{{
						Key:      "sample",
						Operator: metav1.LabelSelectorOpExists,
					}},
				},
			},
		},
	}
}
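
// The function below is an illustrative sketch added for this review; it is
// not part of the upstream API. It shows how a test might combine these
// helpers. The testdata file name "sample-configmap.yaml" and the ConfigMap
// name are assumptions for the example.
func exampleTestSetup() (unstructured.Unstructured, fakev1beta1.FakePolicy) {
	cm := FromTestdata("sample-configmap.yaml")

	policy := SampleFakePolicy()
	policy.Spec.DesiredConfigMapName = "sample-cm" // illustrative value

	return cm, policy
}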