kanidmd_lib/server/migrations.rs

use crate::prelude::*;

use crate::migration_data;
use kanidm_proto::internal::{
    DomainUpgradeCheckItem as ProtoDomainUpgradeCheckItem,
    DomainUpgradeCheckReport as ProtoDomainUpgradeCheckReport,
    DomainUpgradeCheckStatus as ProtoDomainUpgradeCheckStatus,
};

use super::ServerPhase;

impl QueryServer {
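    /// Bootstrap or upgrade the database at startup. This walks the server through
    /// its startup phases - schema bootstrap, `SchemaReady`, `DomainInfoReady` and
    /// finally `Running` - applying any pending domain level and patch level
    /// migrations along the way.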
    #[instrument(level = "info", name = "system_initialisation", skip_all)]
    pub async fn initialise_helper(
        &self,
        ts: Duration,
        domain_target_level: DomainVersion,
    ) -> Result<(), OperationError> {
        // We need to perform this in a single transaction pass to prevent tainting
        // databases during upgrades.
        let mut write_txn = self.write(ts).await?;

        // Check our database version and attempt an initial indexing based on the
        // in-memory configuration. This ONLY triggers ONCE, on the very first run
        // of the instance when the DB is newly created.
        write_txn.upgrade_reindex(SYSTEM_INDEX_VERSION)?;

        // Because we init the schema here and commit, this reloads, meaning
        // that the on-disk index meta has been loaded, so our subsequent
        // migrations will be correctly indexed.
        //
        // Remember, this would normally mean that it's possible for schema
        // to be mis-indexed (i.e. we index the new schemas here before we read
        // the schema that tells us what's indexed), but because we have the
        // in-memory schema that defines how schema is structured, and this is
        // all marked "system", we won't have an issue here.
        write_txn
            .initialise_schema_core()
            .and_then(|_| write_txn.reload())?;

        // This is what tells us if the domain entry existed before or not. This
        // is now the primary method of migrations and version detection.
        let db_domain_version = match write_txn.internal_search_uuid(UUID_DOMAIN_INFO) {
            Ok(e) => Ok(e.get_ava_single_uint32(Attribute::Version).unwrap_or(0)),
            Err(OperationError::NoMatchingEntries) => Ok(0),
            Err(r) => Err(r),
        }?;

        debug!(?db_domain_version, "Before setting internal domain info");

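        // A db_domain_version of 0 means the domain info entry has never been
        // written - a fresh database that needs the full IDM bootstrap below.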
        if db_domain_version == 0 {
            // This is here to catch when we increase domain levels but didn't create
            // the migration hooks. If this fails it probably means you need to add
            // another migration hook to the match below.
            debug_assert!(domain_target_level <= DOMAIN_MAX_LEVEL);

            // No domain info was present, so neither was the rest of the IDM. Bring up the
            // full IDM here.
            match domain_target_level {
                DOMAIN_LEVEL_10 => write_txn.migrate_domain_9_to_10()?,
                DOMAIN_LEVEL_11 => write_txn.migrate_domain_10_to_11()?,
                DOMAIN_LEVEL_12 => write_txn.migrate_domain_11_to_12()?,
                DOMAIN_LEVEL_13 => write_txn.migrate_domain_12_to_13()?,
                DOMAIN_LEVEL_14 => write_txn.migrate_domain_13_to_14()?,
                _ => {
                    error!("Invalid requested domain target level for server bootstrap");
                    debug_assert!(false);
                    return Err(OperationError::MG0009InvalidTargetLevelForBootstrap);
                }
            }

            write_txn
                .internal_apply_domain_migration(domain_target_level)
                .map(|()| {
                    warn!(
                        "Domain level has been bootstrapped to {}",
                        domain_target_level
                    );
                })?;
        }

        // These steps apply both to bootstrapping and normal startup, since we now have
        // a DB with data populated in either path.

        // Domain info is now present, so we need to reflect that in our server
        // domain structures. If we don't do this, the in-memory domain level
        // is stuck at 0, which can confuse init domain info below.
        //
        // This is also where the stored domain taint flag is loaded into
        // d_info, so that if the *previous* execution of the database was
        // a devel version, we'll still trigger the forced remigration in
        // the case that we are moving from dev -> stable.
        write_txn.force_domain_reload();

        write_txn.reload()?;

        // Indicate the schema is now ready, which allows dyngroups to work when they
        // are created in the next phase of migrations.
        write_txn.set_phase(ServerPhase::SchemaReady);

        // #2756 - if we *aren't* creating the base IDM entries, then we
        // need to force dyn groups to reload since we're now at schema
        // ready. This is done indirectly by ... reloading the schema again.
        //
        // This is because dyngroups don't load until the server phase is at least
        // SchemaReady, and the reload path for them is either a change to the
        // dyngroup entry itself or a schema reload. Since we aren't changing the
        // dyngroup here, we have to go via the schema reload path.
        write_txn.force_schema_reload();

        // Reload, as init idm affects access controls.
        write_txn.reload()?;

        // Domain info is now ready and reloaded, we can proceed.
        write_txn.set_phase(ServerPhase::DomainInfoReady);

        // This is the start of domain info related migrations, which we will need in future
        // to handle replication. Due to the access control rework, and the addition of "managed by"
        // syntax, we need to ensure both nodes "fence" replication from each other. We do this
        // by changing domain infos to be incompatible during this phase.

        // The reloads will have populated this structure now.
        let domain_info_version = write_txn.get_domain_version();
        let domain_patch_level = write_txn.get_domain_patch_level();
        let domain_development_taint = write_txn.get_domain_development_taint();
        debug!(
            ?domain_info_version,
            ?domain_patch_level,
            ?domain_development_taint,
            "After setting internal domain info"
        );

        let mut reload_required = false;

        // If the database domain info is a lower version than our target level, we reload.
        if domain_info_version < domain_target_level {
            if (domain_target_level - domain_info_version) > DOMAIN_MIGRATION_SKIPS {
                error!(
                    "UNABLE TO PROCEED. You are attempting a skip update which is NOT SUPPORTED."
                );
                error!("For more see: https://kanidm.github.io/kanidm/stable/support.html#upgrade-policy and https://kanidm.github.io/kanidm/stable/server_updates.html");
                error!(domain_previous_version = ?domain_info_version, domain_target_version = ?domain_target_level, domain_migration_steps_limit = ?DOMAIN_MIGRATION_SKIPS);
                return Err(OperationError::MG0008SkipUpgradeAttempted);
            }
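
            // For example, if DOMAIN_MIGRATION_SKIPS were 1, a server at level 11
            // could be raised to 12 in one step, but not directly to 13.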

            write_txn
                .internal_apply_domain_migration(domain_target_level)
                .map(|()| {
                    warn!("Domain level has been raised to {}", domain_target_level);
                })?;
            // Reload if anything in migrations requires it - this triggers the domain
            // migrations, which in turn can trigger schema reloads etc. If the server was
            // just brought up then we don't need the extra reload, since we are already at
            // the correct version of the server, and this call to set the target level is
            // just for persistence of the value.
            if domain_info_version != 0 {
                reload_required = true;
            }
        } else if domain_development_taint {
            // This forces pre-release versions to re-migrate each start up. This solves
            // the domain-version-sprawl issue so that during a development cycle we can
            // do a single domain version bump, and continue to extend the migrations
            // within that release cycle to contain what we require.
            //
            // This applies when this is a pre-release build,
            // AND we are NOT in a test environment,
            // AND we did not already need a version migration as above.
            write_txn.domain_remigrate(DOMAIN_PREVIOUS_TGT_LEVEL)?;

            reload_required = true;
        }

        // If we are new enough to support patches, and we are lower than the target patch
        // level, then a reload will be applied after we raise the patch level.
        if domain_patch_level < DOMAIN_TGT_PATCH_LEVEL {
            write_txn
                .internal_modify_uuid(
                    UUID_DOMAIN_INFO,
                    &ModifyList::new_purge_and_set(
                        Attribute::PatchLevel,
                        Value::new_uint32(DOMAIN_TGT_PATCH_LEVEL),
                    ),
                )
                .map(|()| {
                    warn!(
                        "Domain patch level has been raised to {}",
                        DOMAIN_TGT_PATCH_LEVEL
                    );
                })?;

            reload_required = true;
        }

        // Execute whatever operations we have batched up and ready to go. This is needed
        // to preserve ordering of the operations - if we reloaded after a remigrate then
        // we would have skipped the patch level fix, which needs to have occurred *first*.
        if reload_required {
            write_txn.reload()?;
        }

        // Now set the db/domain devel taint flag to match our current release status
        // if it changed. This is what breaks the cycle of db taint from dev -> stable.
        let current_devel_flag = option_env!("KANIDM_PRE_RELEASE").is_some();
        if current_devel_flag {
            warn!("Domain Development Taint mode is enabled");
        }
        if domain_development_taint != current_devel_flag {
            write_txn.internal_modify_uuid(
                UUID_DOMAIN_INFO,
                &ModifyList::new_purge_and_set(
                    Attribute::DomainDevelopmentTaint,
                    Value::Bool(current_devel_flag),
                ),
            )?;
        }

        // We are ready to run.
        write_txn.set_phase(ServerPhase::Running);

        // Commit all changes. This also triggers the final reload, which should be a
        // no-op since we already did all the needed loads above.
        write_txn.commit()?;

        debug!("Database version check and migrations success! ☀️  ");
        Ok(())
    }
}

impl QueryServerWriteTransaction<'_> {
    /// Apply a domain migration `to_level`. Panics if `to_level` is not greater than the active
    /// level.
    pub(crate) fn internal_apply_domain_migration(
        &mut self,
        to_level: u32,
    ) -> Result<(), OperationError> {
        assert!(to_level > self.get_domain_version());
        self.internal_modify_uuid(
            UUID_DOMAIN_INFO,
            &ModifyList::new_purge_and_set(Attribute::Version, Value::new_uint32(to_level)),
        )
        .and_then(|()| self.reload())
    }

    fn internal_migrate_or_create_batch(
        &mut self,
        msg: &str,
        entries: Vec<EntryInitNew>,
    ) -> Result<(), OperationError> {
        let r: Result<(), _> = entries
            .into_iter()
            .try_for_each(|entry| self.internal_migrate_or_create(entry));

        if let Err(err) = r {
            error!(?err, msg);
            debug_assert!(false);
        }
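
        // Note: a failed item is logged and trips a debug assertion, but the error
        // is not propagated in release builds, so later migration phases still run.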

        Ok(())
    }

    /// - If the entry exists:
    ///   - Ensure the set of attributes match and are present
    ///     (but don't delete multivalued or extended attributes in the process).
    /// - If not:
    ///   - Create the entry.
    ///
    /// This will leave extra classes and attributes alone!
    ///
    /// NOTE: `gen_modlist*` IS schema aware and will handle multivalue correctly!
    #[instrument(level = "debug", skip_all)]
    fn internal_migrate_or_create(
        &mut self,
        e: Entry<EntryInit, EntryNew>,
    ) -> Result<(), OperationError> {
        // NOTE: Ignoring an attribute only affects the migration phase, not create.
        self.internal_migrate_or_create_ignore_attrs(
            e,
            &[
                // If the credential type is present, we don't want to touch it.
                Attribute::CredentialTypeMinimum,
            ],
        )
    }

    #[instrument(level = "debug", skip_all)]
    fn internal_delete_batch(
        &mut self,
        msg: &str,
        entries: Vec<Uuid>,
    ) -> Result<(), OperationError> {
        let filter = entries
            .into_iter()
            .map(|uuid| f_eq(Attribute::Uuid, PartialValue::Uuid(uuid)))
            .collect();

        let filter = filter_all!(f_or(filter));

        let result = self.internal_delete(&filter);

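        // Deletes are idempotent: the entries may already be absent, so
        // "no matching entries" is treated as success below.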
        match result {
            Ok(_) | Err(OperationError::NoMatchingEntries) => Ok(()),
            Err(err) => {
                error!(?err, msg);
                Err(err)
            }
        }
    }

    /// This is the same as [QueryServerWriteTransaction::internal_migrate_or_create]
    /// but it will ignore the specified list of attributes, so that if an admin has
    /// modified those values then we don't stomp them.
    #[instrument(level = "trace", skip_all)]
    fn internal_migrate_or_create_ignore_attrs(
        &mut self,
        mut e: Entry<EntryInit, EntryNew>,
        attrs: &[Attribute],
    ) -> Result<(), OperationError> {
        trace!("operating on {:?}", e.get_uuid());

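        // The entry must carry a uuid, since that is how we locate any existing copy.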
        let Some(filt) = e.filter_from_attrs(&[Attribute::Uuid]) else {
            return Err(OperationError::FilterGeneration);
        };

        trace!("search {:?}", filt);

        let results = self.internal_search(filt.clone())?;

        if results.is_empty() {
            // It does not exist. Create it.
            self.internal_create(vec![e])
        } else if results.len() == 1 {
            // For each ignored attr, we remove it from the entry.
            for attr in attrs.iter() {
                e.remove_ava(attr);
            }

            // Generate a modlist that asserts this entry's content on the existing entry.
            match e.gen_modlist_assert(&self.schema) {
                Ok(modlist) => {
                    // Apply to &results[0]
                    trace!(?modlist);
                    self.internal_modify(&filt, &modlist)
                }
                Err(e) => Err(OperationError::SchemaViolation(e)),
            }
        } else {
            admin_error!(
                "Invalid Result Set - Expected One Entry for {:?} - {:?}",
                filt,
                results
            );
            Err(OperationError::InvalidDbState)
        }
    }

    // Kept as a commented-out example of patch application.
    /*
    /// Patch Application - This triggers a one-shot fixup task for issue #3178
    /// to force access controls to re-migrate in existing databases so that their
    /// content matches expected values.
    #[instrument(level = "info", skip_all)]
    pub(crate) fn migrate_domain_patch_level_2(&mut self) -> Result<(), OperationError> {
        admin_warn!("applying domain patch 2.");

        debug_assert!(*self.phase >= ServerPhase::SchemaReady);

        let idm_data = migration_data::dl9::phase_7_builtin_access_control_profiles();

        idm_data
            .into_iter()
            .try_for_each(|entry| self.internal_migrate_or_create(entry))
            .map_err(|err| {
                error!(?err, "migrate_domain_patch_level_2 -> Error");
                err
            })?;

        self.reload()?;

        Ok(())
    }
    */

    /// Migration domain level 9 to 10 (1.6.0)
    #[instrument(level = "info", skip_all)]
    pub(crate) fn migrate_domain_9_to_10(&mut self) -> Result<(), OperationError> {
        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_9 {
            error!("Unable to raise domain level from 9 to 10.");
            return Err(OperationError::MG0004DomainLevelInDevelopment);
        }

        // =========== Apply changes ==============
        self.internal_migrate_or_create_batch(
            "phase 1 - schema attrs",
            migration_data::dl10::phase_1_schema_attrs(),
        )?;

        self.internal_migrate_or_create_batch(
            "phase 2 - schema classes",
            migration_data::dl10::phase_2_schema_classes(),
        )?;

        // Reload for the new schema.
        self.reload()?;

        // Since we just loaded in a ton of schema, let's reindex it in case we added
        // new indexes, or this is a bootstrap and we have no indexes yet.
        self.reindex(false)?;

        // Set Phase
        // Indicate the schema is now ready, which allows dyngroups to work when they
        // are created in the next phase of migrations.
        self.set_phase(ServerPhase::SchemaReady);

        self.internal_migrate_or_create_batch(
            "phase 3 - key provider",
            migration_data::dl10::phase_3_key_provider(),
        )?;

        // Reload for the new key providers.
        self.reload()?;

        self.internal_migrate_or_create_batch(
            "phase 4 - system entries",
            migration_data::dl10::phase_4_system_entries(),
        )?;

        // Reload for the new system entries.
        self.reload()?;

        // Domain info is now ready and reloaded, we can proceed.
        self.set_phase(ServerPhase::DomainInfoReady);

        // Bring up the IDM entries.
        self.internal_migrate_or_create_batch(
            "phase 5 - builtin admin entries",
            migration_data::dl10::phase_5_builtin_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 6 - builtin non-admin entries",
            migration_data::dl10::phase_6_builtin_non_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 7 - builtin access control profiles",
            migration_data::dl10::phase_7_builtin_access_control_profiles(),
        )?;

        self.reload()?;

        // =========== OAuth2 Cryptography Migration ==============

        debug!("START OAUTH2 MIGRATION");
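
        // Each OAuth2 resource server becomes a key object: the raw fernet, ES256 and
        // RS256 key attributes are purged, and the private keys are re-imported via
        // the key-action attributes so the key provider takes ownership of them.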

        // Load all the OAuth2 providers.
        let all_oauth2_rs_entries = self.internal_search(filter!(f_eq(
            Attribute::Class,
            EntryClass::OAuth2ResourceServer.into()
        )))?;

        if !all_oauth2_rs_entries.is_empty() {
            let entry_iter = all_oauth2_rs_entries.iter().map(|tgt_entry| {
                let entry_uuid = tgt_entry.get_uuid();
                let mut modlist = ModifyList::new_list(vec![
                    Modify::Present(Attribute::Class, EntryClass::KeyObject.to_value()),
                    Modify::Present(Attribute::Class, EntryClass::KeyObjectJwtEs256.to_value()),
                    Modify::Present(Attribute::Class, EntryClass::KeyObjectJweA128GCM.to_value()),
                    // Delete the fernet key, the es256 key, and the rs256 key if any.
                    Modify::Purged(Attribute::OAuth2RsTokenKey),
                    Modify::Purged(Attribute::Es256PrivateKeyDer),
                    Modify::Purged(Attribute::Rs256PrivateKeyDer),
                ]);

                trace!(?tgt_entry);

                // Import the ES256 key.
                if let Some(es256_private_der) =
                    tgt_entry.get_ava_single_private_binary(Attribute::Es256PrivateKeyDer)
                {
                    modlist.push_mod(Modify::Present(
                        Attribute::KeyActionImportJwsEs256,
                        Value::PrivateBinary(es256_private_der.to_vec()),
                    ))
                } else {
                    warn!("Unable to migrate es256 key");
                }

                let has_rs256 = tgt_entry
                    .get_ava_single_bool(Attribute::OAuth2JwtLegacyCryptoEnable)
                    .unwrap_or(false);

                // If legacy crypto was enabled, import the RS256 key as well.
                if has_rs256 {
                    modlist.push_mod(Modify::Present(
                        Attribute::Class,
                        EntryClass::KeyObjectJwtRs256.to_value(),
                    ));

                    if let Some(rs256_private_der) =
                        tgt_entry.get_ava_single_private_binary(Attribute::Rs256PrivateKeyDer)
                    {
                        modlist.push_mod(Modify::Present(
                            Attribute::KeyActionImportJwsRs256,
                            Value::PrivateBinary(rs256_private_der.to_vec()),
                        ))
                    } else {
                        warn!("Unable to migrate rs256 key");
                    }
                }

                (entry_uuid, modlist)
            });

            self.internal_batch_modify(entry_iter)?;
        }

        // Reload for the new keys, and the updated oauth2 entries.
        self.reload()?;

        // Done!

        Ok(())
    }

    /// Migration domain level 10 to 11 (1.7.0)
    #[instrument(level = "info", skip_all)]
    pub(crate) fn migrate_domain_10_to_11(&mut self) -> Result<(), OperationError> {
        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_10 {
            error!("Unable to raise domain level from 10 to 11.");
            return Err(OperationError::MG0004DomainLevelInDevelopment);
        }

        // =========== Apply changes ==============
        self.internal_migrate_or_create_batch(
            "phase 1 - schema attrs",
            migration_data::dl11::phase_1_schema_attrs(),
        )?;

        self.internal_migrate_or_create_batch(
            "phase 2 - schema classes",
            migration_data::dl11::phase_2_schema_classes(),
        )?;

        // Reload for the new schema.
        self.reload()?;

        // Since we just loaded in a ton of schema, let's reindex it in case we added
        // new indexes, or this is a bootstrap and we have no indexes yet.
        self.reindex(false)?;

        // Set Phase
        // Indicate the schema is now ready, which allows dyngroups to work when they
        // are created in the next phase of migrations.
        self.set_phase(ServerPhase::SchemaReady);

        self.internal_migrate_or_create_batch(
            "phase 3 - key provider",
            migration_data::dl11::phase_3_key_provider(),
        )?;

        // Reload for the new key providers.
        self.reload()?;

        self.internal_migrate_or_create_batch(
            "phase 4 - system entries",
            migration_data::dl11::phase_4_system_entries(),
        )?;

        // Reload for the new system entries.
        self.reload()?;

        // Domain info is now ready and reloaded, we can proceed.
        self.set_phase(ServerPhase::DomainInfoReady);

        // Bring up the IDM entries.
        self.internal_migrate_or_create_batch(
            "phase 5 - builtin admin entries",
            migration_data::dl11::phase_5_builtin_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 6 - builtin non-admin entries",
            migration_data::dl11::phase_6_builtin_non_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 7 - builtin access control profiles",
            migration_data::dl11::phase_7_builtin_access_control_profiles(),
        )?;

        self.reload()?;

        Ok(())
    }

    /// Migration domain level 11 to 12 (1.8.0)
    #[instrument(level = "info", skip_all)]
    pub(crate) fn migrate_domain_11_to_12(&mut self) -> Result<(), OperationError> {
        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_11 {
            error!("Unable to raise domain level from 11 to 12.");
            return Err(OperationError::MG0004DomainLevelInDevelopment);
        }

        // =========== Apply changes ==============
        self.internal_migrate_or_create_batch(
            "phase 1 - schema attrs",
            migration_data::dl12::phase_1_schema_attrs(),
        )?;

        self.internal_migrate_or_create_batch(
            "phase 2 - schema classes",
            migration_data::dl12::phase_2_schema_classes(),
        )?;

        // Reload for the new schema.
        self.reload()?;

        // Since we just loaded in a ton of schema, let's reindex it in case we added
        // new indexes, or this is a bootstrap and we have no indexes yet.
        self.reindex(false)?;

        // Set Phase
        // Indicate the schema is now ready, which allows dyngroups to work when they
        // are created in the next phase of migrations.
        self.set_phase(ServerPhase::SchemaReady);

        self.internal_migrate_or_create_batch(
            "phase 3 - key provider",
            migration_data::dl12::phase_3_key_provider(),
        )?;

        // Reload for the new key providers.
        self.reload()?;

        self.internal_migrate_or_create_batch(
            "phase 4 - system entries",
            migration_data::dl12::phase_4_system_entries(),
        )?;

        // Reload for the new system entries.
        self.reload()?;

        // Domain info is now ready and reloaded, we can proceed.
        self.set_phase(ServerPhase::DomainInfoReady);

        // Bring up the IDM entries.
        self.internal_migrate_or_create_batch(
            "phase 5 - builtin admin entries",
            migration_data::dl12::phase_5_builtin_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 6 - builtin non-admin entries",
            migration_data::dl12::phase_6_builtin_non_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 7 - builtin access control profiles",
            migration_data::dl12::phase_7_builtin_access_control_profiles(),
        )?;

        self.reload()?;

        // Clean up any leftover id verification keys.
        let modlist = ModifyList::new_purge(Attribute::IdVerificationEcKey);
        let filter = filter_all!(f_pres(Attribute::IdVerificationEcKey));

        self.internal_modify(&filter, &modlist)?;

        Ok(())
    }

    /// Migration domain level 12 to 13 (1.9.0)
    #[instrument(level = "info", skip_all)]
    pub(crate) fn migrate_domain_12_to_13(&mut self) -> Result<(), OperationError> {
        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_12 {
            error!("Unable to raise domain level from 12 to 13.");
            return Err(OperationError::MG0004DomainLevelInDevelopment);
        }

        // =========== Apply changes ==============
        self.internal_migrate_or_create_batch(
            "phase 1 - schema attrs",
            migration_data::dl13::phase_1_schema_attrs(),
        )?;

        self.internal_migrate_or_create_batch(
            "phase 2 - schema classes",
            migration_data::dl13::phase_2_schema_classes(),
        )?;

        // Reload for the new schema.
        self.reload()?;

        // Since we just loaded in a ton of schema, let's reindex it in case we added
        // new indexes, or this is a bootstrap and we have no indexes yet.
        self.reindex(false)?;

        // Set Phase
        // Indicate the schema is now ready, which allows dyngroups to work when they
        // are created in the next phase of migrations.
        self.set_phase(ServerPhase::SchemaReady);

        self.internal_migrate_or_create_batch(
            "phase 3 - key provider",
            migration_data::dl13::phase_3_key_provider(),
        )?;

        // Reload for the new key providers.
        self.reload()?;

        self.internal_migrate_or_create_batch(
            "phase 4 - system entries",
            migration_data::dl13::phase_4_system_entries(),
        )?;

        // Reload for the new system entries.
        self.reload()?;

        // Domain info is now ready and reloaded, we can proceed.
        self.set_phase(ServerPhase::DomainInfoReady);

        // Bring up the IDM entries.
        self.internal_migrate_or_create_batch(
            "phase 5 - builtin admin entries",
            migration_data::dl13::phase_5_builtin_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 6 - builtin non-admin entries",
            migration_data::dl13::phase_6_builtin_non_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 7 - builtin access control profiles",
            migration_data::dl13::phase_7_builtin_access_control_profiles(),
        )?;

        self.internal_delete_batch(
            "phase 8 - delete UUIDs",
            migration_data::dl13::phase_8_delete_uuids(),
        )?;

        self.reload()?;

        Ok(())
    }

    /// Migration domain level 13 to 14 (1.10.0)
    #[instrument(level = "info", skip_all)]
    pub(crate) fn migrate_domain_13_to_14(&mut self) -> Result<(), OperationError> {
        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_13 {
            error!("Unable to raise domain level from 13 to 14.");
            return Err(OperationError::MG0004DomainLevelInDevelopment);
        }

        Ok(())
    }

    #[instrument(level = "info", skip_all)]
    pub(crate) fn initialise_schema_core(&mut self) -> Result<(), OperationError> {
        admin_debug!("initialise_schema_core -> start ...");
        // Load in all the "core" schema that we already have in memory.
        let entries = self.schema.to_entries();

        // admin_debug!("Dumping schemas: {:?}", entries);

        // internal_migrate_or_create.
        let r: Result<_, _> = entries.into_iter().try_for_each(|e| {
            trace!(?e, "init schema entry");
            self.internal_migrate_or_create(e)
        });
        if r.is_ok() {
            admin_debug!("initialise_schema_core -> Ok!");
        } else {
            admin_error!(?r, "initialise_schema_core -> Error");
        }
        // This should never fail unless the in-memory schema is inconsistent with
        // itself, so assert on it in debug builds.
        debug_assert!(r.is_ok());
        r
    }
}

impl QueryServerReadTransaction<'_> {
    /// Check whether the content of this server is ready to upgrade to the next
    /// domain level, returning a report of any items that would block the upgrade.
    pub fn domain_upgrade_check(
        &mut self,
    ) -> Result<ProtoDomainUpgradeCheckReport, OperationError> {
        let d_info = &self.d_info;

        let name = d_info.d_name.clone();
        let uuid = d_info.d_uuid;
        let current_level = d_info.d_vers;
        let upgrade_level = DOMAIN_TGT_NEXT_LEVEL;

        let mut report_items = Vec::with_capacity(1);

        if current_level <= DOMAIN_LEVEL_7 && upgrade_level >= DOMAIN_LEVEL_8 {
            let item = self
                .domain_upgrade_check_7_to_8_security_keys()
                .map_err(|err| {
                    error!(
                        ?err,
                        "Failed to perform domain upgrade check 7 to 8 - security-keys"
                    );
                    err
                })?;
            report_items.push(item);

            let item = self
                .domain_upgrade_check_7_to_8_oauth2_strict_redirect_uri()
                .map_err(|err| {
                    error!(
                        ?err,
                        "Failed to perform domain upgrade check 7 to 8 - oauth2-strict-redirect_uri"
                    );
                    err
                })?;
            report_items.push(item);
        }
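
        // Checks for future level transitions can be appended here as upgrades
        // gain new prerequisites.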

        Ok(ProtoDomainUpgradeCheckReport {
            name,
            uuid,
            current_level,
            upgrade_level,
            report_items,
        })
    }

    pub(crate) fn domain_upgrade_check_7_to_8_security_keys(
        &mut self,
    ) -> Result<ProtoDomainUpgradeCheckItem, OperationError> {
        let filter = filter!(f_and!([
            f_eq(Attribute::Class, EntryClass::Account.into()),
            f_pres(Attribute::PrimaryCredential),
        ]));

        let results = self.internal_search(filter)?;

        let affected_entries = results
            .into_iter()
            .filter_map(|entry| {
                if entry
                    .get_ava_single_credential(Attribute::PrimaryCredential)
                    .map(|cred| cred.has_securitykey())
                    .unwrap_or_default()
                {
                    Some(entry.get_display_id())
                } else {
                    None
                }
            })
            .collect::<Vec<_>>();

        let status = if affected_entries.is_empty() {
            ProtoDomainUpgradeCheckStatus::Pass7To8SecurityKeys
        } else {
            ProtoDomainUpgradeCheckStatus::Fail7To8SecurityKeys
        };

        Ok(ProtoDomainUpgradeCheckItem {
            status,
            from_level: DOMAIN_LEVEL_7,
            to_level: DOMAIN_LEVEL_8,
            affected_entries,
        })
    }

    pub(crate) fn domain_upgrade_check_7_to_8_oauth2_strict_redirect_uri(
        &mut self,
    ) -> Result<ProtoDomainUpgradeCheckItem, OperationError> {
        let filter = filter!(f_and!([
            f_eq(Attribute::Class, EntryClass::OAuth2ResourceServer.into()),
            f_andnot(f_pres(Attribute::OAuth2StrictRedirectUri)),
        ]));

        let results = self.internal_search(filter)?;

        let affected_entries = results
            .into_iter()
            .map(|entry| entry.get_display_id())
            .collect::<Vec<_>>();

        let status = if affected_entries.is_empty() {
            ProtoDomainUpgradeCheckStatus::Pass7To8Oauth2StrictRedirectUri
        } else {
            ProtoDomainUpgradeCheckStatus::Fail7To8Oauth2StrictRedirectUri
        };

        Ok(ProtoDomainUpgradeCheckItem {
            status,
            from_level: DOMAIN_LEVEL_7,
            to_level: DOMAIN_LEVEL_8,
            affected_entries,
        })
    }
}

#[cfg(test)]
mod tests {
    // use super::{ProtoDomainUpgradeCheckItem, ProtoDomainUpgradeCheckStatus};
    use crate::prelude::*;
    use crate::value::CredentialType;
    use crate::valueset::ValueSetCredentialType;

    #[qs_test]
    async fn test_init_idempotent_schema_core(server: &QueryServer) {
        {
            // Setup and abort.
            let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(server_txn.initialise_schema_core().is_ok());
        }
        {
            let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(server_txn.initialise_schema_core().is_ok());
            assert!(server_txn.initialise_schema_core().is_ok());
            assert!(server_txn.commit().is_ok());
        }
        {
            // Now do it again in a new txn, but abort.
            let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(server_txn.initialise_schema_core().is_ok());
        }
        {
            // Now do it again in a new txn.
            let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(server_txn.initialise_schema_core().is_ok());
            assert!(server_txn.commit().is_ok());
        }
    }

    /// This test is for ongoing, long-term checks of the previous-to-current version
    /// migration. This is in contrast to the specific version checks below, which each
    /// test a single version-to-version migration.
    #[qs_test(domain_level=DOMAIN_PREVIOUS_TGT_LEVEL)]
    async fn test_migrations_dl_previous_to_dl_target(server: &QueryServer) {
        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        let db_domain_version = write_txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");

        assert_eq!(db_domain_version, DOMAIN_PREVIOUS_TGT_LEVEL);

        // == SETUP ==

        // Add a member to a group - it should not be removed.
        // Remove a default member from a group - it should be returned.
        let modlist = ModifyList::new_set(
            Attribute::Member,
            // This achieves both, because it removes IDM_ADMIN from the group
            // while setting only anon as a member.
            ValueSetRefer::new(UUID_ANONYMOUS),
        );
        write_txn
            .internal_modify_uuid(UUID_IDM_ADMINS, &modlist)
            .expect("Unable to modify idm_admins membership");

        // Change default account policy - it should not be reverted.
        let modlist = ModifyList::new_set(
            Attribute::CredentialTypeMinimum,
            ValueSetCredentialType::new(CredentialType::Any),
        );
        write_txn
            .internal_modify_uuid(UUID_IDM_ALL_PERSONS, &modlist)
            .expect("Unable to modify CredentialTypeMinimum");

        write_txn.commit().expect("Unable to commit");

        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        // == Increase the version ==
        write_txn
            .internal_apply_domain_migration(DOMAIN_TGT_LEVEL)
            .expect("Unable to set domain level");

        // Post migration verification.
        // Check that our group is as we left it.
        let idm_admins_entry = write_txn
            .internal_search_uuid(UUID_IDM_ADMINS)
            .expect("Unable to retrieve idm_admins");

        let members = idm_admins_entry
            .get_ava_refer(Attribute::Member)
            .expect("No members present");

        // Still present.
        assert!(members.contains(&UUID_ANONYMOUS));
        // The removed default member was restored by the migration.
        assert!(members.contains(&UUID_IDM_ADMIN));

        // Check that the account policy did not revert.
        let all_persons_entry = write_txn
            .internal_search_uuid(UUID_IDM_ALL_PERSONS)
            .expect("Unable to retrieve all persons");

        assert_eq!(
            all_persons_entry.get_ava_single_credential_type(Attribute::CredentialTypeMinimum),
            Some(CredentialType::Any)
        );

        write_txn.commit().expect("Unable to commit");
    }

    #[qs_test(domain_level=DOMAIN_LEVEL_10)]
    async fn test_migrations_dl10_dl11(server: &QueryServer) {
        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        let db_domain_version = write_txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");

        assert_eq!(db_domain_version, DOMAIN_LEVEL_10);

        write_txn.commit().expect("Unable to commit");

        // == Pre migration verification ==
        // Check that we would currently fail a migration.

        // let mut read_txn = server.read().await.unwrap();
        // drop(read_txn);

        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        // Fix any issues.

        // == Increase the version ==
        write_txn
            .internal_apply_domain_migration(DOMAIN_LEVEL_11)
            .expect("Unable to set domain level to version 11");

        // Post migration verification.

        write_txn.commit().expect("Unable to commit");
    }

    #[qs_test(domain_level=DOMAIN_LEVEL_11)]
    async fn test_migrations_dl11_dl12(server: &QueryServer) {
        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        let db_domain_version = write_txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");

        assert_eq!(db_domain_version, DOMAIN_LEVEL_11);

        // Make a new person.
        let tuuid = Uuid::new_v4();
        let e1 = entry_init!(
            (Attribute::Class, EntryClass::Object.to_value()),
            (Attribute::Class, EntryClass::Person.to_value()),
            (Attribute::Class, EntryClass::Account.to_value()),
            (Attribute::Name, Value::new_iname("testperson1")),
            (Attribute::Uuid, Value::Uuid(tuuid)),
            (Attribute::Description, Value::new_utf8s("testperson1")),
            (Attribute::DisplayName, Value::new_utf8s("testperson1"))
        );

        write_txn
            .internal_create(vec![e1])
            .expect("Unable to create user");

        let user = write_txn
            .internal_search_uuid(tuuid)
            .expect("Unable to load user");

        // They still have an id verification key.
        assert!(user.get_ava_set(Attribute::IdVerificationEcKey).is_some());

        write_txn.commit().expect("Unable to commit");

        // == Pre migration verification ==
        // Check that we would currently fail a migration.

        // let mut read_txn = server.read().await.unwrap();
        // drop(read_txn);

        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        // Fix any issues.

        // == Increase the version ==
        write_txn
            .internal_apply_domain_migration(DOMAIN_LEVEL_12)
            .expect("Unable to set domain level to version 12");

        // Post migration verification.
        let user = write_txn
            .internal_search_uuid(tuuid)
            .expect("Unable to load user");

        // The key has been removed.
        assert!(user.get_ava_set(Attribute::IdVerificationEcKey).is_none());

        // New users don't get a key.
        let t2uuid = Uuid::new_v4();
        let e2 = entry_init!(
            (Attribute::Class, EntryClass::Object.to_value()),
            (Attribute::Class, EntryClass::Person.to_value()),
            (Attribute::Class, EntryClass::Account.to_value()),
            (Attribute::Name, Value::new_iname("testperson2")),
            (Attribute::Uuid, Value::Uuid(t2uuid)),
            (Attribute::Description, Value::new_utf8s("testperson2")),
            (Attribute::DisplayName, Value::new_utf8s("testperson2"))
        );

        write_txn
            .internal_create(vec![e2])
            .expect("Unable to create user");

        let user = write_txn
            .internal_search_uuid(t2uuid)
            .expect("Unable to load user");

        // No key!
        assert!(user.get_ava_set(Attribute::IdVerificationEcKey).is_none());

        write_txn.commit().expect("Unable to commit");
    }

    #[qs_test(domain_level=DOMAIN_LEVEL_12)]
    async fn test_migrations_dl12_dl13(server: &QueryServer) {
        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        let db_domain_version = write_txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");

        assert_eq!(db_domain_version, DOMAIN_LEVEL_12);

        write_txn.commit().expect("Unable to commit");

        // == Pre migration verification ==
        // Check that we would currently fail a migration.

        // let mut read_txn = server.read().await.unwrap();
        // drop(read_txn);

        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        // Fix any issues.

        // == Increase the version ==
        write_txn
            .internal_apply_domain_migration(DOMAIN_LEVEL_13)
            .expect("Unable to set domain level to version 13");

        // Post migration verification.

        write_txn.commit().expect("Unable to commit");
    }
}