kanidmd_lib/server/
migrations.rs

1use crate::prelude::*;
2
3use crate::migration_data;
4use kanidm_proto::internal::{
5    DomainUpgradeCheckItem as ProtoDomainUpgradeCheckItem,
6    DomainUpgradeCheckReport as ProtoDomainUpgradeCheckReport,
7    DomainUpgradeCheckStatus as ProtoDomainUpgradeCheckStatus,
8};
9
10use super::ServerPhase;
11
12impl QueryServer {
13    #[instrument(level = "info", name = "system_initialisation", skip_all)]
14    pub async fn initialise_helper(
15        &self,
16        ts: Duration,
17        domain_target_level: DomainVersion,
18    ) -> Result<(), OperationError> {
19        // We need to perform this in a single transaction pass to prevent tainting
20        // databases during upgrades.
21        let mut write_txn = self.write(ts).await?;
22
23        // Check our database version - attempt to do an initial indexing
24        // based on the in memory configuration. This ONLY triggers ONCE on
25        // the very first run of the instance when the DB in newely created.
26        write_txn.upgrade_reindex(SYSTEM_INDEX_VERSION)?;
27
28        // Because we init the schema here, and commit, this reloads meaning
29        // that the on-disk index meta has been loaded, so our subsequent
30        // migrations will be correctly indexed.
31        //
32        // Remember, that this would normally mean that it's possible for schema
33        // to be mis-indexed (IE we index the new schemas here before we read
34        // the schema to tell us what's indexed), but because we have the in
35        // mem schema that defines how schema is structured, and this is all
36        // marked "system", then we won't have an issue here.
37        write_txn
38            .initialise_schema_core()
39            .and_then(|_| write_txn.reload())?;
40
41        // This is what tells us if the domain entry existed before or not. This
42        // is now the primary method of migrations and version detection.
43        let db_domain_version = match write_txn.internal_search_uuid(UUID_DOMAIN_INFO) {
44            Ok(e) => Ok(e.get_ava_single_uint32(Attribute::Version).unwrap_or(0)),
45            Err(OperationError::NoMatchingEntries) => Ok(0),
46            Err(r) => Err(r),
47        }?;
48
49        debug!(?db_domain_version, "Before setting internal domain info");
50
51        if db_domain_version == 0 {
52            // This is here to catch when we increase domain levels but didn't create the migration
53            // hooks. If this fails it probably means you need to add another migration hook
54            // in the above.
55            debug_assert!(domain_target_level <= DOMAIN_MAX_LEVEL);
56
57            // Assert that we have a minimum creation level that is valid.
58            const { assert!(DOMAIN_MIN_CREATION_LEVEL == DOMAIN_LEVEL_10) };
59
60            // No domain info was present, so neither was the rest of the IDM. Bring up the
61            // full IDM here.
62
63            match domain_target_level {
64                DOMAIN_LEVEL_10 => write_txn.migrate_domain_9_to_10()?,
65                DOMAIN_LEVEL_11 => write_txn.migrate_domain_10_to_11()?,
66                DOMAIN_LEVEL_12 => write_txn.migrate_domain_11_to_12()?,
67                DOMAIN_LEVEL_13 => write_txn.migrate_domain_12_to_13()?,
68                DOMAIN_LEVEL_14 => write_txn.migrate_domain_13_to_14()?,
69                DOMAIN_LEVEL_15 => write_txn.migrate_domain_14_to_15()?,
70                _ => {
71                    error!("Invalid requested domain target level for server bootstrap");
72                    debug_assert!(false);
73                    return Err(OperationError::MG0009InvalidTargetLevelForBootstrap);
74                }
75            }
76
77            write_txn
78                .internal_apply_domain_migration(domain_target_level)
79                .map(|()| {
80                    warn!(
81                        "Domain level has been bootstrapped to {}",
82                        domain_target_level
83                    );
84                })?;
85        }
86
87        // These steps apply both to bootstrapping and normal startup, since we now have
88        // a DB with data populated in either path.
89
90        // Domain info is now present, so we need to reflect that in our server
91        // domain structures. If we don't do this, the in memory domain level
92        // is stuck at 0 which can confuse init domain info below.
93        //
94        // This also is where the former domain taint flag will be loaded to
95        // d_info so that if the *previous* execution of the database was
96        // a devel version, we'll still trigger the forced remigration in
97        // in the case that we are moving from dev -> stable.
98        write_txn.force_domain_reload();
99
100        write_txn.reload()?;
101
102        // Indicate the schema is now ready, which allows dyngroups to work when they
103        // are created in the next phase of migrations.
104        write_txn.set_phase(ServerPhase::SchemaReady);
105
106        // #2756 - if we *aren't* creating the base IDM entries, then we
107        // need to force dyn groups to reload since we're now at schema
108        // ready. This is done indirectly by ... reloading the schema again.
109        //
110        // This is because dyngroups don't load until server phase >= schemaready
111        // and the reload path for these is either a change in the dyngroup entry
112        // itself or a change to schema reloading. Since we aren't changing the
113        // dyngroup here, we have to go via the schema reload path.
114        write_txn.force_schema_reload();
115
116        // Reload as init idm affects access controls.
117        write_txn.reload()?;
118
119        // Domain info is now ready and reloaded, we can proceed.
120        write_txn.set_phase(ServerPhase::DomainInfoReady);
121
122        // This is the start of domain info related migrations which we will need in future
123        // to handle replication. Due to the access control rework, and the addition of "managed by"
124        // syntax, we need to ensure both nodes "fence" replication from each other. We do this
125        // by changing domain infos to be incompatible during this phase.
126
127        // The reloads will have populated this structure now.
128        let domain_info_version = write_txn.get_domain_version();
129        let domain_patch_level = write_txn.get_domain_patch_level();
130        let domain_development_taint = write_txn.get_domain_development_taint();
131        debug!(
132            ?db_domain_version,
133            ?domain_patch_level,
134            ?domain_development_taint,
135            "After setting internal domain info"
136        );
137
138        let mut reload_required = false;
139
140        // If the database domain info is a lower version than our target level, we reload.
141        if domain_info_version < domain_target_level {
142            // if (domain_target_level - domain_info_version) > DOMAIN_MIGRATION_SKIPS {
143            if domain_info_version < DOMAIN_MIGRATION_FROM_MIN {
144                error!(
145                    "UNABLE TO PROCEED. You are attempting a skip update which is NOT SUPPORTED."
146                );
147                error!(
148                    "For more see: https://kanidm.github.io/kanidm/stable/support.html#upgrade-policy and https://kanidm.github.io/kanidm/stable/server_updates.html"
149                );
150                error!(domain_previous_version = ?domain_info_version, domain_target_version = ?domain_target_level, domain_migration_minimum_limit = ?DOMAIN_MIGRATION_FROM_MIN);
151                return Err(OperationError::MG0008SkipUpgradeAttempted);
152            }
153
154            // Apply each step in order.
155            for domain_target_level_step in domain_info_version..domain_target_level {
156                // Rust has no way to do a range with the minimum excluded and the maximum
157                // included, so we have to do min -> max which includes min and excludes max,
158                // and by adding 1 we gett the same result.
159                let domain_target_level_step = domain_target_level_step + 1;
160                write_txn
161                    .internal_apply_domain_migration(domain_target_level_step)
162                    .map(|()| {
163                        warn!(
164                            "Domain level has been raised to {}",
165                            domain_target_level_step
166                        );
167                    })?;
168            }
169
170            // Reload if anything in migrations requires it - this triggers the domain migrations
171            // which in turn can trigger schema reloads etc. If the server was just brought up
172            // then we don't need the extra reload since we are already at the correct
173            // version of the server, and this call to set the target level is just for persistence
174            // of the value.
175            if domain_info_version != 0 {
176                reload_required = true;
177            }
178        } else if domain_info_version > domain_target_level {
179            // This is a DOWNGRADE which may not proceed.
180            error!("UNABLE TO PROCEED. You are attempting a downgrade which is NOT SUPPORTED.");
181            error!(
182                "For more see: https://kanidm.github.io/kanidm/stable/support.html#upgrade-policy and https://kanidm.github.io/kanidm/stable/server_updates.html"
183            );
184            error!(domain_previous_version = ?domain_info_version, domain_target_version = ?domain_target_level);
185            return Err(OperationError::MG0010DowngradeNotAllowed);
186        } else if domain_development_taint {
187            // This forces pre-release versions to re-migrate each start up. This solves
188            // the domain-version-sprawl issue so that during a development cycle we can
189            // do a single domain version bump, and continue to extend the migrations
190            // within that release cycle to contain what we require.
191            //
192            // If this is a pre-release build
193            // AND
194            // we are NOT in a test environment
195            // AND
196            // We did not already need a version migration as above
197            write_txn.domain_remigrate(DOMAIN_PREVIOUS_TGT_LEVEL)?;
198
199            reload_required = true;
200        }
201
202        // If we are new enough to support patches, and we are lower than the target patch level
203        // then a reload will be applied after we raise the patch level.
204        if domain_patch_level < DOMAIN_TGT_PATCH_LEVEL {
205            write_txn
206                .internal_modify_uuid(
207                    UUID_DOMAIN_INFO,
208                    &ModifyList::new_purge_and_set(
209                        Attribute::PatchLevel,
210                        Value::new_uint32(DOMAIN_TGT_PATCH_LEVEL),
211                    ),
212                )
213                .map(|()| {
214                    warn!(
215                        "Domain patch level has been raised to {}",
216                        domain_patch_level
217                    );
218                })?;
219
220            reload_required = true;
221        };
222
223        // Execute whatever operations we have batched up and ready to go. This is needed
224        // to preserve ordering of the operations - if we reloaded after a remigrate then
225        // we would have skipped the patch level fix which needs to have occurred *first*.
226        if reload_required {
227            write_txn.reload()?;
228        }
229
230        // Now set the db/domain devel taint flag to match our current release status
231        // if it changes. This is what breaks the cycle of db taint from dev -> stable
232        let current_devel_flag = option_env!("KANIDM_PRE_RELEASE").is_some();
233        if current_devel_flag {
234            warn!("Domain Development Taint mode is enabled");
235        }
236        if domain_development_taint != current_devel_flag {
237            write_txn.internal_modify_uuid(
238                UUID_DOMAIN_INFO,
239                &ModifyList::new_purge_and_set(
240                    Attribute::DomainDevelopmentTaint,
241                    Value::Bool(current_devel_flag),
242                ),
243            )?;
244        }
245
246        // We are ready to run
247        write_txn.set_phase(ServerPhase::Running);
248
249        // Commit all changes, this also triggers the final reload, this should be a no-op
250        // since we already did all the needed loads above.
251        write_txn.commit()?;
252
253        debug!("Database version check and migrations success! ☀️  ");
254        Ok(())
255    }
256}
257
258impl QueryServerWriteTransaction<'_> {
259    /// Apply a domain migration `to_level`. Errors if `to_level` is not greater than or equal to
260    /// the active level.
261    #[instrument(level = "debug", skip(self))]
262    pub(crate) fn internal_apply_domain_migration(
263        &mut self,
264        to_level: u32,
265    ) -> Result<(), OperationError> {
266        self.internal_modify_uuid(
267            UUID_DOMAIN_INFO,
268            &ModifyList::new_purge_and_set(Attribute::Version, Value::new_uint32(to_level)),
269        )
270        .and_then(|()| self.reload())
271    }
272
273    fn internal_migrate_or_create_batch(
274        &mut self,
275        msg: &str,
276        entries: Vec<EntryInitNew>,
277    ) -> Result<(), OperationError> {
278        #[cfg(test)]
279        eprintln!("MIGRATION BATCH: {}", msg);
280        let r: Result<(), _> = entries
281            .into_iter()
282            .try_for_each(|entry| self.internal_migrate_or_create(entry));
283
284        if let Err(err) = r {
285            error!(?err, msg);
286            debug_assert!(false);
287        }
288
289        Ok(())
290    }
291
292    #[instrument(level = "debug", skip_all)]
293    /// - If the thing exists:
294    ///   - Ensure the set of attributes match and are present
295    ///     (but don't delete multivalue, or extended attributes in the situation.
296    /// - If not:
297    ///   - Create the entry
298    ///
299    /// This will extra classes an attributes alone!
300    ///
301    /// NOTE: `gen_modlist*` IS schema aware and will handle multivalue correctly!
302    fn internal_migrate_or_create(
303        &mut self,
304        e: Entry<EntryInit, EntryNew>,
305    ) -> Result<(), OperationError> {
306        // NOTE: Ignoring an attribute only affects the migration phase, not create.
307        self.internal_migrate_or_create_ignore_attrs(
308            e,
309            &[
310                // If the credential type is present, we don't want to touch it.
311                Attribute::CredentialTypeMinimum,
312            ],
313        )
314    }
315
316    #[instrument(level = "debug", skip_all)]
317    fn internal_delete_batch(
318        &mut self,
319        msg: &str,
320        entries: Vec<Uuid>,
321    ) -> Result<(), OperationError> {
322        let filter = entries
323            .into_iter()
324            .map(|uuid| f_eq(Attribute::Uuid, PartialValue::Uuid(uuid)))
325            .collect();
326
327        let filter = filter_all!(f_or(filter));
328
329        let result = self.internal_delete(&filter);
330
331        match result {
332            Ok(_) | Err(OperationError::NoMatchingEntries) => Ok(()),
333            Err(err) => {
334                error!(?err, msg);
335                Err(err)
336            }
337        }
338    }
339
340    /// This is the same as [QueryServerWriteTransaction::internal_migrate_or_create]
341    /// but it will ignore the specified list of attributes, so that if an admin has
342    /// modified those values then we don't stomp them.
343    #[instrument(level = "trace", skip_all)]
344    fn internal_migrate_or_create_ignore_attrs(
345        &mut self,
346        mut e: Entry<EntryInit, EntryNew>,
347        attrs: &[Attribute],
348    ) -> Result<(), OperationError> {
349        trace!("operating on {:?}", e.get_uuid());
350
351        let Some(filt) = e.filter_from_attrs(&[Attribute::Uuid]) else {
352            return Err(OperationError::FilterGeneration);
353        };
354
355        trace!("search {:?}", filt);
356
357        let results = self.internal_search(filt.clone())?;
358
359        if results.is_empty() {
360            // The entry does not exist. Create it.
361
362            // If there are create-once members, set them up now.
363            if let Some(members_create_once) = e.pop_ava(Attribute::MemberCreateOnce) {
364                if let Some(members) = e.get_ava_mut(Attribute::Member) {
365                    // Merge
366                    members.merge(&members_create_once).inspect_err(|err| {
367                        error!(?err, "Unable to merge member sets, mismatched types?");
368                    })?;
369                } else {
370                    // Just push
371                    e.set_ava_set(&Attribute::Member, members_create_once);
372                }
373            };
374
375            self.internal_create(vec![e])
376        } else if results.len() == 1 {
377            // This is always ignored during migration.
378            e.remove_ava(&Attribute::MemberCreateOnce);
379
380            // For each ignored attr, we remove it from entry.
381            for attr in attrs.iter() {
382                e.remove_ava(attr);
383            }
384
385            // If the thing is subset, pass
386            match e.gen_modlist_assert(&self.schema) {
387                Ok(modlist) => {
388                    // Apply to &results[0]
389                    trace!(?modlist);
390                    self.internal_modify(&filt, &modlist)
391                }
392                Err(e) => Err(OperationError::SchemaViolation(e)),
393            }
394        } else {
395            admin_error!(
396                "Invalid Result Set - Expected One Entry for {:?} - {:?}",
397                filt,
398                results
399            );
400            Err(OperationError::InvalidDbState)
401        }
402    }
403
404    // Commented as an example of patch application
405    /*
406    /// Patch Application - This triggers a one-shot fixup task for issue #3178
407    /// to force access controls to re-migrate in existing databases so that they're
408    /// content matches expected values.
409    #[instrument(level = "info", skip_all)]
410    pub(crate) fn migrate_domain_patch_level_2(&mut self) -> Result<(), OperationError> {
411        admin_warn!("applying domain patch 2.");
412
413        debug_assert!(*self.phase >= ServerPhase::SchemaReady);
414
415        let idm_data = migration_data::dl9::phase_7_builtin_access_control_profiles();
416
417        idm_data
418            .into_iter()
419            .try_for_each(|entry| self.internal_migrate_or_create(entry))
420            .map_err(|err| {
421                error!(?err, "migrate_domain_patch_level_2 -> Error");
422                err
423            })?;
424
425        self.reload()?;
426
427        Ok(())
428    }
429    */
430
431    /// Migration domain level 9 to 10 (1.6.0)
432    #[instrument(level = "info", skip_all)]
433    pub(crate) fn migrate_domain_9_to_10(&mut self) -> Result<(), OperationError> {
434        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_9 {
435            error!("Unable to raise domain level from 9 to 10.");
436            return Err(OperationError::MG0004DomainLevelInDevelopment);
437        }
438
439        // =========== Apply changes ==============
440        self.internal_migrate_or_create_batch(
441            "phase 1 - schema attrs",
442            migration_data::dl10::phase_1_schema_attrs(),
443        )?;
444
445        self.internal_migrate_or_create_batch(
446            "phase 2 - schema classes",
447            migration_data::dl10::phase_2_schema_classes(),
448        )?;
449
450        // Reload for the new schema.
451        self.reload()?;
452
453        // Since we just loaded in a ton of schema, lets reindex it in case we added
454        // new indexes, or this is a bootstrap and we have no indexes yet.
455        self.reindex(false)?;
456
457        // Set Phase
458        // Indicate the schema is now ready, which allows dyngroups to work when they
459        // are created in the next phase of migrations.
460        self.set_phase(ServerPhase::SchemaReady);
461
462        self.internal_migrate_or_create_batch(
463            "phase 3 - key provider",
464            migration_data::dl10::phase_3_key_provider(),
465        )?;
466
467        // Reload for the new key providers
468        self.reload()?;
469
470        self.internal_migrate_or_create_batch(
471            "phase 4 - system entries",
472            migration_data::dl10::phase_4_system_entries(),
473        )?;
474
475        // Reload for the new system entries
476        self.reload()?;
477
478        // Domain info is now ready and reloaded, we can proceed.
479        self.set_phase(ServerPhase::DomainInfoReady);
480
481        // Bring up the IDM entries.
482        self.internal_migrate_or_create_batch(
483            "phase 5 - builtin admin entries",
484            migration_data::dl10::phase_5_builtin_admin_entries()?,
485        )?;
486
487        self.internal_migrate_or_create_batch(
488            "phase 6 - builtin not admin entries",
489            migration_data::dl10::phase_6_builtin_non_admin_entries()?,
490        )?;
491
492        self.internal_migrate_or_create_batch(
493            "phase 7 - builtin access control profiles",
494            migration_data::dl10::phase_7_builtin_access_control_profiles(),
495        )?;
496
497        self.reload()?;
498
499        // =========== OAuth2 Cryptography Migration ==============
500
501        debug!("START OAUTH2 MIGRATION");
502
503        // Load all the OAuth2 providers.
504        let all_oauth2_rs_entries = self.internal_search(filter!(f_eq(
505            Attribute::Class,
506            EntryClass::OAuth2ResourceServer.into()
507        )))?;
508
509        if !all_oauth2_rs_entries.is_empty() {
510            let entry_iter = all_oauth2_rs_entries.iter().map(|tgt_entry| {
511                let entry_uuid = tgt_entry.get_uuid();
512                let mut modlist = ModifyList::new_list(vec![
513                    Modify::Present(Attribute::Class, EntryClass::KeyObject.to_value()),
514                    Modify::Present(Attribute::Class, EntryClass::KeyObjectJwtEs256.to_value()),
515                    Modify::Present(Attribute::Class, EntryClass::KeyObjectJweA128GCM.to_value()),
516                    // Delete the fernet key, rs256 if any, and the es256 key
517                    Modify::Purged(Attribute::OAuth2RsTokenKey),
518                    Modify::Purged(Attribute::Es256PrivateKeyDer),
519                    Modify::Purged(Attribute::Rs256PrivateKeyDer),
520                ]);
521
522                trace!(?tgt_entry);
523
524                // Import the ES256 Key
525                if let Some(es256_private_der) =
526                    tgt_entry.get_ava_single_private_binary(Attribute::Es256PrivateKeyDer)
527                {
528                    modlist.push_mod(Modify::Present(
529                        Attribute::KeyActionImportJwsEs256,
530                        Value::PrivateBinary(es256_private_der.to_vec()),
531                    ))
532                } else {
533                    warn!("Unable to migrate es256 key");
534                }
535
536                let has_rs256 = tgt_entry
537                    .get_ava_single_bool(Attribute::OAuth2JwtLegacyCryptoEnable)
538                    .unwrap_or(false);
539
540                // If there is an rs256 key, import it.
541                // Import the RS256 Key
542                if has_rs256 {
543                    modlist.push_mod(Modify::Present(
544                        Attribute::Class,
545                        EntryClass::KeyObjectJwtEs256.to_value(),
546                    ));
547
548                    if let Some(rs256_private_der) =
549                        tgt_entry.get_ava_single_private_binary(Attribute::Rs256PrivateKeyDer)
550                    {
551                        modlist.push_mod(Modify::Present(
552                            Attribute::KeyActionImportJwsRs256,
553                            Value::PrivateBinary(rs256_private_der.to_vec()),
554                        ))
555                    } else {
556                        warn!("Unable to migrate rs256 key");
557                    }
558                }
559
560                (entry_uuid, modlist)
561            });
562
563            self.internal_batch_modify(entry_iter)?;
564        }
565
566        // Reload for new keys, and updated oauth2
567        self.reload()?;
568
569        // Done!
570
571        Ok(())
572    }
573
    /// Migration domain level 10 to 11 (1.7.0)
    ///
    /// Applies the dl11 migration data in ordered phases (schema attrs/classes,
    /// key providers, system entries, IDM admin/non-admin entries, access control
    /// profiles), reloading between phases so each phase sees the previous one.
    ///
    /// Outside of tests, errors with `MG0004DomainLevelInDevelopment` when this
    /// build's target level is below level 10.
    #[instrument(level = "info", skip_all)]
    pub(crate) fn migrate_domain_10_to_11(&mut self) -> Result<(), OperationError> {
        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_10 {
            error!("Unable to raise domain level from 10 to 11.");
            return Err(OperationError::MG0004DomainLevelInDevelopment);
        }

        // =========== Apply changes ==============
        self.internal_migrate_or_create_batch(
            "phase 1 - schema attrs",
            migration_data::dl11::phase_1_schema_attrs(),
        )?;

        self.internal_migrate_or_create_batch(
            "phase 2 - schema classes",
            migration_data::dl11::phase_2_schema_classes(),
        )?;

        // Reload for the new schema.
        self.reload()?;

        // Since we just loaded in a ton of schema, lets reindex it in case we added
        // new indexes, or this is a bootstrap and we have no indexes yet.
        self.reindex(false)?;

        // Set Phase
        // Indicate the schema is now ready, which allows dyngroups to work when they
        // are created in the next phase of migrations.
        self.set_phase(ServerPhase::SchemaReady);

        self.internal_migrate_or_create_batch(
            "phase 3 - key provider",
            migration_data::dl11::phase_3_key_provider(),
        )?;

        // Reload for the new key providers
        self.reload()?;

        self.internal_migrate_or_create_batch(
            "phase 4 - system entries",
            migration_data::dl11::phase_4_system_entries(),
        )?;

        // Reload for the new system entries
        self.reload()?;

        // Domain info is now ready and reloaded, we can proceed.
        self.set_phase(ServerPhase::DomainInfoReady);

        // Bring up the IDM entries.
        self.internal_migrate_or_create_batch(
            "phase 5 - builtin admin entries",
            migration_data::dl11::phase_5_builtin_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 6 - builtin not admin entries",
            migration_data::dl11::phase_6_builtin_non_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 7 - builtin access control profiles",
            migration_data::dl11::phase_7_builtin_access_control_profiles(),
        )?;

        // Final reload so all of the above is live before the caller proceeds.
        self.reload()?;

        Ok(())
    }
644
    /// Migration domain level 11 to 12 (1.8.0)
    ///
    /// Applies the dl12 migration data in ordered phases (schema attrs/classes,
    /// key providers, system entries, IDM admin/non-admin entries, access control
    /// profiles), reloading between phases, then purges leftover
    /// `IdVerificationEcKey` values from all entries that still carry them.
    ///
    /// Outside of tests, errors with `MG0004DomainLevelInDevelopment` when this
    /// build's target level is below level 11.
    #[instrument(level = "info", skip_all)]
    pub(crate) fn migrate_domain_11_to_12(&mut self) -> Result<(), OperationError> {
        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_11 {
            error!("Unable to raise domain level from 11 to 12.");
            return Err(OperationError::MG0004DomainLevelInDevelopment);
        }

        // =========== Apply changes ==============
        self.internal_migrate_or_create_batch(
            "phase 1 - schema attrs",
            migration_data::dl12::phase_1_schema_attrs(),
        )?;

        self.internal_migrate_or_create_batch(
            "phase 2 - schema classes",
            migration_data::dl12::phase_2_schema_classes(),
        )?;

        // Reload for the new schema.
        self.reload()?;

        // Since we just loaded in a ton of schema, lets reindex it in case we added
        // new indexes, or this is a bootstrap and we have no indexes yet.
        self.reindex(false)?;

        // Set Phase
        // Indicate the schema is now ready, which allows dyngroups to work when they
        // are created in the next phase of migrations.
        self.set_phase(ServerPhase::SchemaReady);

        self.internal_migrate_or_create_batch(
            "phase 3 - key provider",
            migration_data::dl12::phase_3_key_provider(),
        )?;

        // Reload for the new key providers
        self.reload()?;

        self.internal_migrate_or_create_batch(
            "phase 4 - system entries",
            migration_data::dl12::phase_4_system_entries(),
        )?;

        // Reload for the new system entries
        self.reload()?;

        // Domain info is now ready and reloaded, we can proceed.
        self.set_phase(ServerPhase::DomainInfoReady);

        // Bring up the IDM entries.
        self.internal_migrate_or_create_batch(
            "phase 5 - builtin admin entries",
            migration_data::dl12::phase_5_builtin_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 6 - builtin not admin entries",
            migration_data::dl12::phase_6_builtin_non_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 7 - builtin access control profiles",
            migration_data::dl12::phase_7_builtin_access_control_profiles(),
        )?;

        self.reload()?;

        // Cleanup any leftover id keys
        let modlist = ModifyList::new_purge(Attribute::IdVerificationEcKey);
        let filter = filter_all!(f_pres(Attribute::IdVerificationEcKey));

        self.internal_modify(&filter, &modlist)?;

        Ok(())
    }
721
    /// Migration domain level 12 to 13 (1.9.0)
    ///
    /// Applies the dl13 migration data in ordered phases (schema attrs/classes,
    /// key providers, system entries, IDM admin/non-admin entries, access control
    /// profiles), reloading between phases, then deletes the dl13 set of retired
    /// entries by uuid (phase 8).
    ///
    /// Outside of tests, errors with `MG0004DomainLevelInDevelopment` when this
    /// build's target level is below level 12.
    #[instrument(level = "info", skip_all)]
    pub(crate) fn migrate_domain_12_to_13(&mut self) -> Result<(), OperationError> {
        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_12 {
            error!("Unable to raise domain level from 12 to 13.");
            return Err(OperationError::MG0004DomainLevelInDevelopment);
        }

        // =========== Apply changes ==============
        self.internal_migrate_or_create_batch(
            "phase 1 - schema attrs",
            migration_data::dl13::phase_1_schema_attrs(),
        )?;

        self.internal_migrate_or_create_batch(
            "phase 2 - schema classes",
            migration_data::dl13::phase_2_schema_classes(),
        )?;

        // Reload for the new schema.
        self.reload()?;

        // Since we just loaded in a ton of schema, lets reindex it in case we added
        // new indexes, or this is a bootstrap and we have no indexes yet.
        self.reindex(false)?;

        // Set Phase
        // Indicate the schema is now ready, which allows dyngroups to work when they
        // are created in the next phase of migrations.
        self.set_phase(ServerPhase::SchemaReady);

        self.internal_migrate_or_create_batch(
            "phase 3 - key provider",
            migration_data::dl13::phase_3_key_provider(),
        )?;

        // Reload for the new key providers
        self.reload()?;

        self.internal_migrate_or_create_batch(
            "phase 4 - system entries",
            migration_data::dl13::phase_4_system_entries(),
        )?;

        // Reload for the new system entries
        self.reload()?;

        // Domain info is now ready and reloaded, we can proceed.
        self.set_phase(ServerPhase::DomainInfoReady);

        // Bring up the IDM entries.
        self.internal_migrate_or_create_batch(
            "phase 5 - builtin admin entries",
            migration_data::dl13::phase_5_builtin_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 6 - builtin not admin entries",
            migration_data::dl13::phase_6_builtin_non_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 7 - builtin access control profiles",
            migration_data::dl13::phase_7_builtin_access_control_profiles(),
        )?;

        // Remove entries retired at this level.
        self.internal_delete_batch(
            "phase 8 - delete UUIDS",
            migration_data::dl13::phase_8_delete_uuids(),
        )?;

        self.reload()?;

        Ok(())
    }
797
    /// Migration domain level 13 to 14 (1.10.0)
    ///
    /// Applies the `dl14` migration data in strictly ordered phases. The
    /// reloads between phases are load-bearing: schema must be live before
    /// reindexing, and key providers / system entries must be live before the
    /// IDM and access-control entries that rely on them are created.
    ///
    /// # Errors
    /// Returns [`OperationError::MG0004DomainLevelInDevelopment`] outside of
    /// tests when the build's target level is too low, or any error surfaced
    /// by the batch create/migrate/delete and reload operations.
    #[instrument(level = "info", skip_all)]
    pub(crate) fn migrate_domain_13_to_14(&mut self) -> Result<(), OperationError> {
        // NOTE(review): guard compares against the *from* level (13), matching
        // the sibling migrations — confirm this is the intended in-development
        // guard rather than a check against the target level (14).
        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_13 {
            error!("Unable to raise domain level from 13 to 14.");
            return Err(OperationError::MG0004DomainLevelInDevelopment);
        }

        // =========== Apply changes ==============
        // NOTE(review): unlike the sibling migrations, this phase label embeds
        // DOMAIN_TGT_LEVEL — presumably for extra diagnostics; confirm this is
        // deliberate rather than a leftover.
        self.internal_migrate_or_create_batch(
            &format!("phase 1 - schema attrs target {}", DOMAIN_TGT_LEVEL),
            migration_data::dl14::phase_1_schema_attrs(),
        )?;

        self.internal_migrate_or_create_batch(
            "phase 2 - schema classes",
            migration_data::dl14::phase_2_schema_classes(),
        )?;

        // Reload for the new schema.
        self.reload()?;

        // Since we just loaded in a ton of schema, lets reindex it in case we added
        // new indexes, or this is a bootstrap and we have no indexes yet.
        self.reindex(false)?;

        // Set Phase
        // Indicate the schema is now ready, which allows dyngroups to work when they
        // are created in the next phase of migrations.
        self.set_phase(ServerPhase::SchemaReady);

        self.internal_migrate_or_create_batch(
            "phase 3 - key provider",
            migration_data::dl14::phase_3_key_provider(),
        )?;

        // Reload for the new key providers
        self.reload()?;

        self.internal_migrate_or_create_batch(
            "phase 4 - dl14 system entries",
            migration_data::dl14::phase_4_system_entries(),
        )?;

        // Reload for the new system entries
        self.reload()?;

        // Domain info is now ready and reloaded, we can proceed.
        self.set_phase(ServerPhase::DomainInfoReady);

        // Bring up the IDM entries.
        self.internal_migrate_or_create_batch(
            "phase 5 - builtin admin entries",
            migration_data::dl14::phase_5_builtin_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 6 - builtin not admin entries",
            migration_data::dl14::phase_6_builtin_non_admin_entries()?,
        )?;

        self.internal_migrate_or_create_batch(
            "phase 7 - builtin access control profiles",
            migration_data::dl14::phase_7_builtin_access_control_profiles(),
        )?;

        // Remove entries that are obsolete at this level.
        self.internal_delete_batch(
            "phase 8 - delete UUIDS",
            migration_data::dl14::phase_8_delete_uuids(),
        )?;

        // Final reload so the deletions take effect for subsequent operations.
        self.reload()?;

        Ok(())
    }
873
874    /// Migration domain level 14 to 15 (1.11.0)
875    #[instrument(level = "info", skip_all)]
876    pub(crate) fn migrate_domain_14_to_15(&mut self) -> Result<(), OperationError> {
877        if !cfg!(test) && DOMAIN_TGT_LEVEL < DOMAIN_LEVEL_14 {
878            error!("Unable to raise domain level from 14 to 15.");
879            return Err(OperationError::MG0004DomainLevelInDevelopment);
880        }
881
882        Ok(())
883    }
884
885    #[instrument(level = "info", skip_all)]
886    pub(crate) fn initialise_schema_core(&mut self) -> Result<(), OperationError> {
887        admin_debug!("initialise_schema_core -> start ...");
888        // Load in all the "core" schema, that we already have in "memory".
889        let entries = self.schema.to_entries();
890
891        // admin_debug!("Dumping schemas: {:?}", entries);
892
893        // internal_migrate_or_create.
894        let r: Result<_, _> = entries.into_iter().try_for_each(|e| {
895            trace!(?e, "init schema entry");
896            self.internal_migrate_or_create(e)
897        });
898        if r.is_ok() {
899            admin_debug!("initialise_schema_core -> Ok!");
900        } else {
901            admin_error!(?r, "initialise_schema_core -> Error");
902        }
903        // why do we have error handling if it's always supposed to be `Ok`?
904        debug_assert!(r.is_ok());
905        r
906    }
907}
908
909impl QueryServerReadTransaction<'_> {
910    /// Retrieve the domain info of this server
911    pub fn domain_upgrade_check(
912        &mut self,
913    ) -> Result<ProtoDomainUpgradeCheckReport, OperationError> {
914        let d_info = &self.d_info;
915
916        let name = d_info.d_name.clone();
917        let uuid = d_info.d_uuid;
918        let current_level = d_info.d_vers;
919        let upgrade_level = DOMAIN_TGT_NEXT_LEVEL;
920
921        let mut report_items = Vec::with_capacity(1);
922
923        if current_level <= DOMAIN_LEVEL_7 && upgrade_level >= DOMAIN_LEVEL_8 {
924            let item = self
925                .domain_upgrade_check_7_to_8_security_keys()
926                .map_err(|err| {
927                    error!(
928                        ?err,
929                        "Failed to perform domain upgrade check 7 to 8 - security-keys"
930                    );
931                    err
932                })?;
933            report_items.push(item);
934
935            let item = self
936                .domain_upgrade_check_7_to_8_oauth2_strict_redirect_uri()
937                .map_err(|err| {
938                    error!(
939                        ?err,
940                        "Failed to perform domain upgrade check 7 to 8 - oauth2-strict-redirect_uri"
941                    );
942                    err
943                })?;
944            report_items.push(item);
945        }
946
947        Ok(ProtoDomainUpgradeCheckReport {
948            name,
949            uuid,
950            current_level,
951            upgrade_level,
952            report_items,
953        })
954    }
955
956    pub(crate) fn domain_upgrade_check_7_to_8_security_keys(
957        &mut self,
958    ) -> Result<ProtoDomainUpgradeCheckItem, OperationError> {
959        let filter = filter!(f_and!([
960            f_eq(Attribute::Class, EntryClass::Account.into()),
961            f_pres(Attribute::PrimaryCredential),
962        ]));
963
964        let results = self.internal_search(filter)?;
965
966        let affected_entries = results
967            .into_iter()
968            .filter_map(|entry| {
969                if entry
970                    .get_ava_single_credential(Attribute::PrimaryCredential)
971                    .map(|cred| cred.has_securitykey())
972                    .unwrap_or_default()
973                {
974                    Some(entry.get_display_id())
975                } else {
976                    None
977                }
978            })
979            .collect::<Vec<_>>();
980
981        let status = if affected_entries.is_empty() {
982            ProtoDomainUpgradeCheckStatus::Pass7To8SecurityKeys
983        } else {
984            ProtoDomainUpgradeCheckStatus::Fail7To8SecurityKeys
985        };
986
987        Ok(ProtoDomainUpgradeCheckItem {
988            status,
989            from_level: DOMAIN_LEVEL_7,
990            to_level: DOMAIN_LEVEL_8,
991            affected_entries,
992        })
993    }
994
995    pub(crate) fn domain_upgrade_check_7_to_8_oauth2_strict_redirect_uri(
996        &mut self,
997    ) -> Result<ProtoDomainUpgradeCheckItem, OperationError> {
998        let filter = filter!(f_and!([
999            f_eq(Attribute::Class, EntryClass::OAuth2ResourceServer.into()),
1000            f_andnot(f_pres(Attribute::OAuth2StrictRedirectUri)),
1001        ]));
1002
1003        let results = self.internal_search(filter)?;
1004
1005        let affected_entries = results
1006            .into_iter()
1007            .map(|entry| entry.get_display_id())
1008            .collect::<Vec<_>>();
1009
1010        let status = if affected_entries.is_empty() {
1011            ProtoDomainUpgradeCheckStatus::Pass7To8Oauth2StrictRedirectUri
1012        } else {
1013            ProtoDomainUpgradeCheckStatus::Fail7To8Oauth2StrictRedirectUri
1014        };
1015
1016        Ok(ProtoDomainUpgradeCheckItem {
1017            status,
1018            from_level: DOMAIN_LEVEL_7,
1019            to_level: DOMAIN_LEVEL_8,
1020            affected_entries,
1021        })
1022    }
1023}
1024
#[cfg(test)]
mod tests {
    // use super::{ProtoDomainUpgradeCheckItem, ProtoDomainUpgradeCheckStatus};
    use crate::prelude::*;
    use crate::value::CredentialType;
    use crate::valueset::ValueSetCredentialType;

    #[qs_test]
    async fn test_init_idempotent_schema_core(server: &QueryServer) {
        {
            // Setup and abort.
            let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(server_txn.initialise_schema_core().is_ok());
        }
        {
            let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(server_txn.initialise_schema_core().is_ok());
            assert!(server_txn.initialise_schema_core().is_ok());
            assert!(server_txn.commit().is_ok());
        }
        {
            // Now do it again in a new txn, but abort
            let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(server_txn.initialise_schema_core().is_ok());
        }
        {
            // Now do it again in a new txn.
            let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
            assert!(server_txn.initialise_schema_core().is_ok());
            assert!(server_txn.commit().is_ok());
        }
    }

    /// This test is for ongoing/longterm checks over the previous to current version.
    /// This is in contrast to the specific version checks below that are often to
    /// test a version to version migration.
    #[qs_test(domain_level=DOMAIN_PREVIOUS_TGT_LEVEL)]
    async fn test_migrations_dl_previous_to_dl_target(server: &QueryServer) {
        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        let db_domain_version = write_txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");

        assert_eq!(db_domain_version, DOMAIN_PREVIOUS_TGT_LEVEL);

        // == SETUP ==

        // Add a member to a group - it should not be removed.
        // Remove a default member from a group - it should be returned.
        let modlist = ModifyList::new_set(
            Attribute::Member,
            // This achieves both because this removes IDM_ADMIN from the group
            // while setting only anon as a member.
            ValueSetRefer::new(UUID_ANONYMOUS),
        );
        write_txn
            .internal_modify_uuid(UUID_IDM_ADMINS, &modlist)
            .expect("Unable to modify idm_admins members");

        // Remove a group from an object that is "create once".  It should not
        // be re-added.
        let modlist = ModifyList::new_purge(Attribute::Member);
        write_txn
            .internal_modify_uuid(UUID_IDM_PEOPLE_SELF_NAME_WRITE, &modlist)
            .expect("Unable to remove idm_all_persons from self-write");

        // Change default account policy - it should not be reverted.
        let modlist = ModifyList::new_set(
            Attribute::CredentialTypeMinimum,
            ValueSetCredentialType::new(CredentialType::Any),
        );
        write_txn
            .internal_modify_uuid(UUID_IDM_ALL_PERSONS, &modlist)
            .expect("Unable to modify CredentialTypeMinimum");

        write_txn.commit().expect("Unable to commit");

        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        // == Increase the version ==
        write_txn
            .internal_apply_domain_migration(DOMAIN_TGT_LEVEL)
            .expect("Unable to set domain level");

        // post migration verification.
        // Check that our group is as we left it
        let idm_admins_entry = write_txn
            .internal_search_uuid(UUID_IDM_ADMINS)
            .expect("Unable to retrieve idm_admins");

        let members = idm_admins_entry
            .get_ava_refer(Attribute::Member)
            .expect("No members present");

        // Still present
        assert!(members.contains(&UUID_ANONYMOUS));
        // Was reverted
        assert!(members.contains(&UUID_IDM_ADMIN));

        // Check that self-write still doesn't have all persons.
        let idm_people_self_name_write_entry = write_txn
            .internal_search_uuid(UUID_IDM_PEOPLE_SELF_NAME_WRITE)
            .expect("Unable to retrieve idm_people_self_name_write");

        let members = idm_people_self_name_write_entry.get_ava_refer(Attribute::Member);

        // There are no members!
        assert!(members.is_none());

        // Check that the account policy did not revert.
        let all_persons_entry = write_txn
            .internal_search_uuid(UUID_IDM_ALL_PERSONS)
            .expect("Unable to retrieve all persons");

        assert_eq!(
            all_persons_entry.get_ava_single_credential_type(Attribute::CredentialTypeMinimum),
            Some(CredentialType::Any)
        );

        write_txn.commit().expect("Unable to commit");
    }

    #[qs_test(domain_level=DOMAIN_TGT_LEVEL)]
    async fn test_migrations_prevent_downgrades(server: &QueryServer) {
        let curtime = duration_from_epoch_now();

        let mut write_txn = server.write(curtime).await.unwrap();

        let db_domain_version = write_txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");

        assert_eq!(db_domain_version, DOMAIN_TGT_LEVEL);

        drop(write_txn);

        // MUST NOT SUCCEED.
        let err = server
            .initialise_helper(curtime, DOMAIN_PREVIOUS_TGT_LEVEL)
            .await
            .expect_err("Domain level was lowered!!!!");

        assert_eq!(err, OperationError::MG0010DowngradeNotAllowed);
    }

    #[qs_test(domain_level=DOMAIN_MIGRATION_FROM_INVALID)]
    async fn test_migrations_prevent_skips(server: &QueryServer) {
        let curtime = duration_from_epoch_now();

        let mut write_txn = server.write(curtime).await.unwrap();

        let db_domain_version = write_txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");

        assert_eq!(db_domain_version, DOMAIN_MIGRATION_FROM_INVALID);

        drop(write_txn);

        // MUST NOT SUCCEED.
        let err = server
            .initialise_helper(curtime, DOMAIN_TGT_LEVEL)
            .await
            .expect_err("Migration went ahead!!!!");

        assert_eq!(err, OperationError::MG0008SkipUpgradeAttempted);
    }

    #[qs_test(domain_level=DOMAIN_MIGRATION_FROM_MIN)]
    async fn test_migrations_skip_valid(server: &QueryServer) {
        let curtime = duration_from_epoch_now();
        // This is a smoke test that X -> Z migrations work for some range. This doesn't
        // absolve us of the need to write more detailed migration tests.
        let mut write_txn = server.write(curtime).await.unwrap();

        let db_domain_version = write_txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");

        assert_eq!(db_domain_version, DOMAIN_MIGRATION_FROM_MIN);

        drop(write_txn);

        // MUST SUCCEED.
        server
            .initialise_helper(curtime, DOMAIN_TGT_LEVEL)
            .await
            .expect("Migration failed!!!!")
    }

    #[qs_test(domain_level=DOMAIN_LEVEL_10)]
    async fn test_migrations_dl10_dl11(server: &QueryServer) {
        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        let db_domain_version = write_txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");

        assert_eq!(db_domain_version, DOMAIN_LEVEL_10);

        write_txn.commit().expect("Unable to commit");

        // == pre migration verification. ==
        // check we currently would fail a migration.

        // let mut read_txn = server.read().await.unwrap();
        // drop(read_txn);

        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        // Fix any issues

        // == Increase the version ==
        write_txn
            .internal_apply_domain_migration(DOMAIN_LEVEL_11)
            .expect("Unable to set domain level to version 11");

        // post migration verification.

        write_txn.commit().expect("Unable to commit");
    }

    #[qs_test(domain_level=DOMAIN_LEVEL_11)]
    async fn test_migrations_dl11_dl12(server: &QueryServer) {
        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        let db_domain_version = write_txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");

        assert_eq!(db_domain_version, DOMAIN_LEVEL_11);

        // Make a new person.
        let tuuid = Uuid::new_v4();
        let e1 = entry_init!(
            (Attribute::Class, EntryClass::Object.to_value()),
            (Attribute::Class, EntryClass::Person.to_value()),
            (Attribute::Class, EntryClass::Account.to_value()),
            (Attribute::Name, Value::new_iname("testperson1")),
            (Attribute::Uuid, Value::Uuid(tuuid)),
            (Attribute::Description, Value::new_utf8s("testperson1")),
            (Attribute::DisplayName, Value::new_utf8s("testperson1"))
        );

        write_txn
            .internal_create(vec![e1])
            .expect("Unable to create user");

        let user = write_txn
            .internal_search_uuid(tuuid)
            .expect("Unable to load user");

        // They still have an id verification key
        assert!(user.get_ava_set(Attribute::IdVerificationEcKey).is_some());

        write_txn.commit().expect("Unable to commit");

        // == pre migration verification. ==
        // check we currently would fail a migration.

        // let mut read_txn = server.read().await.unwrap();
        // drop(read_txn);

        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        // Fix any issues

        // == Increase the version ==
        write_txn
            .internal_apply_domain_migration(DOMAIN_LEVEL_12)
            .expect("Unable to set domain level to version 12");

        // post migration verification.
        let user = write_txn
            .internal_search_uuid(tuuid)
            .expect("Unable to load user");

        // The key has been removed.
        assert!(user.get_ava_set(Attribute::IdVerificationEcKey).is_none());

        // New users don't get a key
        let t2uuid = Uuid::new_v4();
        let e2 = entry_init!(
            (Attribute::Class, EntryClass::Object.to_value()),
            (Attribute::Class, EntryClass::Person.to_value()),
            (Attribute::Class, EntryClass::Account.to_value()),
            (Attribute::Name, Value::new_iname("testperson2")),
            (Attribute::Uuid, Value::Uuid(t2uuid)),
            (Attribute::Description, Value::new_utf8s("testperson2")),
            (Attribute::DisplayName, Value::new_utf8s("testperson2"))
        );

        write_txn
            .internal_create(vec![e2])
            .expect("Unable to create user");

        let user = write_txn
            .internal_search_uuid(t2uuid)
            .expect("Unable to load user");

        // No key!
        assert!(user.get_ava_set(Attribute::IdVerificationEcKey).is_none());

        write_txn.commit().expect("Unable to commit");
    }

    #[qs_test(domain_level=DOMAIN_LEVEL_12)]
    async fn test_migrations_dl12_dl13(server: &QueryServer) {
        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        let db_domain_version = write_txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");

        assert_eq!(db_domain_version, DOMAIN_LEVEL_12);

        write_txn.commit().expect("Unable to commit");

        // == pre migration verification. ==
        // check we currently would fail a migration.

        // let mut read_txn = server.read().await.unwrap();
        // drop(read_txn);

        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        // Fix any issues

        // == Increase the version ==
        write_txn
            .internal_apply_domain_migration(DOMAIN_LEVEL_13)
            .expect("Unable to set domain level to version 13");

        // post migration verification.

        write_txn.commit().expect("Unable to commit");
    }

    #[qs_test(domain_level=DOMAIN_LEVEL_13)]
    async fn test_migrations_dl13_dl14(server: &QueryServer) {
        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        let db_domain_version = write_txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("unable to access domain entry")
            .get_ava_single_uint32(Attribute::Version)
            .expect("Attribute Version not present");

        assert_eq!(db_domain_version, DOMAIN_LEVEL_13);

        write_txn.commit().expect("Unable to commit");

        // == pre migration verification. ==
        // check we currently would fail a migration.

        // let mut read_txn = server.read().await.unwrap();
        // drop(read_txn);

        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();

        // Fix any issues

        // == Increase the version ==
        write_txn
            .internal_apply_domain_migration(DOMAIN_LEVEL_14)
            .expect("Unable to set domain level to version 14");

        // post migration verification.

        write_txn.commit().expect("Unable to commit");
    }
}