atomix / atomix

A Kubernetes toolkit for building distributed applications using cloud native principles

Home Page: https://atomix.io

Geek Repo:Geek Repo

Github PK Tool:Github PK Tool

Data is not actually clustered / backed up / available; a basic integration test and a real application both demonstrate the bug

doctorpangloss opened this issue · comments

Expected behavior

A map created on a node that leaves should still be available.

You can reproduce this issue using the following test added to core/src/test/java/io/atomix/core/AtomixTest.java:

	@Test
	public void testScaleDownMap() throws Exception {
		List<CompletableFuture<Atomix>> futures = new ArrayList<>();
		futures.add(startAtomix(1, Arrays.asList(1, 2, 3), Profile.dataGrid()));
		futures.add(startAtomix(2, Arrays.asList(1, 2, 3), Profile.dataGrid()));
		futures.add(startAtomix(3, Arrays.asList(1, 2, 3), Profile.dataGrid()));
		Futures.allOf(futures).get(30, TimeUnit.SECONDS);
		AtomicMap<String, String> map = instances.get(0).<String, String>getAtomicMap("test");
		Versioned<String> res1 = map.async().put("test", "test1").get(30, TimeUnit.SECONDS);
		instances.get(0).stop().get(30, TimeUnit.SECONDS);
		AtomicMap<String, String> map2 = instances.get(2).<String, String>getAtomicMap("test");
		Boolean res2 = map2.async().replace("test", "test1", "test2").get(30, TimeUnit.SECONDS);
		assertTrue(res2);
	}

Here is another attempt, this time using a protocol configuration that, by its name, should back up the data:

	@Test
	public void testScaleDownMap() throws Exception {
		List<CompletableFuture<Atomix>> futures = new ArrayList<>();
		futures.add(startAtomix(1, Arrays.asList(1, 2, 3), Profile.dataGrid()));
		futures.add(startAtomix(2, Arrays.asList(1, 2, 3), Profile.dataGrid()));
		futures.add(startAtomix(3, Arrays.asList(1, 2, 3), Profile.dataGrid()));
		Futures.allOf(futures).get(30, TimeUnit.SECONDS);
		MultiPrimaryProtocol protocol = MultiPrimaryProtocol.builder().withBackups(2)
				.withReplication(Replication.SYNCHRONOUS)
				.withRecovery(Recovery.RECOVER)
				.withConsistency(Consistency.LINEARIZABLE).build();
		DistributedMap<String, String> map = instances.get(0).<String, String>mapBuilder("test")
				.withProtocol(protocol)
				.build();
		String res1 = map.async().put("test", "test1").get(30, TimeUnit.SECONDS);
		instances.get(0).stop().get(30, TimeUnit.SECONDS);
		DistributedMap<String, String> map2 = instances.get(2).<String, String>mapBuilder("test").withProtocol(protocol).build();
		Boolean res2 = map2.async().replace("test", "test1", "test2").get(30, TimeUnit.SECONDS);
		assertTrue(res2);
	}

on 3.2.0-snapshot and all prior versions.

Here's the error:


io.atomix.utils.AtomixRuntimeException: java.util.concurrent.ExecutionException: io.atomix.primitive.PrimitiveException$Unavailable

	at io.atomix.primitive.PrimitiveFactory.getPrimitive(PrimitiveFactory.java:93)
	at io.atomix.core.impl.CorePrimitivesService.getAtomicMap(CorePrimitivesService.java:192)
	at io.atomix.core.Atomix.getAtomicMap(Atomix.java:638)
	at io.atomix.core.AtomixTest.testScaleDownMap(AtomixTest.java:422)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.base/java.lang.reflect.Method.invoke(Method.java:564)
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:50)
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:47)
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
	at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:325)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:78)
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:57)
	at org.junit.runners.ParentRunner$3.run(ParentRunner.java:290)
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:71)
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:288)
	at org.junit.runners.ParentRunner.access$000(ParentRunner.java:58)
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:268)
	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
	at org.junit.runners.ParentRunner.run(ParentRunner.java:363)
	at org.junit.runner.JUnitCore.run(JUnitCore.java:137)
	at com.intellij.junit4.JUnit4IdeaTestRunner.startRunnerWithArgs(JUnit4IdeaTestRunner.java:68)
	at com.intellij.rt.junit.IdeaTestRunner$Repeater.startRunnerWithArgs(IdeaTestRunner.java:33)
	at com.intellij.rt.junit.JUnitStarter.prepareStreamsAndStart(JUnitStarter.java:230)
	at com.intellij.rt.junit.JUnitStarter.main(JUnitStarter.java:58)
Caused by: java.util.concurrent.ExecutionException: io.atomix.primitive.PrimitiveException$Unavailable
	at java.base/java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:395)
	at java.base/java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2086)
	at io.atomix.primitive.PrimitiveFactory.getPrimitive(PrimitiveFactory.java:91)
	... 29 more
Caused by: io.atomix.primitive.PrimitiveException$Unavailable
	at io.atomix.protocols.backup.session.PrimaryBackupSessionClient.execute(PrimaryBackupSessionClient.java:179)
	at io.atomix.protocols.backup.session.PrimaryBackupSessionClient.lambda$execute$6(PrimaryBackupSessionClient.java:217)
	at io.atomix.utils.concurrent.ThreadPoolContext.lambda$new$0(ThreadPoolContext.java:81)
	at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)
	at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
	at java.base/java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:304)
	at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1130)
	at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:630)
	at java.base/java.lang.Thread.run(Thread.java:832)

Logs:

Why do all the nodes shut down when only one of them is closed? That is clearly not the intended behavior.

23:42:46.570 [main] INFO  io.atomix.core.Atomix - 3.2.0-SNAPSHOT (revision 842276 built on 2020-07-05 23:42:30)
23:42:46.677 [netty-messaging-event-nio-server-0] INFO  i.a.c.m.impl.NettyMessagingService - TCP server listening for connections on 0.0.0.0:5001
23:42:46.680 [netty-messaging-event-nio-server-0] INFO  i.a.c.m.impl.NettyMessagingService - Started
23:42:46.687 [main] INFO  io.atomix.core.Atomix - 3.2.0-SNAPSHOT (revision 842276 built on 2020-07-05 23:42:30)
23:42:46.691 [netty-messaging-event-nio-server-0] INFO  i.a.c.m.impl.NettyMessagingService - TCP server listening for connections on 0.0.0.0:5002
23:42:46.691 [netty-messaging-event-nio-server-0] INFO  i.a.c.m.impl.NettyMessagingService - Started
23:42:46.696 [netty-unicast-event-nio-client-0] INFO  i.a.c.m.impl.NettyUnicastService - UDP server listening for connections on 0.0.0.0:5002
23:42:46.696 [netty-unicast-event-nio-client-0] INFO  i.a.c.m.impl.NettyUnicastService - UDP server listening for connections on 0.0.0.0:5001
23:42:46.698 [main] INFO  io.atomix.core.Atomix - 3.2.0-SNAPSHOT (revision 842276 built on 2020-07-05 23:42:30)
23:42:46.702 [netty-messaging-event-nio-server-0] INFO  i.a.c.m.impl.NettyMessagingService - TCP server listening for connections on 0.0.0.0:5003
23:42:46.702 [netty-messaging-event-nio-server-0] INFO  i.a.c.m.impl.NettyMessagingService - Started
23:42:46.704 [netty-unicast-event-nio-client-0] INFO  i.a.c.m.impl.NettyUnicastService - UDP server listening for connections on 0.0.0.0:5003
23:42:46.708 [netty-broadcast-event-nio-client-1] INFO  i.a.c.m.impl.NettyBroadcastService - localhost joining multicast group 230.0.0.1 on port 54321
23:42:46.708 [netty-broadcast-event-nio-client-1] INFO  i.a.c.m.impl.NettyBroadcastService - localhost joining multicast group 230.0.0.1 on port 54321
23:42:46.708 [netty-broadcast-event-nio-client-1] INFO  i.a.c.m.impl.NettyBroadcastService - localhost joining multicast group 230.0.0.1 on port 54321
23:42:46.709 [netty-broadcast-event-nio-client-1] INFO  i.a.c.m.impl.NettyBroadcastService - localhost successfully joined multicast group 230.0.0.1 on port 54321
23:42:46.709 [netty-broadcast-event-nio-client-1] INFO  i.a.c.m.impl.NettyBroadcastService - localhost successfully joined multicast group 230.0.0.1 on port 54321
23:42:46.709 [netty-broadcast-event-nio-client-1] INFO  i.a.c.m.impl.NettyBroadcastService - localhost successfully joined multicast group 230.0.0.1 on port 54321
23:42:46.709 [atomix-cluster-0] INFO  i.a.c.d.BootstrapDiscoveryProvider - Joined
23:42:46.709 [atomix-cluster-0] INFO  i.a.c.d.BootstrapDiscoveryProvider - Joined
23:42:46.710 [atomix-cluster-0] INFO  i.a.c.d.BootstrapDiscoveryProvider - Joined
23:42:46.710 [atomix-cluster-0] INFO  i.a.c.p.SwimMembershipProtocol - 2 - Member activated: Member{id=2, address=localhost:5002, host=localhost, properties={}}
23:42:46.710 [atomix-cluster-0] INFO  i.a.c.p.SwimMembershipProtocol - 3 - Member activated: Member{id=3, address=localhost:5003, host=localhost, properties={}}
23:42:46.710 [atomix-cluster-0] INFO  i.a.c.p.SwimMembershipProtocol - 1 - Member activated: Member{id=1, address=localhost:5001, host=localhost, properties={}}
23:42:46.714 [atomix-cluster-0] INFO  i.a.c.p.SwimMembershipProtocol - Started
23:42:46.714 [atomix-cluster-0] INFO  i.a.c.p.SwimMembershipProtocol - Started
23:42:46.714 [atomix-cluster-0] INFO  i.a.c.p.SwimMembershipProtocol - Started
23:42:46.715 [atomix-cluster-0] INFO  i.a.c.i.DefaultClusterMembershipService - Started
23:42:46.715 [atomix-cluster-0] INFO  i.a.c.i.DefaultClusterMembershipService - Started
23:42:46.715 [atomix-cluster-0] INFO  i.a.c.i.DefaultClusterMembershipService - Started
23:42:46.715 [atomix-cluster-0] INFO  i.a.c.m.i.DefaultClusterCommunicationService - Started
23:42:46.715 [atomix-cluster-0] INFO  i.a.c.m.i.DefaultClusterCommunicationService - Started
23:42:46.715 [atomix-cluster-0] INFO  i.a.c.m.i.DefaultClusterCommunicationService - Started
23:42:46.716 [atomix-cluster-0] INFO  i.a.c.m.i.DefaultClusterEventService - Started
23:42:46.716 [atomix-cluster-0] INFO  i.a.c.m.i.DefaultClusterEventService - Started
23:42:46.716 [atomix-cluster-0] INFO  i.a.c.m.i.DefaultClusterEventService - Started
23:42:46.721 [atomix-0] INFO  i.a.p.p.i.DefaultPartitionGroupMembershipService - Started
23:42:46.721 [atomix-0] INFO  i.a.p.p.i.DefaultPartitionGroupMembershipService - Started
23:42:46.721 [atomix-0] INFO  i.a.p.p.i.DefaultPartitionGroupMembershipService - Started
23:42:46.736 [atomix-0] INFO  i.a.p.p.i.HashBasedPrimaryElectionService - Started
23:42:46.736 [atomix-0] INFO  i.a.p.p.i.HashBasedPrimaryElectionService - Started
23:42:46.736 [atomix-0] INFO  i.a.p.p.i.HashBasedPrimaryElectionService - Started
23:42:46.826 [atomix-0] INFO  i.a.p.b.p.PrimaryBackupPartitionGroup - Started
23:42:46.826 [atomix-0] INFO  i.a.p.b.p.PrimaryBackupPartitionGroup - Started
23:42:46.826 [atomix-0] INFO  i.a.p.b.p.PrimaryBackupPartitionGroup - Started
23:42:47.084 [atomix-system-26] INFO  i.a.p.b.p.PrimaryBackupPartitionGroup - Started
23:42:47.084 [atomix-system-26] INFO  i.a.p.p.impl.DefaultPartitionService - Started
23:42:47.132 [atomix-system-1] INFO  i.a.p.b.p.PrimaryBackupPartitionGroup - Started
23:42:47.132 [atomix-system-1] INFO  i.a.p.p.impl.DefaultPartitionService - Started
23:42:47.157 [atomix-system-18] INFO  i.a.p.b.p.PrimaryBackupPartitionGroup - Started
23:42:47.157 [atomix-system-18] INFO  i.a.p.p.impl.DefaultPartitionService - Started
23:42:47.275 [atomix-system-19] INFO  i.a.core.impl.CoreTransactionService - Started
23:42:47.275 [atomix-system-13] INFO  i.a.core.impl.CoreTransactionService - Started
23:42:47.275 [atomix-system-25] INFO  i.a.core.impl.CoreTransactionService - Started
23:42:47.275 [atomix-system-19] INFO  i.a.core.impl.CorePrimitivesService - Started
23:42:47.275 [atomix-system-13] INFO  i.a.core.impl.CorePrimitivesService - Started
23:42:47.275 [atomix-system-25] INFO  i.a.core.impl.CorePrimitivesService - Started
23:42:47.595 [atomix-system-4] INFO  i.a.core.impl.CorePrimitivesService - Stopped
23:42:47.599 [ForkJoinPool.commonPool-worker-5] INFO  i.a.p.b.p.PrimaryBackupPartitionGroup - Stopped
23:42:47.606 [ForkJoinPool.commonPool-worker-19] INFO  i.a.p.b.p.PrimaryBackupPartitionGroup - Stopped
23:42:47.607 [ForkJoinPool.commonPool-worker-19] INFO  i.a.p.p.i.DefaultPartitionGroupMembershipService - Stopped
23:42:47.607 [ForkJoinPool.commonPool-worker-19] INFO  i.a.p.p.impl.DefaultPartitionService - Stopped
23:42:47.608 [atomix-0] INFO  i.a.c.m.i.DefaultClusterCommunicationService - Stopped
23:42:47.609 [atomix-cluster-0] INFO  i.a.c.m.i.DefaultClusterEventService - Stopped
23:42:47.609 [atomix-cluster-0] INFO  i.a.c.p.SwimMembershipProtocol - 1 - Member deactivated: Member{id=1, address=localhost:5001, host=localhost, properties={}}
23:42:47.610 [atomix-cluster-0] INFO  i.a.c.p.SwimMembershipProtocol - Stopped
23:42:47.610 [atomix-cluster-0] INFO  i.a.c.d.BootstrapDiscoveryProvider - Left
23:42:47.611 [atomix-cluster-0] INFO  i.a.c.i.DefaultClusterMembershipService - Stopped
23:42:49.851 [ForkJoinPool.commonPool-worker-5] INFO  i.a.c.m.impl.NettyMessagingService - Stopped
23:42:49.851 [atomix-cluster-0] INFO  io.atomix.cluster.AtomixCluster - Stopped
23:42:55.192 [atomix-system-31] INFO  i.a.core.impl.CorePrimitivesService - Stopped
23:42:55.192 [atomix-system-2] INFO  i.a.core.impl.CorePrimitivesService - Stopped
23:42:55.193 [ForkJoinPool.commonPool-worker-19] INFO  i.a.p.b.p.PrimaryBackupPartitionGroup - Stopped
23:42:55.193 [ForkJoinPool.commonPool-worker-5] INFO  i.a.p.b.p.PrimaryBackupPartitionGroup - Stopped
23:42:55.198 [ForkJoinPool.commonPool-worker-31] INFO  i.a.p.b.p.PrimaryBackupPartitionGroup - Stopped
23:42:55.198 [ForkJoinPool.commonPool-worker-19] INFO  i.a.p.b.p.PrimaryBackupPartitionGroup - Stopped
23:42:55.198 [ForkJoinPool.commonPool-worker-31] INFO  i.a.p.p.i.DefaultPartitionGroupMembershipService - Stopped
23:42:55.198 [ForkJoinPool.commonPool-worker-19] INFO  i.a.p.p.i.DefaultPartitionGroupMembershipService - Stopped
23:42:55.198 [ForkJoinPool.commonPool-worker-31] INFO  i.a.p.p.impl.DefaultPartitionService - Stopped
23:42:55.198 [ForkJoinPool.commonPool-worker-19] INFO  i.a.p.p.impl.DefaultPartitionService - Stopped
23:42:55.199 [atomix-0] INFO  i.a.c.m.i.DefaultClusterCommunicationService - Stopped
23:42:55.199 [atomix-0] INFO  i.a.c.m.i.DefaultClusterCommunicationService - Stopped
23:42:55.199 [atomix-cluster-0] INFO  i.a.c.m.i.DefaultClusterEventService - Stopped
23:42:55.199 [atomix-cluster-0] INFO  i.a.c.m.i.DefaultClusterEventService - Stopped
23:42:55.199 [atomix-cluster-0] INFO  i.a.c.p.SwimMembershipProtocol - 2 - Member deactivated: Member{id=2, address=localhost:5002, host=localhost, properties={}}
23:42:55.199 [atomix-cluster-0] INFO  i.a.c.p.SwimMembershipProtocol - 3 - Member deactivated: Member{id=3, address=localhost:5003, host=localhost, properties={}}
23:42:55.199 [atomix-cluster-0] INFO  i.a.c.p.SwimMembershipProtocol - Stopped
23:42:55.199 [atomix-cluster-0] INFO  i.a.c.p.SwimMembershipProtocol - Stopped
23:42:55.199 [atomix-cluster-0] INFO  i.a.c.d.BootstrapDiscoveryProvider - Left
23:42:55.199 [atomix-cluster-0] INFO  i.a.c.d.BootstrapDiscoveryProvider - Left
23:42:55.199 [atomix-cluster-0] INFO  i.a.c.i.DefaultClusterMembershipService - Stopped
23:42:55.200 [atomix-cluster-0] INFO  i.a.c.i.DefaultClusterMembershipService - Stopped
23:42:57.421 [ForkJoinPool.commonPool-worker-31] INFO  i.a.c.m.impl.NettyMessagingService - Stopped
23:42:57.421 [ForkJoinPool.commonPool-worker-19] INFO  i.a.c.m.impl.NettyMessagingService - Stopped
23:42:57.421 [atomix-cluster-0] INFO  io.atomix.cluster.AtomixCluster - Stopped
23:42:57.421 [atomix-cluster-0] INFO  io.atomix.cluster.AtomixCluster - Stopped

There do not appear to be any tests inside the atomix codebase that validate the accessibility or writeability of data after a node is stopped, which is pretty surprising.

Does stopping a node destroy the data created by that node? Is that really how things should work?

Original Ticket:

Trying to author an integration test demonstrating 3 local Atomix class instances where one leaves the cluster. I'm using atomix-vertx but these are Atomix issues.

Actual behavior

I've tried an incredible number of configurations and so far I cannot make Atomix maps work after one node of three leaves.

Using Raft, I see this when a node leaves:

RaftServer{spellsource-data-partition-1}{role=FOLLOWER} - java.net.ConnectException

Using Multi-Primary backup, I see this when a node leaves:

20200705T221552 atomix-vertx-0 ERROR i.a.u.c.SingleThreadContext An uncaught exception occurred
io.atomix.primitive.PrimitiveException$Timeout: null
	at io.atomix.core.map.impl.BlockingAtomicMap.complete(BlockingAtomicMap.java:223)
	at io.atomix.core.map.impl.BlockingAtomicMap.get(BlockingAtomicMap.java:85)
	at io.atomix.vertx.AtomixMap.get(AtomixMap.java:67)

Reproduction:

Configuration from your atomix-vertx library, for Raft:

static Atomix create(int port, Node... nodes) {
		var hostIpAddress = "localhost";
		var memberId = getMemberId(port, hostIpAddress);

		var path = "build/atomix/" + memberId.replace(":", "_");
		try {
			Files.createDirectories(Path.of(path));
		} catch (IOException e) {
			throw new RuntimeException(e);
		}
		AtomixBuilder atomixBuilder = Atomix.builder()
				.withClusterId("spellsource")
				.withMemberId(memberId)
				.withHost(hostIpAddress)
				.withPort(port);

		var consensusProfileBuilder = ConsensusProfile.builder()
				.withDataPath(path);
		if (nodes.length == 0) {
			atomixBuilder
					.withProfiles(Profile.dataGrid(1)/*, consensusProfileBuilder.withMembers(memberId).build()*/);
		} else {
			var members = Arrays
					.stream(nodes)
					.map(Node::id)
					.map(NodeId::id).collect(toSet());
			LOGGER.info("create: members={}", members);
			atomixBuilder
					.withMembershipProtocol(SwimMembershipProtocol.builder()
							.withBroadcastDisputes(true)
							.withBroadcastUpdates(true)
							.withProbeInterval(Duration.ofMillis(100))
							.withNotifySuspect(true)
							.withFailureTimeout(Duration.ofSeconds(3))
							.build())
					.withMembershipProvider(new BootstrapDiscoveryProvider(nodes))
					.withManagementGroup(RaftPartitionGroup.builder("system")
							.withNumPartitions(1)
							.withMembers(members)
							.withPartitionSize(nodes.length)
							.withDataDirectory(new File(path))
							.build())
					.withPartitionGroups(RaftPartitionGroup.builder("spellsource-data")
							.withNumPartitions(3)
							.withPartitionSize(nodes.length)
							.withMembers(members)
							.withDataDirectory(new File(path + "/spellsource-data"))
							.build());
		}
		return atomixBuilder
				.build();
	}

For multi-primary backup:

static Atomix create(int port, Node... nodes) {
		var hostIpAddress = "localhost";
		var memberId = getMemberId(port, hostIpAddress);

		var path = "build/atomix/" + memberId.replace(":", "_");
		try {
			Files.createDirectories(Path.of(path));
		} catch (IOException e) {
			throw new RuntimeException(e);
		}
		AtomixBuilder atomixBuilder = Atomix.builder()
				.withClusterId("spellsource")
				.withMemberId(memberId)
				.withHost(hostIpAddress)
				.withPort(port);

		var consensusProfileBuilder = ConsensusProfile.builder()
				.withDataPath(path);
		if (nodes.length == 0) {
			atomixBuilder
					.withProfiles(Profile.dataGrid(1)/*, consensusProfileBuilder.withMembers(memberId).build()*/);
		} else {
			var members = Arrays
					.stream(nodes)
					.map(Node::id)
					.map(NodeId::id).collect(toSet());
			LOGGER.info("create: members={}", members);
			atomixBuilder
					.withMembershipProtocol(SwimMembershipProtocol.builder()
							.withBroadcastDisputes(true)
							.withBroadcastUpdates(true)
							.withProbeInterval(Duration.ofMillis(100))
							.withNotifySuspect(true)
							.withFailureTimeout(Duration.ofSeconds(3))
							.build())
					.withMembershipProvider(new BootstrapDiscoveryProvider(nodes))
					.withManagementGroup(PrimaryBackupPartitionGroup.builder("system")
							.withNumPartitions(1)
							/*
							.withMembers(members)
							.withPartitionSize(nodes.length)
							.withDataDirectory(new File(path))*/
							.build())
					.withPartitionGroups(PrimaryBackupPartitionGroup.builder("spellsource-data")
							.withNumPartitions(3)
							/*
							.withPartitionSize(nodes.length)
							.withMembers(members)
							.withDataDirectory(new File(path + "/spellsource-data"))*/
							.build());
		}
		return atomixBuilder
				.build();
	}

Environment

  • Atomix: 3.1.8
  • OS: macOS
  • JVM: OpenJDK 13