Portainer Agent: Unable to retrieve local agent IP address
kevdogg opened this issue
Hi, I'm running agent version 2.0.0 on an Arch Linux host with Docker.
I'm running Docker in swarm mode; however, I'm just connecting single instances (no replicas) over the swarm network.
The actual Portainer installation is on a different Docker daemon.
My docker-compose file appears as follows (these are snippets):
networks:
  net:
    name: net
    driver: bridge
  watchtower-ubuntumc:
    name: watchtower_ubuntumc
    driver: bridge
  openldap-net:
    name: openldap-net
    driver: overlay
    attachable: true
    ipam:
      config:
        - subnet: 10.90.0.0/24

services:
  portainer-agent:
    container_name: portainer-agent
    image: portainer/agent:2.0.0
    labels:
      - "com.centurylinklabs.watchtower.scope=archzfsproxy"
    restart: always
    secrets:
      - portainer-agent_secret
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /var/lib/docker/volumes:/var/lib/docker/volumes
    ports:
      - target: 9001
        published: 9001
        protocol: tcp
        mode: host
    networks:
      - net
    deploy:
      mode: global
      placement:
        constraints: [node.platform.os == linux]
    environment:
      - AGENT_SECRET=/run/secrets/portainer-agent_secret
      - LOG_LEVEL=debug
I'm getting the following errors when starting the Portainer agent:
2020/12/26 08:39:05 [INFO] [main] [message: Agent running on Docker platform]
2020/12/26 08:39:05 [DEBUG] [main,configuration] [Member tags: &{AgentPort:9001 EdgeKeySet:false NodeName:archZFSProxy.domain.com DockerConfiguration:{EngineStatus:2 Leader:true NodeRole:1} KubernetesConfiguration:{}}]
2020/12/26 08:39:05 [INFO] [main] [message: Agent running on a Swarm cluster node. Running in cluster mode]
2020/12/26 08:39:05 [DEBUG] [docker] [network_name: net] [scope: local] [ingress: false] [message: Skipping invalid container network]
2020/12/26 08:39:05 [ERROR] [main,docker] [message: Unable to retrieve local agent IP address] [error: unable to retrieve the address on which the agent can advertise. Check your network settings]
sudo docker inspect portainer-agent
[
{
"Id": "071c8718c5936d231ac13d9747f2c278a00c97d5e62ae3fa0d736687c151baa7",
"Created": "2020-12-26T08:34:55.386882817Z",
"Path": "./agent",
"Args": [],
"State": {
"Status": "restarting",
"Running": true,
"Paused": false,
"Restarting": true,
"OOMKilled": false,
"Dead": false,
"Pid": 0,
"ExitCode": 1,
"Error": "",
"StartedAt": "2020-12-26T08:40:07.95189253Z",
"FinishedAt": "2020-12-26T08:40:08.046910704Z"
},
"Image": "sha256:6b367a5c4fe3e272ae9fb6cf8393856562c99a6eb41d2922c8ab75a3e37d1813",
"ResolvConfPath": "/var/lib/docker/containers/071c8718c5936d231ac13d9747f2c278a00c97d5e62ae3fa0d736687c151baa7/resolv.conf",
"HostnamePath": "/var/lib/docker/containers/071c8718c5936d231ac13d9747f2c278a00c97d5e62ae3fa0d736687c151baa7/hostname",
"HostsPath": "/var/lib/docker/containers/071c8718c5936d231ac13d9747f2c278a00c97d5e62ae3fa0d736687c151baa7/hosts",
"LogPath": "/var/lib/docker/containers/071c8718c5936d231ac13d9747f2c278a00c97d5e62ae3fa0d736687c151baa7/071c8718c5936d231ac13d9747f2c278a00c97d5e62ae3fa0d736687c151baa7-json.log",
"Name": "/portainer-agent",
"RestartCount": 14,
"Driver": "zfs",
"Platform": "linux",
"MountLabel": "",
"ProcessLabel": "",
"AppArmorProfile": "",
"ExecIDs": null,
"HostConfig": {
"Binds": [
"/var/run/docker.sock:/var/run/docker.sock:rw",
"/var/lib/docker/volumes:/var/lib/docker/volumes:rw"
],
"ContainerIDFile": "",
"LogConfig": {
"Type": "json-file",
"Config": {}
},
"NetworkMode": "net",
"PortBindings": {
"9001/tcp": [
{
"HostIp": "",
"HostPort": "9001"
}
]
},
"RestartPolicy": {
"Name": "always",
"MaximumRetryCount": 0
},
"AutoRemove": false,
"VolumeDriver": "",
"VolumesFrom": [],
"CapAdd": null,
"CapDrop": null,
"CgroupnsMode": "host",
"Dns": null,
"DnsOptions": null,
"DnsSearch": null,
"ExtraHosts": null,
"GroupAdd": null,
"IpcMode": "private",
"Cgroup": "",
"Links": null,
"OomScoreAdj": 0,
"PidMode": "",
"Privileged": false,
"PublishAllPorts": false,
"ReadonlyRootfs": false,
"SecurityOpt": null,
"UTSMode": "",
"UsernsMode": "",
"ShmSize": 67108864,
"Runtime": "runc",
"ConsoleSize": [
0,
0
],
"Isolation": "",
"CpuShares": 0,
"Memory": 0,
"NanoCpus": 0,
"CgroupParent": "",
"BlkioWeight": 0,
"BlkioWeightDevice": null,
"BlkioDeviceReadBps": null,
"BlkioDeviceWriteBps": null,
"BlkioDeviceReadIOps": null,
"BlkioDeviceWriteIOps": null,
"CpuPeriod": 0,
"CpuQuota": 0,
"CpuRealtimePeriod": 0,
"CpuRealtimeRuntime": 0,
"CpusetCpus": "",
"CpusetMems": "",
"Devices": null,
"DeviceCgroupRules": null,
"DeviceRequests": null,
"KernelMemory": 0,
"KernelMemoryTCP": 0,
"MemoryReservation": 0,
"MemorySwap": 0,
"MemorySwappiness": null,
"OomKillDisable": false,
"PidsLimit": null,
"Ulimits": null,
"CpuCount": 0,
"CpuPercent": 0,
"IOMaximumIOps": 0,
"IOMaximumBandwidth": 0,
"Mounts": [
{
"Type": "bind",
"Source": "/etc/docker/compose/authelia/portainer-agent.secret",
"Target": "/run/secrets/portainer-agent_secret",
"ReadOnly": true
}
],
"MaskedPaths": [
"/proc/asound",
"/proc/acpi",
"/proc/kcore",
"/proc/keys",
"/proc/latency_stats",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/proc/scsi",
"/sys/firmware"
],
"ReadonlyPaths": [
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger"
]
},
"GraphDriver": {
"Data": {
"Dataset": "tank/ROOT/default/a640d79fa430beb1bf25c16f10f7c32a261518057af75bacd6383ddd61721168",
"Mountpoint": "/var/lib/docker/zfs/graph/a640d79fa430beb1bf25c16f10f7c32a261518057af75bacd6383ddd61721168"
},
"Name": "zfs"
},
"Mounts": [
{
"Type": "bind",
"Source": "/var/run/docker.sock",
"Destination": "/var/run/docker.sock",
"Mode": "rw",
"RW": true,
"Propagation": "rprivate"
},
{
"Type": "bind",
"Source": "/var/lib/docker/volumes",
"Destination": "/var/lib/docker/volumes",
"Mode": "rw",
"RW": true,
"Propagation": "rslave"
},
{
"Type": "bind",
"Source": "/etc/docker/compose/authelia/portainer-agent.secret",
"Destination": "/run/secrets/portainer-agent_secret",
"Mode": "",
"RW": false,
"Propagation": "rprivate"
}
],
"Config": {
"Hostname": "071c8718c593",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"ExposedPorts": {
"9001/tcp": {}
},
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"AGENT_SECRET=/run/secrets/portainer-agent_secret",
"LOG_LEVEL=debug",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"Cmd": null,
"Image": "portainer/agent:2.0.0",
"Volumes": {
"/var/lib/docker/volumes": {},
"/var/run/docker.sock": {}
},
"WorkingDir": "/app",
"Entrypoint": [
"./agent"
],
"OnBuild": null,
"Labels": {
"com.centurylinklabs.watchtower.scope": "archzfsproxy",
"com.docker.compose.config-hash": "109b860d435924a1cde851eeea0fd7b5a528fd8df1de9faeb2bc641cbd2ece1f",
"com.docker.compose.container-number": "1",
"com.docker.compose.oneoff": "False",
"com.docker.compose.project": "authelia",
"com.docker.compose.project.config_files": "docker-compose.yml",
"com.docker.compose.project.working_dir": "/etc/docker/compose/authelia",
"com.docker.compose.service": "portainer-agent",
"com.docker.compose.version": "1.27.4"
}
},
"NetworkSettings": {
"Bridge": "",
"SandboxID": "b93d11a21fd3359f1e0f633bf24e9cd591f13da25d9e4eb2b32bac058f4ed9da",
"HairpinMode": false,
"LinkLocalIPv6Address": "",
"LinkLocalIPv6PrefixLen": 0,
"Ports": {},
"SandboxKey": "/var/run/docker/netns/b93d11a21fd3",
"SecondaryIPAddresses": null,
"SecondaryIPv6Addresses": null,
"EndpointID": "",
"Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"MacAddress": "",
"Networks": {
"net": {
"IPAMConfig": null,
"Links": null,
"Aliases": [
"071c8718c593",
"portainer-agent"
],
"NetworkID": "9ef3e7f522b228bd92824e6c142a2b04b7e2f7eba9e2e6b75655a541b25dc023",
"EndpointID": "",
"Gateway": "",
"IPAddress": "",
"IPPrefixLen": 0,
"IPv6Gateway": "",
"GlobalIPv6Address": "",
"GlobalIPv6PrefixLen": 0,
"MacAddress": "",
"DriverOpts": null
}
}
}
}
]
I've seen this error reported about a year ago (#95), but I didn't see what the actual solution was. Obviously, if I use the TCP/direct route with OpenSSL self-signed client/server certs, I bypass the agent and don't get the error.
Hi @kevdogg
The agent must be part of an overlay network to work properly on a Swarm cluster. Have you tried updating the driver property of the net network to overlay?
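For reference, a minimal sketch of that change against the compose snippet above; the attachable flag is my assumption (it lets a compose-managed container join the overlay) and is not something confirmed in this thread:

networks:
  net:
    name: net
    driver: overlay     # changed from "bridge"
    attachable: true    # assumption: needed so a standalone (compose-started) container can attach to the overlay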
I have this same problem. I've got a feeling that it's to do with the fact that my server has a Wi-Fi connection and Ethernet connection, both with different IP addresses. Is there a way to get the agent to just bind to one of the IP addresses? e.g. [docker deployment command] --bind 192.168.10.4:9001
I've got a feeling that it's to do with the fact that my server has a Wi-Fi connection and Ethernet connection both with different IP addresses
I don't think that's the issue. My home server has an (internal) static IP address (via Ethernet, no Wi-Fi connection); however, my public IP address changes (but I'm using DynDNS, so this shouldn't be a problem).
is there a way to get the agent to just bind to one of the IP addresses. eg: [docker deployment command] --bind 192.168.10.4:9001
That's possible via the -p/--publish argument: https://docs.docker.com/config/containers/container-networking/
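For what it's worth, the published port can be pinned to a single host address with the HOST_IP:HOST_PORT:CONTAINER_PORT form of -p. A minimal sketch reusing the 192.168.10.4 address from the question above and the bind mounts from the agent setup earlier in this thread:

docker run -d \
  --name portainer_agent \
  --restart=always \
  -p 192.168.10.4:9001:9001 \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -v /var/lib/docker/volumes:/var/lib/docker/volumes \
  portainer/agent:2.0.0
  # note: this only pins the host-side listener to one interface; it does not change the address the agent advertises inside a swarm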
I am still getting this issue; I have tried many different versions of the agent:
2022/03/08 04:39:20 [INFO] [main] [message: Agent running on Docker platform]
2022/03/08 04:39:20 [INFO] [main] [message: Agent running on a Swarm cluster node. Running in cluster mode]
2022/03/08 04:39:20 [ERROR] [main,docker] [message: Unable to retrieve local agent IP address] [error: unable to retrieve the address on which the agent can advertise. Check your network settings]
Any ideas? It's running on a bridge network; I have tried to change it over to an overlay network, but the Portainer UI does not let me select any overlay networks from the drop-down box.
nvm, this fixed it here: #95 (comment)
I had the same issue. I fixed it by running this command:
sudo docker swarm leave --force
(In my case, I don't need swarm)
Leaving the swarm isn't a solution for many of us who use Docker Swarm, though.
With the merge of #239 this shouldn't be an issue anymore, I believe. Give portainerci/agent:develop a try to see if that solves the issue.
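A minimal sketch of testing that build with the compose file from the top of this thread; only the image line changes, and I'm assuming everything else can stay as-is:

services:
  portainer-agent:
    image: portainerci/agent:develop   # dev build with the #239 change, instead of portainer/agent:2.0.0
    # ...remaining options unchanged from the original service definition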
Sorry @deviantony, does this mean that this won't be a problem anymore in the next version of Portainer?
I'm using Portainer 2.14.2 on both a desktop PC (running Docker in the Linux subsystem) and a Synology server. I have been trying to connect the two to each other via the agent, but it gives the error log below on the desktop PC.
Neither Docker instance is swarm-enabled; they are standalone Docker hosts, and I'm purely trying to connect them via the agent, which was the easiest way.
Is there a solution to this yet?
I even added the network host option to the agent, but that did not solve the issue either.
2022/08/21 11:18:31 [INFO] [main] [message: Agent running on Docker platform]
2022/08/21 11:18:31 [WARN] [main] [message: Unable to retrieve agent container IP address, using '0.0.0.0' instead] [error: Error: No such container: docker-desktop]
2022/08/21 11:18:31 [INFO] [edge] [message: Edge key loaded from options]
2022/08/21 11:18:31 [INFO] [edge,registry] [message: Starting registry credential server]
2022/08/21 11:18:31 [INFO] [http] [server_addr: 0.0.0.0] [server_port: 9001] [use_tls: false] [api_version: 2.14.2] [message: Starting Agent API server]
2022/08/21 11:18:51 client: Connecting to ws://portainer.costxforbreakfast.com:8000
2022/08/21 11:19:02 client: Connection error: dial tcp 172.67.198.19:8000: operation was canceled
2022/08/21 11:19:02 client: Give up
2022/08/21 11:19:38 client: Connecting to ws://portainer.costxforbreakfast.com:8000
2022/08/21 11:19:48 client: Connection error: dial tcp 172.67.198.19:8000: operation was canceled
2022/08/21 11:19:48 client: Give up
2022/08/21 11:20:37 client: Connecting to ws://portainer.costxforbreakfast.com:8000
2022/08/21 11:20:47 client: Connection error: dial tcp 172.67.198.19:8000: operation was canceled
2022/08/21 11:20:47 client: Give up
2022/08/21 11:21:37 client: Connecting to ws://portainer.costxforbreakfast.com:8000
2022/08/21 11:21:48 client: Connection error: dial tcp 172.67.198.19:8000: operation was canceled
2022/08/21 11:21:48 client: Give up
2022/08/21 11:22:37 client: Connecting to ws://portainer.costxforbreakfast.com:8000
2022/08/21 11:22:48 client: Connection error: dial tcp 172.67.198.19:8000: operation was canceled
2022/08/21 11:22:48 client: Give up
2022/08/21 11:23:37 client: Connecting to ws://portainer.costxforbreakfast.com:8000
2022/08/21 11:23:47 client: Connection error: dial tcp 104.21.52.103:8000: operation was canceled
2022/08/21 11:23:47 client: Give up
2022/08/21 11:24:38 client: Connecting to ws://portainer.costxforbreakfast.com:8000
2022/08/21 11:24:46 client: Connection error: dial tcp 104.21.52.103:8000: operation was canceled
2022/08/21 11:24:46 client: Give up
2022/08/21 11:25:52 client: Connecting to ws://portainer.costxforbreakfast.com:8000
2022/08/21 11:26:02 client: Connection error: dial tcp 104.21.52.103:8000: operation was canceled
2022/08/21 11:26:02 client: Give up
2022/08/21 11:27:18 client: Connecting to ws://portainer.costxforbreakfast.com:8000
2022/08/21 11:27:27 client: Connection error: dial tcp 104.21.52.103:8000: operation was canceled
2022/08/21 11:27:27 client: Give up
2022/08/21 11:28:18 client: Connecting to ws://portainer.costxforbreakfast.com:8000
2022/08/21 11:28:27 client: Connection error: dial tcp 104.21.52.103:8000: operation was canceled
2022/08/21 11:28:27 client: Give up
2022/08/21 11:28:37 client: Connecting to ws://portainer.costxforbreakfast.com:8000
2022/08/21 11:28:47 client: Connection error: dial tcp 104.21.52.103:8000: operation was canceled
2022/08/21 11:28:47 client: Give up
Any updates on this issue?
Found a solution in #95 (here); TL;DR: add the env var AGENT_CLUSTER_ADDR=localhost:
docker run -d \
-e AGENT_CLUSTER_ADDR=localhost \
-p 9001:9001 \
--name portainer_agent \
--restart=always \
-v /var/run/docker.sock:/var/run/docker.sock \
-v /var/lib/docker/volumes:/var/lib/docker/volumes \
portainer/agent:2.19.4
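For anyone on a compose setup like the one at the top of this thread, the same workaround is just an extra environment entry; a sketch, assuming the rest of the service definition stays unchanged:

services:
  portainer-agent:
    image: portainer/agent:2.19.4
    environment:
      - AGENT_CLUSTER_ADDR=localhost   # workaround from #95: sets the cluster address explicitly so the failing auto-detection is skipped
      - AGENT_SECRET=/run/secrets/portainer-agent_secret
      - LOG_LEVEL=debug
    # ...remaining options unchanged from the original service definition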