/*
 *
 * Copyright (c) 2020-2022 Project CHIP Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

use crate::{
    data_model::objects::{ClusterType, Endpoint},
    error::*,
    interaction_model::{core::IMStatusCode, messages::GenericPath},
    // TODO: This layer shouldn't really depend on the TLV layer, should create an abstraction layer
};
use std::fmt;

use super::{ClusterId, DeviceType, EndptId};

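/// Consumer of data-model change notifications
///
/// A `ChangeConsumer` registered on a `Node` (via `Node::set_changes_cb`) is invoked
/// whenever an endpoint is added, and may populate the endpoint before it is stored.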
pub trait ChangeConsumer {
    fn endpoint_added(&self, id: EndptId, endpoint: &mut Endpoint) -> Result<(), Error>;
}

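/// Maximum number of endpoints a single `Node` (accessory) can hold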
pub const ENDPTS_PER_ACC: usize = 3;

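/// A slice of optionally occupied, boxed endpoints, as stored inside a `Node`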
pub type BoxedEndpoints = [Option<Box<Endpoint>>];

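/// The root of the data model
///
/// A `Node` holds up to `ENDPTS_PER_ACC` endpoints and an optional `ChangeConsumer`
/// that is notified whenever an endpoint is added.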
#[derive(Default)]
pub struct Node {
    endpoints: [Option<Box<Endpoint>>; ENDPTS_PER_ACC],
    changes_cb: Option<Box<dyn ChangeConsumer>>,
}

impl std::fmt::Display for Node {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "node:")?;
        for (i, element) in self.endpoints.iter().enumerate() {
            if let Some(e) = element {
                writeln!(f, "endpoint {}: {}", i, e)?;
            }
        }
        write!(f, "")
    }
}

impl Node {
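    /// Create an empty, boxed `Node` with no endpoints and no change callback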
    pub fn new() -> Result<Box<Node>, Error> {
        let node = Box::default();
        Ok(node)
    }

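    /// Register the consumer that is notified of changes (currently, endpoint additions)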
    pub fn set_changes_cb(&mut self, consumer: Box<dyn ChangeConsumer>) {
        self.changes_cb = Some(consumer);
    }

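    /// Add an endpoint of the given device type in the first free slot
    ///
    /// Returns the id of the new endpoint, or `Error::NoSpace` if all `ENDPTS_PER_ACC`
    /// slots are already occupied. The registered `ChangeConsumer`, if any, is given a
    /// chance to populate the endpoint before it is stored.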
    pub fn add_endpoint(&mut self, dev_type: DeviceType) -> Result<EndptId, Error> {
        let index = self
            .endpoints
            .iter()
            .position(|x| x.is_none())
            .ok_or(Error::NoSpace)?;
        let mut endpoint = Endpoint::new(dev_type)?;
        if let Some(cb) = &self.changes_cb {
            cb.endpoint_added(index as EndptId, &mut endpoint)?;
        }
        self.endpoints[index] = Some(endpoint);
        Ok(index as EndptId)
    }

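    /// Return a shared reference to the endpoint with the given id, or
    /// `Error::EndpointNotFound` if the id is out of range or the slot is empty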
    pub fn get_endpoint(&self, endpoint_id: EndptId) -> Result<&Endpoint, Error> {
        if (endpoint_id as usize) < ENDPTS_PER_ACC {
            let endpoint = self.endpoints[endpoint_id as usize]
                .as_ref()
                .ok_or(Error::EndpointNotFound)?;
            Ok(endpoint)
        } else {
            Err(Error::EndpointNotFound)
        }
    }

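    /// Return a mutable reference to the endpoint with the given id, or
    /// `Error::EndpointNotFound` if the id is out of range or the slot is empty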
    pub fn get_endpoint_mut(&mut self, endpoint_id: EndptId) -> Result<&mut Endpoint, Error> {
        if (endpoint_id as usize) < ENDPTS_PER_ACC {
            let endpoint = self.endpoints[endpoint_id as usize]
                .as_mut()
                .ok_or(Error::EndpointNotFound)?;
            Ok(endpoint)
        } else {
            Err(Error::EndpointNotFound)
        }
    }

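    /// Return a mutable reference to cluster `c` on endpoint `e`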
    pub fn get_cluster_mut(
        &mut self,
        e: EndptId,
        c: ClusterId,
    ) -> Result<&mut dyn ClusterType, Error> {
        self.get_endpoint_mut(e)?.get_cluster_mut(c)
    }

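    /// Return a shared reference to cluster `c` on endpoint `e`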
    pub fn get_cluster(&self, e: EndptId, c: ClusterId) -> Result<&dyn ClusterType, Error> {
        self.get_endpoint(e)?.get_cluster(c)
    }

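    /// Add a cluster to the endpoint with the given id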
    pub fn add_cluster(
        &mut self,
        endpoint_id: EndptId,
        cluster: Box<dyn ClusterType>,
    ) -> Result<(), Error> {
        let endpoint_id = endpoint_id as usize;
        if endpoint_id < ENDPTS_PER_ACC {
            self.endpoints[endpoint_id]
                .as_mut()
                .ok_or(Error::NoEndpoint)?
                .add_cluster(cluster)
        } else {
            Err(Error::Invalid)
        }
    }

    /// Returns a slice of endpoints, with either a single endpoint or all (wildcard)
    pub fn get_wildcard_endpoints(
        &self,
        endpoint: Option<EndptId>,
    ) -> Result<(&BoxedEndpoints, usize, bool), IMStatusCode> {
        if let Some(e) = endpoint {
            let e = e as usize;
            if self.endpoints.len() <= e || self.endpoints[e].is_none() {
                Err(IMStatusCode::UnsupportedEndpoint)
            } else {
                Ok((&self.endpoints[e..e + 1], e, false))
            }
        } else {
            Ok((&self.endpoints[..], 0, true))
        }
    }

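    /// Like `get_wildcard_endpoints`, but returns mutable references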
    pub fn get_wildcard_endpoints_mut(
        &mut self,
        endpoint: Option<EndptId>,
    ) -> Result<(&mut BoxedEndpoints, usize, bool), IMStatusCode> {
        if let Some(e) = endpoint {
            let e = e as usize;
            if self.endpoints.len() <= e || self.endpoints[e].is_none() {
                Err(IMStatusCode::UnsupportedEndpoint)
            } else {
                Ok((&mut self.endpoints[e..e + 1], e, false))
            }
        } else {
            Ok((&mut self.endpoints[..], 0, true))
        }
    }

    /// Run a closure for all endpoints matching the path
    ///
    /// Note that the path is a `GenericPath` and may therefore be a wildcard path. For a
    /// wildcard path, this function only captures the successful invocations and ignores
    /// the erroneous ones, in line with the expected wildcard semantics: "run this
    /// operation on this wildcard path wherever possible".
    ///
    /// Consequently, if a wildcard path was specified, an error returned by the closure
    /// you pass here may not reach the caller.
    pub fn for_each_endpoint<T>(&self, path: &GenericPath, mut f: T) -> Result<(), IMStatusCode>
    where
        T: FnMut(&GenericPath, &Endpoint) -> Result<(), IMStatusCode>,
    {
        let mut current_path = *path;
        let (endpoints, mut endpoint_id, wildcard) = self.get_wildcard_endpoints(path.endpoint)?;
        for e in endpoints.iter() {
            if let Some(e) = e {
                current_path.endpoint = Some(endpoint_id as EndptId);
                f(&current_path, e.as_ref())
                    .or_else(|e| if !wildcard { Err(e) } else { Ok(()) })?;
            }
            endpoint_id += 1;
        }
        Ok(())
    }

    /// Run a closure for all endpoints (mutable) matching the path
    ///
    /// Note that the path is a `GenericPath` and may therefore be a wildcard path. For a
    /// wildcard path, this function only captures the successful invocations and ignores
    /// the erroneous ones, in line with the expected wildcard semantics: "run this
    /// operation on this wildcard path wherever possible".
    ///
    /// Consequently, if a wildcard path was specified, an error returned by the closure
    /// you pass here may not reach the caller.
    pub fn for_each_endpoint_mut<T>(
        &mut self,
        path: &GenericPath,
        mut f: T,
    ) -> Result<(), IMStatusCode>
    where
        T: FnMut(&GenericPath, &mut Endpoint) -> Result<(), IMStatusCode>,
    {
        let mut current_path = *path;
        let (endpoints, mut endpoint_id, wildcard) =
            self.get_wildcard_endpoints_mut(path.endpoint)?;
        for e in endpoints.iter_mut() {
            if let Some(e) = e {
                current_path.endpoint = Some(endpoint_id as EndptId);
                f(&current_path, e.as_mut())
                    .or_else(|e| if !wildcard { Err(e) } else { Ok(()) })?;
            }
            endpoint_id += 1;
        }
        Ok(())
    }

    /// Run a closure for all clusters matching the path
    ///
    /// Note that the path is a `GenericPath` and may therefore be a wildcard path. For a
    /// wildcard path, this function only captures the successful invocations and ignores
    /// the erroneous ones, in line with the expected wildcard semantics: "run this
    /// operation on this wildcard path wherever possible".
    ///
    /// Consequently, if a wildcard path was specified, an error returned by the closure
    /// you pass here may not reach the caller.
    pub fn for_each_cluster<T>(&self, path: &GenericPath, mut f: T) -> Result<(), IMStatusCode>
    where
        T: FnMut(&GenericPath, &dyn ClusterType) -> Result<(), IMStatusCode>,
    {
        self.for_each_endpoint(path, |p, e| {
            let mut current_path = *p;
            let (clusters, wildcard) = e.get_wildcard_clusters(p.cluster)?;
            for c in clusters.iter() {
                current_path.cluster = Some(c.base().id);
                f(&current_path, c.as_ref())
                    .or_else(|e| if !wildcard { Err(e) } else { Ok(()) })?;
            }
            Ok(())
        })
    }

    /// Run a closure for all clusters (mutable) matching the path
    ///
    /// Note that the path is a `GenericPath` and may therefore be a wildcard path. For a
    /// wildcard path, this function only captures the successful invocations and ignores
    /// the erroneous ones, in line with the expected wildcard semantics: "run this
    /// operation on this wildcard path wherever possible".
    ///
    /// Consequently, if a wildcard path was specified, an error returned by the closure
    /// you pass here may not reach the caller.
    pub fn for_each_cluster_mut<T>(
        &mut self,
        path: &GenericPath,
        mut f: T,
    ) -> Result<(), IMStatusCode>
    where
        T: FnMut(&GenericPath, &mut dyn ClusterType) -> Result<(), IMStatusCode>,
    {
        self.for_each_endpoint_mut(path, |p, e| {
            let mut current_path = *p;
            let (clusters, wildcard) = e.get_wildcard_clusters_mut(p.cluster)?;

            for c in clusters.iter_mut() {
                current_path.cluster = Some(c.base().id);
                f(&current_path, c.as_mut())
                    .or_else(|e| if !wildcard { Err(e) } else { Ok(()) })?;
            }
            Ok(())
        })
    }

    /// Run a closure for all attributes matching the path
    ///
    /// Note that the path is a `GenericPath` and may therefore be a wildcard path. For a
    /// wildcard path, this function only captures the successful invocations and ignores
    /// the erroneous ones, in line with the expected wildcard semantics: "run this
    /// operation on this wildcard path wherever possible".
    ///
    /// Consequently, if a wildcard path was specified, an error returned by the closure
    /// you pass here may not reach the caller.
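    ///
    /// A rough usage sketch (the way the node and the `GenericPath` are constructed below
    /// is illustrative only, based on how this module reads the path fields):
    ///
    /// ```ignore
    /// // Fully wildcarded path: visit every attribute of every cluster of every endpoint.
    /// let path = GenericPath {
    ///     endpoint: None,
    ///     cluster: None,
    ///     leaf: None,
    /// };
    /// node.for_each_attribute(&path, |current_path, cluster| {
    ///     // `current_path` has the wildcard fields replaced with the concrete
    ///     // endpoint/cluster/attribute ids for this invocation; `cluster` is the
    ///     // cluster that owns the attribute.
    ///     Ok(())
    /// })?;
    /// ```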
    pub fn for_each_attribute<T>(&self, path: &GenericPath, mut f: T) -> Result<(), IMStatusCode>
    where
        T: FnMut(&GenericPath, &dyn ClusterType) -> Result<(), IMStatusCode>,
    {
        self.for_each_cluster(path, |current_path, c| {
            let mut current_path = *current_path;
            let (attributes, wildcard) = c
                .base()
                .get_wildcard_attribute(path.leaf.map(|at| at as u16))?;
            for a in attributes.iter() {
                current_path.leaf = Some(a.id as u32);
                f(&current_path, c).or_else(|e| if !wildcard { Err(e) } else { Ok(()) })?;
            }
            Ok(())
        })
    }
}