Merge branch 'feature/multi-admin' of https://github.com/project-chip/matter-rs into feature/multi-admin

commit 40f4fd5d7f

47 changed files with 375 additions and 266 deletions
@@ -15,7 +15,6 @@
* limitations under the License.
*/

use std::{
mem::MaybeUninit,
ops::{Deref, DerefMut},

@@ -115,7 +114,7 @@ impl<T: SlabPool> Slab<T> {
}))
}

pub fn new(new_object: T::SlabType) -> Option<BoxSlab<T>> {
pub fn try_new(new_object: T::SlabType) -> Option<BoxSlab<T>> {
let slab = T::get_slab();
let mut inner = slab.0.lock().unwrap();
if let Some(index) = inner.map.first_false_index() {

@@ -180,14 +179,14 @@ mod tests {
#[test]
fn simple_alloc_free() {
{
let a = Slab::<TestSlab>::new(Test { val: Arc::new(10) }).unwrap();
let a = Slab::<TestSlab>::try_new(Test { val: Arc::new(10) }).unwrap();
assert_eq!(*a.val.deref(), 10);
let inner = TestSlab::get_slab().0.lock().unwrap();
assert_eq!(inner.map.is_empty(), false);
assert!(!inner.map.is_empty());
}
// Validates that the 'Drop' got executed
let inner = TestSlab::get_slab().0.lock().unwrap();
assert_eq!(inner.map.is_empty(), true);
assert!(inner.map.is_empty());
println!("Box Size {}", std::mem::size_of::<Box<Test>>());
println!("BoxSlab Size {}", std::mem::size_of::<BoxSlab<TestSlab>>());
}

@@ -195,25 +194,22 @@ mod tests {
#[test]
fn alloc_full_block() {
{
let a = Slab::<TestSlab>::new(Test { val: Arc::new(10) }).unwrap();
let b = Slab::<TestSlab>::new(Test { val: Arc::new(11) }).unwrap();
let c = Slab::<TestSlab>::new(Test { val: Arc::new(12) }).unwrap();
let a = Slab::<TestSlab>::try_new(Test { val: Arc::new(10) }).unwrap();
let b = Slab::<TestSlab>::try_new(Test { val: Arc::new(11) }).unwrap();
let c = Slab::<TestSlab>::try_new(Test { val: Arc::new(12) }).unwrap();
// Test that at overflow, we return None
assert_eq!(
Slab::<TestSlab>::new(Test { val: Arc::new(13) }).is_none(),
true
);
assert!(Slab::<TestSlab>::try_new(Test { val: Arc::new(13) }).is_none(),);
assert_eq!(*b.val.deref(), 11);

{
let inner = TestSlab::get_slab().0.lock().unwrap();
// Test that the bitmap is marked as full
assert_eq!(inner.map.is_full(), true);
assert!(inner.map.is_full());
}

// Purposefully drop, to test that new allocation is possible
std::mem::drop(b);
let d = Slab::<TestSlab>::new(Test { val: Arc::new(21) }).unwrap();
let d = Slab::<TestSlab>::try_new(Test { val: Arc::new(21) }).unwrap();
assert_eq!(*d.val.deref(), 21);

// Ensure older allocations are still valid

@@ -223,16 +219,16 @@ mod tests {

// Validates that the 'Drop' got executed - test that the bitmap is empty
let inner = TestSlab::get_slab().0.lock().unwrap();
assert_eq!(inner.map.is_empty(), true);
assert!(inner.map.is_empty());
}

#[test]
fn test_drop_logic() {
let root = Arc::new(10);
{
let _a = Slab::<TestSlab>::new(Test { val: root.clone() }).unwrap();
let _b = Slab::<TestSlab>::new(Test { val: root.clone() }).unwrap();
let _c = Slab::<TestSlab>::new(Test { val: root.clone() }).unwrap();
let _a = Slab::<TestSlab>::try_new(Test { val: root.clone() }).unwrap();
let _b = Slab::<TestSlab>::try_new(Test { val: root.clone() }).unwrap();
let _c = Slab::<TestSlab>::try_new(Test { val: root.clone() }).unwrap();
assert_eq!(Arc::strong_count(&root), 4);
}
// Test that Drop was correctly called on all the members of the pool
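The hunks above rename Slab::new to Slab::try_new, making the fallible, pool-backed allocation explicit in the name: when every slot is taken, None comes back instead of a fresh allocation. As a rough illustration of the pattern only (a minimal sketch, not the crate's actual SlabPool/BoxSlab machinery; the FixedPool and Slot names below are invented for the example), a fixed-capacity pool whose try_new hands out an Option and whose guard returns the slot on drop looks like this:

    use std::sync::Mutex;

    /// Toy fixed-capacity pool: at most N values are live at a time.
    struct FixedPool<const N: usize> {
        in_use: Mutex<[bool; N]>,
    }

    impl<const N: usize> FixedPool<N> {
        fn new() -> Self {
            Self { in_use: Mutex::new([false; N]) }
        }

        /// Fallible allocation: returns None when every slot is occupied,
        /// mirroring the `Slab::try_new(...) -> Option<BoxSlab<_>>` signature above.
        fn try_new(&self) -> Option<Slot<'_, N>> {
            let mut map = self.in_use.lock().unwrap();
            let index = map.iter().position(|&used| !used)?;
            map[index] = true;
            Some(Slot { pool: self, index })
        }
    }

    /// Guard that releases its slot when dropped, like BoxSlab's Drop impl.
    struct Slot<'a, const N: usize> {
        pool: &'a FixedPool<N>,
        index: usize,
    }

    impl<const N: usize> Drop for Slot<'_, N> {
        fn drop(&mut self) {
            self.pool.in_use.lock().unwrap()[self.index] = false;
        }
    }

    fn main() {
        let pool: FixedPool<2> = FixedPool::new();
        let a = pool.try_new().unwrap();
        let _b = pool.try_new().unwrap();
        assert!(pool.try_new().is_none()); // pool exhausted, as in alloc_full_block()
        drop(a);
        assert!(pool.try_new().is_some()); // a freed slot can be handed out again
    }

The tests in the hunks exercise exactly these two behaviours: None on exhaustion, and reuse of a slot once its guard is dropped.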
@@ -35,6 +35,7 @@ fn main() {
pid: 0x8002,
hw_ver: 2,
sw_ver: 1,
serial_no: "aabbccdd".to_string(),
};
let dev_att = Box::new(dev_att::HardCodedDevAtt::new());

@@ -487,6 +487,7 @@ impl std::fmt::Display for AclMgr {
}

#[cfg(test)]
#[allow(clippy::bool_assert_comparison)]
mod tests {
use crate::{
data_model::objects::{Access, Privilege},

@@ -18,6 +18,7 @@
use super::{CertConsumer, MAX_DEPTH};
use crate::error::Error;
use chrono::{TimeZone, Utc};
use log::warn;

#[derive(Debug)]
pub struct ASN1Writer<'a> {

@@ -255,10 +256,22 @@ impl<'a> CertConsumer for ASN1Writer<'a> {
}

fn utctime(&mut self, _tag: &str, epoch: u32) -> Result<(), Error> {
let mut matter_epoch = Utc.ymd(2000, 1, 1).and_hms(0, 0, 0).timestamp();
let mut matter_epoch = Utc
.with_ymd_and_hms(2000, 1, 1, 0, 0, 0)
.unwrap()
.timestamp();

matter_epoch += epoch as i64;

let dt = Utc.timestamp(matter_epoch, 0);
let dt = match Utc.timestamp_opt(matter_epoch, 0) {
chrono::LocalResult::None => return Err(Error::InvalidTime),
chrono::LocalResult::Single(s) => s,
chrono::LocalResult::Ambiguous(_, a) => {
warn!("Ambiguous time for epoch {epoch}; returning latest timestamp: {a}");
a
}
};

let time_str = format!("{}Z", dt.format("%y%m%d%H%M%S"));
self.write_str(0x17, time_str.as_bytes())
}

@@ -348,20 +348,17 @@ impl DistNames {
w.start_seq(tag)?;
for (id, value) in &self.dn {
if let Ok(tag) = num::FromPrimitive::from_u8(*id).ok_or(Error::InvalidData) {
match tag {
DnTags::NocCat => {
w.start_set("")?;
w.start_seq("")?;
w.oid("Chip NOC CAT Id:", &OID_MATTER_NOC_CAT_ID)?;
w.utf8str("", format!("{:08X}", value).as_str())?;
w.end_seq()?;
w.end_set()?;
}
_ => {
let index: usize = (*id as usize) - (DnTags::NodeId as usize);
let this = &dn_encoding[index];
encode_u64_dn(*value, this.0, this.1, w)?;
}
if let DnTags::NocCat = tag {
w.start_set("")?;
w.start_seq("")?;
w.oid("Chip NOC CAT Id:", &OID_MATTER_NOC_CAT_ID)?;
w.utf8str("", format!("{:08X}", value).as_str())?;
w.end_seq()?;
w.end_set()?;
} else {
let index: usize = (*id as usize) - (DnTags::NodeId as usize);
let this = &dn_encoding[index];
encode_u64_dn(*value, this.0, this.1, w)?;
}
} else {
error!("Non Matter DNs are not yet supported {}", id);

@@ -452,7 +449,7 @@ impl Cert {

pub fn as_asn1(&self, buf: &mut [u8]) -> Result<usize, Error> {
let mut w = ASN1Writer::new(buf);
let _ = self.encode(&mut w)?;
self.encode(&mut w)?;
Ok(w.as_slice().len())
}

@@ -657,7 +654,7 @@ mod tests {

for input in test_input.iter() {
println!("Testing next input...");
let root = tlv::get_root_node(*input).unwrap();
let root = tlv::get_root_node(input).unwrap();
let cert = Cert::from_tlv(&root).unwrap();
let mut buf = [0u8; 1024];
let buf_len = buf.len();

@@ -18,6 +18,7 @@
use super::{CertConsumer, MAX_DEPTH};
use crate::error::Error;
use chrono::{TimeZone, Utc};
use log::warn;
use std::fmt;

pub struct CertPrinter<'a, 'b> {

@@ -118,15 +119,23 @@ impl<'a, 'b> CertConsumer for CertPrinter<'a, 'b> {
Ok(())
}
fn utctime(&mut self, tag: &str, epoch: u32) -> Result<(), Error> {
let mut matter_epoch = Utc.ymd(2000, 1, 1).and_hms(0, 0, 0).timestamp();
let mut matter_epoch = Utc
.with_ymd_and_hms(2000, 1, 1, 0, 0, 0)
.unwrap()
.timestamp();

matter_epoch += epoch as i64;
let _ = writeln!(
self.f,
"{} {} {}",
SPACE[self.level],
tag,
Utc.timestamp(matter_epoch, 0)
);

let dt = match Utc.timestamp_opt(matter_epoch, 0) {
chrono::LocalResult::None => return Err(Error::InvalidTime),
chrono::LocalResult::Single(s) => s,
chrono::LocalResult::Ambiguous(_, a) => {
warn!("Ambiguous time for epoch {epoch}; returning latest timestamp: {a}");
a
}
};

let _ = writeln!(self.f, "{} {} {}", SPACE[self.level], tag, dt);
Ok(())
}
}
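Both utctime() implementations above move from chrono's panicking constructors (Utc.ymd().and_hms(), Utc.timestamp()) to the fallible ones (with_ymd_and_hms(), timestamp_opt()), which return a LocalResult that the caller has to unpack. A self-contained sketch of the same conversion, using chrono 0.4 but with a function name of my own choosing, looks like this:

    use chrono::{DateTime, LocalResult, TimeZone, Utc};

    /// Convert seconds since the Matter epoch (2000-01-01 00:00:00 UTC) to a UTC
    /// timestamp. Returns None where the diff above returns Error::InvalidTime.
    fn matter_epoch_to_utc(epoch: u32) -> Option<DateTime<Utc>> {
        // with_ymd_and_hms() is fallible in chrono 0.4; this literal date is always valid.
        let matter_epoch_start = Utc
            .with_ymd_and_hms(2000, 1, 1, 0, 0, 0)
            .unwrap()
            .timestamp();
        match Utc.timestamp_opt(matter_epoch_start + epoch as i64, 0) {
            LocalResult::Single(dt) => Some(dt),
            // For Utc this arm is effectively unreachable, but the API forces it to be handled,
            // which is why the diff logs a warning and picks the latest candidate.
            LocalResult::Ambiguous(_, latest) => Some(latest),
            LocalResult::None => None,
        }
    }

    fn main() {
        let dt = matter_epoch_to_utc(0).unwrap();
        assert_eq!(dt.format("%y%m%d%H%M%S").to_string(), "000101000000");
        println!("{dt}"); // 2000-01-01 00:00:00 UTC
    }

The ASN1Writer then formats the result as an ASN.1 UTCTime string ("%y%m%d%H%M%SZ"), while the CertPrinter simply prints the DateTime.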
@@ -17,13 +17,18 @@

use super::objects::*;
use crate::error::*;
use num_derive::FromPrimitive;

pub const ID: u32 = 0x0028;

#[derive(FromPrimitive)]
enum Attributes {
DMRevision = 0,
VendorId = 2,
ProductId = 4,
HwVer = 7,
SwVer = 9,
SerialNo = 0x0f,
}

pub struct BasicInfoConfig {

@@ -31,6 +36,16 @@ pub struct BasicInfoConfig {
pub pid: u16,
pub hw_ver: u16,
pub sw_ver: u32,
pub serial_no: String,
}

fn attr_dm_rev_new() -> Result<Attribute, Error> {
Attribute::new(
Attributes::DMRevision as u16,
AttrValue::Uint8(1),
Access::RV,
Quality::FIXED,
)
}

fn attr_vid_new(vid: u16) -> Result<Attribute, Error> {

@@ -69,6 +84,14 @@ fn attr_sw_ver_new(sw_ver: u32) -> Result<Attribute, Error> {
)
}

fn attr_serial_no_new(label: String) -> Result<Attribute, Error> {
Attribute::new(
Attributes::SerialNo as u16,
AttrValue::Utf8(label),
Access::RV,
Quality::FIXED,
)
}
pub struct BasicInfoCluster {
base: Cluster,
}

@@ -78,10 +101,14 @@ impl BasicInfoCluster {
let mut cluster = Box::new(BasicInfoCluster {
base: Cluster::new(ID)?,
});
cluster.base.add_attribute(attr_dm_rev_new()?)?;
cluster.base.add_attribute(attr_vid_new(cfg.vid)?)?;
cluster.base.add_attribute(attr_pid_new(cfg.pid)?)?;
cluster.base.add_attribute(attr_hw_ver_new(cfg.hw_ver)?)?;
cluster.base.add_attribute(attr_sw_ver_new(cfg.sw_ver)?)?;
cluster
.base
.add_attribute(attr_serial_no_new(cfg.serial_no)?)?;
Ok(cluster)
}
}

@@ -84,7 +84,7 @@ impl DataModel {
) -> Result<AttrValue, IMStatusCode> {
let node = self.node.read().unwrap();
let cluster = node.get_cluster(endpoint, cluster)?;
cluster.base().read_attribute_raw(attr).map(|a| *a)
cluster.base().read_attribute_raw(attr).map(|a| a.clone())
}

// Encode a write attribute from a path that may or may not be wildcard

@@ -180,7 +180,7 @@ impl DataModel {
// Set the cluster's data version
attr_encoder.set_data_ver(cluster_data_ver);
let mut access_req = AccessReq::new(accessor, path, Access::READ);
Cluster::read_attribute(c, &mut access_req, attr_encoder, &attr_details);
Cluster::read_attribute(c, &mut access_req, attr_encoder, attr_details);
Ok(())
});
if let Err(e) = result {

@@ -32,6 +32,11 @@ use crate::secure_channel::pake::PaseMgr;
use std::sync::Arc;
use std::sync::RwLockWriteGuard;

pub const DEV_TYPE_ROOT_NODE: DeviceType = DeviceType {
dtype: 0x0016,
drev: 1,
};

type WriteNode<'a> = RwLockWriteGuard<'a, Box<Node>>;

pub fn device_type_add_root_node(

@@ -43,7 +48,7 @@ pub fn device_type_add_root_node(
pase_mgr: PaseMgr,
) -> Result<u32, Error> {
// Add the root endpoint
let endpoint = node.add_endpoint()?;
let endpoint = node.add_endpoint(DEV_TYPE_ROOT_NODE)?;
if endpoint != 0 {
// Somehow endpoint 0 was already added, this shouldn't be the case
return Err(Error::Invalid);

@@ -63,8 +68,13 @@ pub fn device_type_add_root_node(
Ok(endpoint)
}

const DEV_TYPE_ON_OFF_LIGHT: DeviceType = DeviceType {
dtype: 0x0100,
drev: 2,
};

pub fn device_type_add_on_off_light(node: &mut WriteNode) -> Result<u32, Error> {
let endpoint = node.add_endpoint()?;
let endpoint = node.add_endpoint(DEV_TYPE_ON_OFF_LIGHT)?;
node.add_cluster(endpoint, OnOffCluster::new()?)?;
Ok(endpoint)
}

@@ -88,7 +88,7 @@ bitflags! {
* - instead of arrays, can use linked-lists to conserve space and avoid the internal fragmentation
*/

#[derive(PartialEq, Copy, Clone)]
#[derive(PartialEq, Clone)]
pub enum AttrValue {
Int64(i64),
Uint8(u8),

@@ -96,6 +96,7 @@ pub enum AttrValue {
Uint32(u32),
Uint64(u64),
Bool(bool),
Utf8(String),
Custom,
}

@@ -108,6 +109,7 @@ impl Debug for AttrValue {
AttrValue::Uint32(v) => write!(f, "{:?}", *v),
AttrValue::Uint64(v) => write!(f, "{:?}", *v),
AttrValue::Bool(v) => write!(f, "{:?}", *v),
AttrValue::Utf8(v) => write!(f, "{:?}", *v),
AttrValue::Custom => write!(f, "custom-attribute"),
}?;
Ok(())

@@ -123,6 +125,7 @@ impl ToTLV for AttrValue {
AttrValue::Uint16(v) => tw.u16(tag_type, *v),
AttrValue::Uint32(v) => tw.u32(tag_type, *v),
AttrValue::Uint64(v) => tw.u64(tag_type, *v),
AttrValue::Utf8(v) => tw.utf8(tag_type, v.as_bytes()),
_ => {
error!("Attribute type not yet supported");
Err(Error::AttributeNotFound)

@@ -203,6 +206,7 @@ impl std::fmt::Display for Attribute {
}

#[cfg(test)]
#[allow(clippy::bool_assert_comparison)]
mod tests {
use super::Access;
use crate::data_model::objects::Privilege;
@@ -87,7 +87,6 @@ pub trait ClusterType {
pub struct Cluster {
pub(super) id: u32,
attributes: Vec<Attribute>,
feature_map: Option<u32>,
data_ver: u32,
}

@@ -96,7 +95,6 @@ impl Cluster {
let mut c = Cluster {
id,
attributes: Vec::with_capacity(ATTRS_PER_CLUSTER),
feature_map: None,
data_ver: rand::thread_rng().gen_range(0..0xFFFFFFFF),
};
c.add_default_attributes()?;

@@ -112,22 +110,20 @@ impl Cluster {
}

pub fn set_feature_map(&mut self, map: u32) -> Result<(), Error> {
if self.feature_map.is_none() {
self.add_attribute(Attribute::new(
GlobalElements::FeatureMap as u16,
AttrValue::Uint32(map),
Access::RV,
Quality::NONE,
)?)?;
} else {
self.write_attribute_raw(GlobalElements::FeatureMap as u16, AttrValue::Uint32(map))
.map_err(|_| Error::Invalid)?;
}
self.feature_map = Some(map);
self.write_attribute_raw(GlobalElements::FeatureMap as u16, AttrValue::Uint32(map))
.map_err(|_| Error::Invalid)?;
Ok(())
}

fn add_default_attributes(&mut self) -> Result<(), Error> {
// Default feature map is 0
self.add_attribute(Attribute::new(
GlobalElements::FeatureMap as u16,
AttrValue::Uint32(0),
Access::RV,
Quality::NONE,
)?)?;

self.add_attribute(Attribute::new(
GlobalElements::AttributeList as u16,
AttrValue::Custom,

@@ -233,8 +229,7 @@ impl Cluster {
return;
}
GlobalElements::FeatureMap => {
let val = if let Some(m) = self.feature_map { m } else { 0 };
encoder.encode(EncodeValue::Value(&val));
encoder.encode(EncodeValue::Value(&attr.value));
return;
}
_ => {

@@ -284,14 +279,13 @@ impl Cluster {
) -> Result<(), IMStatusCode> {
let a = self.get_attribute_mut(attr_id)?;
if a.value != AttrValue::Custom {
let mut value = a.value;
let mut value = a.value.clone();
value
.update_from_tlv(data)
.map_err(|_| IMStatusCode::Failure)?;
a.set_value(value)
.map(|_| {
self.cluster_changed();
()
})
.map_err(|_| IMStatusCode::UnsupportedWrite)
} else {

@@ -303,7 +297,6 @@ impl Cluster {
let a = self.get_attribute_mut(attr_id)?;
a.set_value(value).map(|_| {
self.cluster_changed();
()
})
}

@@ -95,7 +95,7 @@ impl<'a> ToTLV for EncodeValue<'a> {
(f)(tag_type, tw);
Ok(())
}
EncodeValue::Tlv(_) => (panic!("This looks invalid")),
EncodeValue::Tlv(_) => panic!("This looks invalid"),
EncodeValue::Value(v) => v.to_tlv(tw, tag_type),
}
}

@@ -115,3 +115,9 @@ pub trait Encoder {
/// Encode a status report
fn encode_status(&mut self, status: IMStatusCode, cluster_status: u16);
}

#[derive(ToTLV, Copy, Clone)]
pub struct DeviceType {
pub dtype: u16,
pub drev: u16,
}

@@ -19,15 +19,21 @@ use crate::{data_model::objects::ClusterType, error::*, interaction_model::core:

use std::fmt;

use super::DeviceType;

pub const CLUSTERS_PER_ENDPT: usize = 9;

pub struct Endpoint {
dev_type: DeviceType,
clusters: Vec<Box<dyn ClusterType>>,
}

pub type BoxedClusters = [Box<dyn ClusterType>];

impl Endpoint {
pub fn new() -> Result<Box<Endpoint>, Error> {
pub fn new(dev_type: DeviceType) -> Result<Box<Endpoint>, Error> {
Ok(Box::new(Endpoint {
dev_type,
clusters: Vec::with_capacity(CLUSTERS_PER_ENDPT),
}))
}

@@ -41,6 +47,10 @@ impl Endpoint {
}
}

pub fn get_dev_type(&self) -> &DeviceType {
&self.dev_type
}

fn get_cluster_index(&self, cluster_id: u32) -> Option<usize> {
self.clusters.iter().position(|c| c.base().id == cluster_id)
}

@@ -63,7 +73,7 @@ impl Endpoint {
pub fn get_wildcard_clusters(
&self,
cluster: Option<u32>,
) -> Result<(&[Box<dyn ClusterType>], bool), IMStatusCode> {
) -> Result<(&BoxedClusters, bool), IMStatusCode> {
if let Some(c) = cluster {
if let Some(i) = self.get_cluster_index(c) {
Ok((&self.clusters[i..i + 1], false))

@@ -79,7 +89,7 @@ impl Endpoint {
pub fn get_wildcard_clusters_mut(
&mut self,
cluster: Option<u32>,
) -> Result<(&mut [Box<dyn ClusterType>], bool), IMStatusCode> {
) -> Result<(&mut BoxedClusters, bool), IMStatusCode> {
if let Some(c) = cluster {
if let Some(i) = self.get_cluster_index(c) {
Ok((&mut self.clusters[i..i + 1], false))

@@ -23,12 +23,16 @@ use crate::{
};
use std::fmt;

use super::DeviceType;

pub trait ChangeConsumer {
fn endpoint_added(&self, id: u16, endpoint: &mut Endpoint) -> Result<(), Error>;
}

pub const ENDPTS_PER_ACC: usize = 3;

pub type BoxedEndpoints = [Option<Box<Endpoint>>];

#[derive(Default)]
pub struct Node {
endpoints: [Option<Box<Endpoint>>; ENDPTS_PER_ACC],

@@ -49,7 +53,7 @@ impl std::fmt::Display for Node {

impl Node {
pub fn new() -> Result<Box<Node>, Error> {
let node = Box::new(Node::default());
let node = Box::default();
Ok(node)
}

@@ -57,13 +61,13 @@ impl Node {
self.changes_cb = Some(consumer);
}

pub fn add_endpoint(&mut self) -> Result<u32, Error> {
pub fn add_endpoint(&mut self, dev_type: DeviceType) -> Result<u32, Error> {
let index = self
.endpoints
.iter()
.position(|x| x.is_none())
.ok_or(Error::NoSpace)?;
let mut endpoint = Endpoint::new()?;
let mut endpoint = Endpoint::new(dev_type)?;
if let Some(cb) = &self.changes_cb {
cb.endpoint_added(index as u16, &mut endpoint)?;
}

@@ -121,7 +125,7 @@ impl Node {
pub fn get_wildcard_endpoints(
&self,
endpoint: Option<u16>,
) -> Result<(&[Option<Box<Endpoint>>], usize, bool), IMStatusCode> {
) -> Result<(&BoxedEndpoints, usize, bool), IMStatusCode> {
if let Some(e) = endpoint {
let e = e as usize;
if self.endpoints.len() <= e || self.endpoints[e].is_none() {

@@ -137,7 +141,7 @@ impl Node {
pub fn get_wildcard_endpoints_mut(
&mut self,
endpoint: Option<u16>,
) -> Result<(&mut [Option<Box<Endpoint>>], usize, bool), IMStatusCode> {
) -> Result<(&mut BoxedEndpoints, usize, bool), IMStatusCode> {
if let Some(e) = endpoint {
let e = e as usize;
if self.endpoints.len() <= e || self.endpoints[e].is_none() {
@@ -58,6 +58,7 @@ impl FromTLV<'_> for Privilege {
}

impl ToTLV for Privilege {
#[allow(clippy::bool_to_int_with_if)]
fn to_tlv(
&self,
tw: &mut crate::tlv::TLVWriter,

@@ -21,6 +21,7 @@ use std::sync::RwLock;

#[derive(PartialEq)]
#[allow(dead_code)]
#[allow(clippy::enum_variant_names)]
enum NocState {
NocNotRecvd,
// This is the local fabric index

@@ -131,7 +131,7 @@ impl NocCluster {
}

fn add_acl(&self, fab_idx: u8, admin_subject: u64) -> Result<(), Error> {
let mut acl = AclEntry::new(fab_idx as u8, Privilege::ADMIN, AuthMode::Case);
let mut acl = AclEntry::new(fab_idx, Privilege::ADMIN, AuthMode::Case);
acl.add_subject(admin_subject)?;
self.acl_mgr.add(acl)
}

@@ -156,7 +156,7 @@ impl NocCluster {

let noc_value = Cert::new(r.noc_value.0).map_err(|_| NocStatus::InvalidNOC)?;
info!("Received NOC as: {}", noc_value);
let icac_value = if r.icac_value.0.len() != 0 {
let icac_value = if !r.icac_value.0.is_empty() {
let cert = Cert::new(r.icac_value.0).map_err(|_| NocStatus::InvalidNOC)?;
info!("Received ICAC as: {}", cert);
Some(cert)

@@ -178,7 +178,7 @@ impl NocCluster {
.add(fabric)
.map_err(|_| NocStatus::TableFull)?;

if self.add_acl(fab_idx as u8, r.case_admin_subject).is_err() {
if self.add_acl(fab_idx, r.case_admin_subject).is_err() {
error!("Failed to add ACL, what to do?");
}

@@ -63,7 +63,7 @@ impl AccessControlCluster {
/// Care about fabric-scoped behaviour is taken
fn write_acl_attr(
&mut self,
op: ListOperation,
op: &ListOperation,
data: &TLVElement,
fab_idx: u8,
) -> Result<(), IMStatusCode> {

@@ -77,12 +77,12 @@ impl AccessControlCluster {
acl_entry.fab_idx = Some(fab_idx);

if let ListOperation::EditItem(index) = op {
self.acl_mgr.edit(index as u8, fab_idx, acl_entry)
self.acl_mgr.edit(*index as u8, fab_idx, acl_entry)
} else {
self.acl_mgr.add(acl_entry)
}
}
ListOperation::DeleteItem(index) => self.acl_mgr.delete(index as u8, fab_idx),
ListOperation::DeleteItem(index) => self.acl_mgr.delete(*index as u8, fab_idx),
ListOperation::DeleteList => self.acl_mgr.delete_for_fabric(fab_idx),
};
match result {

@@ -128,14 +128,13 @@ impl ClusterType for AccessControlCluster {
attr: &AttrDetails,
data: &TLVElement,
) -> Result<(), IMStatusCode> {
let result = match num::FromPrimitive::from_u16(attr.attr_id) {
Some(Attributes::Acl) => attr_list_write(attr, data, |op, data| {
self.write_acl_attr(op, data, attr.fab_idx)
}),
_ => {
error!("Attribute not yet supported: this shouldn't happen");
Err(IMStatusCode::NotFound)
}
let result = if let Some(Attributes::Acl) = num::FromPrimitive::from_u16(attr.attr_id) {
attr_list_write(attr, data, |op, data| {
self.write_acl_attr(&op, data, attr.fab_idx)
})
} else {
error!("Attribute not yet supported: this shouldn't happen");
Err(IMStatusCode::NotFound)
};
if result.is_ok() {
self.base.cluster_changed();

@@ -223,7 +222,7 @@ mod tests {

// Test, ACL has fabric index 2, but the accessing fabric is 1
// the fabric index in the TLV should be ignored and the ACL should be created with entry 1
let result = acl.write_acl_attr(ListOperation::AddItem, &data, 1);
let result = acl.write_acl_attr(&ListOperation::AddItem, &data, 1);
assert_eq!(result, Ok(()));

let verifier = AclEntry::new(1, Privilege::VIEW, AuthMode::Case);

@@ -259,7 +258,7 @@ mod tests {
let data = get_root_node_struct(writebuf.as_borrow_slice()).unwrap();

// Test, Edit Fabric 2's index 1 - with accessing fabric as 2 - allow
let result = acl.write_acl_attr(ListOperation::EditItem(1), &data, 2);
let result = acl.write_acl_attr(&ListOperation::EditItem(1), &data, 2);
// Fabric 2's index 1, is actually our index 2, update the verifier
verifier[2] = new;
assert_eq!(result, Ok(()));

@@ -292,7 +291,7 @@ mod tests {
let data = TLVElement::new(TagType::Anonymous, ElementType::True);

// Test, Delete Fabric 1's index 0
let result = acl.write_acl_attr(ListOperation::DeleteItem(0), &data, 1);
let result = acl.write_acl_attr(&ListOperation::DeleteItem(0), &data, 1);
assert_eq!(result, Ok(()));

let verifier = [input[0], input[2]];

@@ -323,7 +322,7 @@ mod tests {
for i in input {
acl_mgr.add(i).unwrap();
}
let acl = AccessControlCluster::new(acl_mgr.clone()).unwrap();
let acl = AccessControlCluster::new(acl_mgr).unwrap();
// Test 1, all 3 entries are read in the response without fabric filtering
{
let mut tw = TLVWriter::new(&mut writebuf);
@@ -21,12 +21,13 @@ use crate::data_model::core::DataModel;
use crate::data_model::objects::*;
use crate::error::*;
use crate::interaction_model::messages::GenericPath;
use crate::tlv::{TLVWriter, TagType};
use crate::tlv::{TLVWriter, TagType, ToTLV};
use log::error;

pub const ID: u32 = 0x001D;

#[derive(FromPrimitive)]
#[allow(clippy::enum_variant_names)]
enum Attributes {
DeviceTypeList = 0,
ServerList = 1,

@@ -47,11 +48,28 @@ impl DescriptorCluster {
data_model,
base: Cluster::new(ID)?,
});
c.base.add_attribute(attr_devtypelist_new()?)?;
c.base.add_attribute(attr_serverlist_new()?)?;
c.base.add_attribute(attr_partslist_new()?)?;
Ok(c)
}

fn encode_devtype_list(&self, tag: TagType, tw: &mut TLVWriter) {
let path = GenericPath {
endpoint: Some(self.endpoint_id),
cluster: None,
leaf: None,
};
let _ = tw.start_array(tag);
let dm = self.data_model.node.read().unwrap();
let _ = dm.for_each_endpoint(&path, |_, e| {
let dev_type = e.get_dev_type();
let _ = dev_type.to_tlv(tw, TagType::Anonymous);
Ok(())
});
let _ = tw.end_container();
}

fn encode_server_list(&self, tag: TagType, tw: &mut TLVWriter) {
let path = GenericPath {
endpoint: Some(self.endpoint_id),

@@ -68,8 +86,24 @@ impl DescriptorCluster {
}

fn encode_parts_list(&self, tag: TagType, tw: &mut TLVWriter) {
// TODO: Support Partslist
let path = GenericPath {
endpoint: None,
cluster: None,
leaf: None,
};
let _ = tw.start_array(tag);
if self.endpoint_id == 0 {
// TODO: If endpoint is another than 0, need to figure out what to do
let dm = self.data_model.node.read().unwrap();
let _ = dm.for_each_endpoint(&path, |current_path, _| {
if let Some(endpoint_id) = current_path.endpoint {
if endpoint_id != 0 {
let _ = tw.u16(TagType::Anonymous, endpoint_id);
}
}
Ok(())
});
}
let _ = tw.end_container();
}
}

@@ -84,6 +118,9 @@ impl ClusterType for DescriptorCluster {

fn read_custom_attribute(&self, encoder: &mut dyn Encoder, attr: &AttrDetails) {
match num::FromPrimitive::from_u16(attr.attr_id) {
Some(Attributes::DeviceTypeList) => encoder.encode(EncodeValue::Closure(&|tag, tw| {
self.encode_devtype_list(tag, tw)
})),
Some(Attributes::ServerList) => encoder.encode(EncodeValue::Closure(&|tag, tw| {
self.encode_server_list(tag, tw)
})),

@@ -97,6 +134,14 @@ impl ClusterType for DescriptorCluster {
}
}

fn attr_devtypelist_new() -> Result<Attribute, Error> {
Attribute::new(
Attributes::DeviceTypeList as u16,
AttrValue::Custom,
Access::RV,
Quality::NONE,
)
}
fn attr_serverlist_new() -> Result<Attribute, Error> {
Attribute::new(
Attributes::ServerList as u16,

@@ -57,6 +57,7 @@ pub enum Error {
InvalidAuthKey,
InvalidSignature,
InvalidState,
InvalidTime,
RwLock,
TLVNotFound,
TLVTypeMismatch,

@@ -118,7 +119,7 @@ impl From<TryRecvError> for Error {
}
}

impl<'a> fmt::Display for Error {
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", self)
}

@@ -233,7 +233,7 @@ impl Fabric {

let mut icac = Vec::new();
psm.get_kv_slice(fb_key!(index, ST_ICA), &mut icac)?;
let icac = if icac.len() != 0 {
let icac = if !icac.is_empty() {
Some(Cert::new(icac.as_slice())?)
} else {
None

@@ -51,7 +51,7 @@ impl InteractionModel {
rx_buf: &[u8],
proto_tx: &mut Packet,
) -> Result<ResponseRequired, Error> {
if InteractionModel::req_timeout_handled(trans, proto_tx)? == true {
if InteractionModel::req_timeout_handled(trans, proto_tx)? {
return Ok(ResponseRequired::Yes);
}

@@ -61,7 +61,7 @@ impl InteractionModel {
let inv_req = InvReq::from_tlv(&root)?;

let timed_tx = trans.get_timeout().map(|_| true);
let timed_request = inv_req.timed_request.filter(|a| *a == true);
let timed_request = inv_req.timed_request.filter(|a| *a);
// Either both should be None, or both should be Some(true)
if timed_tx != timed_request {
InteractionModel::create_status_response(proto_tx, IMStatusCode::TimedRequestMisMatch)?;

@@ -179,7 +179,7 @@ impl proto_demux::HandleProto for InteractionModel {
}

fn get_proto_id(&self) -> usize {
PROTO_ID_INTERACTION_MODEL as usize
PROTO_ID_INTERACTION_MODEL
}
}

@@ -348,22 +348,20 @@ pub mod ib {
} else {
f(ListOperation::EditItem(index), data)
}
} else {
if data.confirm_array().is_ok() {
// If data is list, this is either Delete List or OverWrite List operation
// in either case, we have to first delete the whole list
f(ListOperation::DeleteList, data)?;
// Now the data must be a list, that should be added item by item
} else if data.confirm_array().is_ok() {
// If data is list, this is either Delete List or OverWrite List operation
// in either case, we have to first delete the whole list
f(ListOperation::DeleteList, data)?;
// Now the data must be a list, that should be added item by item

let container = data.enter().ok_or(Error::Invalid)?;
for d in container {
f(ListOperation::AddItem, &d)?;
}
Ok(())
} else {
// If data is not a list, this must be an add operation
f(ListOperation::AddItem, data)
let container = data.enter().ok_or(Error::Invalid)?;
for d in container {
f(ListOperation::AddItem, &d)?;
}
Ok(())
} else {
// If data is not a list, this must be an add operation
f(ListOperation::AddItem, data)
}
}
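The attr_list_write() hunk above flattens a nested else-if so clippy stops flagging it, but the dispatch it implements is worth spelling out: an index in the write path edits one element, a list payload with no index overwrites the whole list (delete it, then add the elements one by one), and a scalar payload with no index appends a single item. A simplified, self-contained sketch of that shape (plain integers instead of TLV elements, and dispatch_list_write is my own name, not the crate's API):

    enum ListOperation {
        AddItem(u32),
        EditItem(u16, u32),
        DeleteList,
    }

    /// Mirrors the dispatch visible in the hunk above, reduced to toy types.
    fn dispatch_list_write(
        list_index: Option<u16>,
        payload: &[u32],
        payload_is_list: bool,
        mut apply: impl FnMut(ListOperation) -> Result<(), ()>,
    ) -> Result<(), ()> {
        if let Some(index) = list_index {
            // A concrete index in the path selects a single element to edit.
            apply(ListOperation::EditItem(index, payload.first().copied().unwrap_or_default()))
        } else if payload_is_list {
            // Overwrite-list: first delete the existing list, then add each element.
            apply(ListOperation::DeleteList)?;
            for v in payload {
                apply(ListOperation::AddItem(*v))?;
            }
            Ok(())
        } else {
            // A scalar payload without an index appends one item.
            apply(ListOperation::AddItem(payload.first().copied().unwrap_or_default()))
        }
    }

    fn main() {
        let mut log = Vec::new();
        dispatch_list_write(None, &[1, 2, 3], true, |op| {
            log.push(match op {
                ListOperation::DeleteList => "delete-list".to_string(),
                ListOperation::AddItem(v) => format!("add {v}"),
                ListOperation::EditItem(i, v) => format!("edit [{i}] = {v}"),
            });
            Ok(())
        })
        .unwrap();
        assert_eq!(log, ["delete-list", "add 1", "add 2", "add 3"]);
    }

The cluster-side callers shown elsewhere in this diff (write_acl_attr, write_attr_list) receive these operations and apply them to their own storage, which is why they now take the operation by reference.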
@@ -32,7 +32,7 @@ impl InteractionModel {
rx_buf: &[u8],
proto_tx: &mut Packet,
) -> Result<ResponseRequired, Error> {
if InteractionModel::req_timeout_handled(trans, proto_tx)? == true {
if InteractionModel::req_timeout_handled(trans, proto_tx)? {
return Ok(ResponseRequired::Yes);
}
proto_tx.set_proto_opcode(OpCode::WriteResponse as u8);

@@ -50,6 +50,7 @@
//! pid: 0xFFF1,
//! hw_ver: 2,
//! sw_ver: 1,
//! serial_no: "aabbcc".to_string(),
//! };
//!
//! /// Get the Matter Object

@@ -42,6 +42,7 @@ const SHORT_DISCRIMINATOR_SHIFT: u16 = 8;
static mut G_MDNS: Option<Arc<Mdns>> = None;
static INIT: Once = Once::new();

#[derive(Clone, Copy)]
pub enum ServiceMode {
/// The commissioned state
Commissioned,

@@ -74,6 +74,6 @@ impl proto_demux::HandleProto for SecureChannel {
}

fn get_proto_id(&self) -> usize {
PROTO_ID_SECURE_CHANNEL as usize
PROTO_ID_SECURE_CHANNEL
}
}

@@ -293,7 +293,7 @@ impl PAKE {
let local_sessid = ctx.exch_ctx.sess.reserve_new_sess_id();
let spake2p_data: u32 = ((local_sessid as u32) << 16) | a.initiator_ssid as u32;
let mut spake2p = Box::new(Spake2P::new());
spake2p.set_app_data(spake2p_data as u32);
spake2p.set_app_data(spake2p_data);

// Generate response
let mut tw = TLVWriter::new(ctx.tx.get_writebuf()?);

@@ -88,7 +88,7 @@ static TAG_EXTRACTOR: [ExtractTag; 8] = [
// ImplPrf32 5
|t| TagType::ImplPrf32(LittleEndian::read_u32(&t.buf[t.current..])),
// FullQual48 6
|t| TagType::FullQual48(LittleEndian::read_u48(&t.buf[t.current..]) as u64),
|t| TagType::FullQual48(LittleEndian::read_u48(&t.buf[t.current..])),
// FullQual64 7
|t| TagType::FullQual64(LittleEndian::read_u64(&t.buf[t.current..])),
];

@@ -330,32 +330,28 @@ impl<'a> PartialEq for TLVElement<'a> {
let ours = ours.unwrap();
let theirs = theirs.unwrap();

match ours.element_type {
ElementType::EndCnt => {
if nest_level == 0 {
break;
} else {
nest_level -= 1;
}
if let ElementType::EndCnt = ours.element_type {
if nest_level == 0 {
break;
}
_ => {
if is_container(ours.element_type) {
nest_level += 1;
// Only compare the discriminants in case of array/list/structures,
// instead of actual element values. Those will be subsets within this same
// list that will get validated anyway
if std::mem::discriminant(&ours.element_type)
!= std::mem::discriminant(&theirs.element_type)
{
return false;
}
} else if ours.element_type != theirs.element_type {
nest_level -= 1;
} else {
if is_container(ours.element_type) {
nest_level += 1;
// Only compare the discriminants in case of array/list/structures,
// instead of actual element values. Those will be subsets within this same
// list that will get validated anyway
if std::mem::discriminant(&ours.element_type)
!= std::mem::discriminant(&theirs.element_type)
{
return false;
}
} else if ours.element_type != theirs.element_type {
return false;
}

if ours.tag_type != theirs.tag_type {
return false;
}
if ours.tag_type != theirs.tag_type {
return false;
}
}
}

@@ -668,9 +664,8 @@ impl<'a> TLVContainerIterator<'a> {
}
_ => return Some(last_elem),
}
} else {
nest_level -= 1;
}
nest_level -= 1;
}
_ => {
if is_container(element.element_type) {

@@ -88,7 +88,7 @@ macro_rules! totlv_for {
};
}

impl<'a, T: ToTLV, const N: usize> ToTLV for [T; N] {
impl<T: ToTLV, const N: usize> ToTLV for [T; N] {
fn to_tlv(&self, tw: &mut TLVWriter, tag: TagType) -> Result<(), Error> {
tw.start_array(tag)?;
for i in self {

@@ -211,7 +211,7 @@ impl<'a, T: FromTLV<'a>> FromTLV<'a> for Option<T> {
impl<T: ToTLV> ToTLV for Option<T> {
fn to_tlv(&self, tw: &mut TLVWriter, tag: TagType) -> Result<(), Error> {
match self {
Some(s) => (s.to_tlv(tw, tag)),
Some(s) => s.to_tlv(tw, tag),
None => Ok(()),
}
}

@@ -321,12 +321,10 @@ impl<'a, T: ToTLV> TLVArray<'a, T> {

impl<'a, T: ToTLV + FromTLV<'a> + Copy> TLVArray<'a, T> {
pub fn get_index(&self, index: usize) -> T {
let mut curr = 0;
for element in self.iter() {
for (curr, element) in self.iter().enumerate() {
if curr == index {
return element;
}
curr += 1;
}
panic!("Out of bounds");
}

@@ -360,7 +358,7 @@ where
{
fn eq(&self, other: &&[T]) -> bool {
let mut iter1 = self.iter();
let mut iter2 = other.into_iter();
let mut iter2 = other.iter();
loop {
match (iter1.next(), iter2.next()) {
(None, None) => return true,

@@ -402,7 +400,7 @@ impl<'a, T: Debug + ToTLV + FromTLV<'a> + Copy> Debug for TLVArray<'a, T> {
for i in self.iter() {
writeln!(f, "{:?}", i)?;
}
writeln!(f, "")
writeln!(f)
}
}

@@ -74,8 +74,8 @@ impl<'a, 'b> TLVWriter<'a, 'b> {
TagType::CommonPrf32(v) => (3, v as u64),
TagType::ImplPrf16(v) => (4, v as u64),
TagType::ImplPrf32(v) => (5, v as u64),
TagType::FullQual48(v) => (6, v as u64),
TagType::FullQual64(v) => (7, v as u64),
TagType::FullQual48(v) => (6, v),
TagType::FullQual64(v) => (7, v),
};
self.buf
.le_u8(((tag_id) << TAG_SHIFT_BITS) | (val_type as u8))?;

@@ -269,13 +269,11 @@ mod tests {

tw.u8(TagType::Anonymous, 12).unwrap();
tw.u8(TagType::Context(1), 13).unwrap();
match tw.u16(TagType::Anonymous, 12) {
Ok(_) => panic!("This should have returned error"),
_ => (),
if tw.u16(TagType::Anonymous, 12).is_ok() {
panic!("This should have returned error")
}
match tw.u16(TagType::Context(2), 13) {
Ok(_) => panic!("This should have returned error"),
_ => (),
if tw.u16(TagType::Context(2), 13).is_ok() {
panic!("This should have returned error")
}
assert_eq!(buf, [4, 12, 36, 1, 13, 4]);
}
@@ -123,10 +123,7 @@ impl Exchange {
}

pub fn is_data_none(&self) -> bool {
match self.data {
DataOption::None => true,
_ => false,
}
matches!(self.data, DataOption::None)
}

pub fn set_data_boxed(&mut self, data: Box<dyn Any>) {

@@ -147,12 +144,11 @@ impl Exchange {

pub fn take_data_boxed<T: Any>(&mut self) -> Option<Box<T>> {
let old = std::mem::replace(&mut self.data, DataOption::None);
match old {
DataOption::Boxed(d) => d.downcast::<T>().ok(),
_ => {
self.data = old;
None
}
if let DataOption::Boxed(d) = old {
d.downcast::<T>().ok()
} else {
self.data = old;
None
}
}

@@ -293,15 +289,14 @@ impl ExchangeMgr {
// Get the session
let (mut proto_rx, index) = self.sess_mgr.recv()?;

let index = match index {
Some(s) => s,
None => {
// The sessions were full, evict one session, and re-perform post-recv
let evict_index = self.sess_mgr.get_lru();
self.evict_session(evict_index)?;
info!("Reattempting session creation");
self.sess_mgr.post_recv(&proto_rx)?.ok_or(Error::Invalid)?
}
let index = if let Some(s) = index {
s
} else {
// The sessions were full, evict one session, and re-perform post-recv
let evict_index = self.sess_mgr.get_lru();
self.evict_session(evict_index)?;
info!("Reattempting session creation");
self.sess_mgr.post_recv(&proto_rx)?.ok_or(Error::Invalid)?
};
let mut session = self.sess_mgr.get_session_handle(index);

@@ -352,7 +347,7 @@ impl ExchangeMgr {
}
}
for (exch_id, _) in to_purge.iter() {
self.exchanges.remove(&*exch_id);
self.exchanges.remove(exch_id);
}
}

@@ -370,7 +365,7 @@ impl ExchangeMgr {
// As per the spec, we need to send a CLOSE here

let mut session = self.sess_mgr.get_session_handle(index);
let mut tx = Slab::<PacketPool>::new(Packet::new_tx()?).ok_or(Error::NoSpace)?;
let mut tx = Slab::<PacketPool>::try_new(Packet::new_tx()?).ok_or(Error::NoSpace)?;
secure_channel::common::create_sc_status_report(
&mut tx,
secure_channel::common::SCStatusCodes::CloseSession,

@@ -408,13 +403,13 @@ impl ExchangeMgr {
Ok(())
}

pub fn add_session(&mut self, clone_data: CloneData) -> Result<SessionHandle, Error> {
let sess_idx = match self.sess_mgr.clone_session(&clone_data) {
pub fn add_session(&mut self, clone_data: &CloneData) -> Result<SessionHandle, Error> {
let sess_idx = match self.sess_mgr.clone_session(clone_data) {
Ok(idx) => idx,
Err(Error::NoSpace) => {
let evict_index = self.sess_mgr.get_lru();
self.evict_session(evict_index)?;
self.sess_mgr.clone_session(&clone_data)?
self.sess_mgr.clone_session(clone_data)?
}
Err(e) => {
return Err(e);

@@ -437,6 +432,7 @@ impl fmt::Display for ExchangeMgr {
}

#[cfg(test)]
#[allow(clippy::bool_assert_comparison)]
mod tests {

use crate::{

@@ -496,8 +492,8 @@ mod tests {
let mut peer_sess_id = 100;
for _ in 1..count {
let clone_data = get_clone_data(peer_sess_id, local_sess_id);
match mgr.add_session(clone_data) {
Ok(s) => (assert_eq!(peer_sess_id, s.get_peer_sess_id())),
match mgr.add_session(&clone_data) {
Ok(s) => assert_eq!(peer_sess_id, s.get_peer_sess_id()),
Err(Error::NoSpace) => break,
_ => {
panic!("Couldn't, create session");

@@ -558,7 +554,7 @@ mod tests {
for i in 1..(MAX_SESSIONS + 1) {
// Now purposefully overflow the sessions by adding another session
let session = mgr
.add_session(get_clone_data(new_peer_sess_id, new_local_sess_id))
.add_session(&get_clone_data(new_peer_sess_id, new_local_sess_id))
.unwrap();
assert_eq!(session.get_peer_sess_id(), new_peer_sess_id);
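Alongside the borrow change, ExchangeMgr::add_session keeps its admission policy: when the session table reports NoSpace, the least-recently-used session is evicted and the insert is retried once. A minimal sketch of that policy under invented names (SessionTable, the capacity, and the tick-based LRU are all placeholders for the example, not the crate's types):

    use std::collections::HashMap;

    const MAX_SESSIONS: usize = 3; // illustrative capacity only

    /// Toy session table with a last-used tick per entry.
    struct SessionTable {
        tick: u64,
        sessions: HashMap<u16, u64>, // session id -> last-used tick
    }

    impl SessionTable {
        fn new() -> Self {
            Self { tick: 0, sessions: HashMap::new() }
        }

        fn try_insert(&mut self, id: u16) -> Result<(), ()> {
            if self.sessions.len() >= MAX_SESSIONS {
                return Err(()); // stands in for Error::NoSpace
            }
            self.tick += 1;
            self.sessions.insert(id, self.tick);
            Ok(())
        }

        fn lru(&self) -> Option<u16> {
            self.sessions.iter().min_by_key(|(_, t)| **t).map(|(id, _)| *id)
        }

        /// Mirrors the add_session() flow above: on NoSpace, evict the LRU entry and retry once.
        fn add_session(&mut self, id: u16) -> Result<(), ()> {
            match self.try_insert(id) {
                Ok(()) => Ok(()),
                Err(()) => {
                    let evict = self.lru().ok_or(())?;
                    self.sessions.remove(&evict);
                    self.try_insert(id)
                }
            }
        }
    }

    fn main() {
        let mut table = SessionTable::new();
        for id in 1..=3 {
            table.add_session(id).unwrap();
        }
        table.add_session(4).unwrap(); // evicts session 1, the least recently used
        assert!(!table.sessions.contains_key(&1));
        assert!(table.sessions.contains_key(&4));
    }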
@@ -118,7 +118,7 @@ impl Mgr {
// If a new session was created, add it
let _ = self
.exch_mgr
.add_session(clone_data)
.add_session(&clone_data)
.map_err(|e| error!("Error adding new session {:?}", e));
}
_ => {

@@ -170,6 +170,6 @@ impl Mgr {
}

fn new_tx() -> Result<BoxSlab<PacketPool>, Error> {
Slab::<PacketPool>::new(Packet::new_tx()?).ok_or(Error::PacketPoolExhaust)
Slab::<PacketPool>::try_new(Packet::new_tx()?).ok_or(Error::PacketPoolExhaust)
}
}

@@ -15,7 +15,6 @@
* limitations under the License.
*/

pub mod exchange;
pub mod mgr;
pub mod mrp;

@@ -138,8 +138,8 @@ impl<'a> Packet<'a> {

pub fn as_borrow_slice(&mut self) -> &mut [u8] {
match &mut self.data {
Direction::Rx(pb, _) => (pb.as_borrow_slice()),
Direction::Tx(wb) => (wb.as_mut_slice()),
Direction::Rx(pb, _) => pb.as_borrow_slice(),
Direction::Tx(wb) => wb.as_mut_slice(),
}
}

@@ -15,7 +15,6 @@
* limitations under the License.
*/

use std::sync::Once;

use async_channel::{bounded, Receiver, Sender};

@@ -466,7 +466,8 @@ impl SessionMgr {
}

pub fn recv(&mut self) -> Result<(BoxSlab<PacketPool>, Option<usize>), Error> {
let mut rx = Slab::<PacketPool>::new(Packet::new_rx()?).ok_or(Error::PacketPoolExhaust)?;
let mut rx =
Slab::<PacketPool>::try_new(Packet::new_rx()?).ok_or(Error::PacketPoolExhaust)?;

let network = self.network.as_ref().ok_or(Error::NoNetworkInterface)?;

@@ -117,22 +117,19 @@ mod tests {

assert_eq!(buf.le_u8().unwrap(), 0x01);

match buf.le_u16() {
Ok(_) => panic!("This should have returned error"),
Err(_) => (),
if buf.le_u16().is_ok() {
panic!("This should have returned error")
}

match buf.le_u32() {
Ok(_) => panic!("This should have returned error"),
Err(_) => (),
if buf.le_u32().is_ok() {
panic!("This should have returned error")
}

// Now consume the leftover byte
assert_eq!(buf.le_u8().unwrap(), 65);

match buf.le_u8() {
Ok(_) => panic!("This should have returned error"),
Err(_) => (),
if buf.le_u8().is_ok() {
panic!("This should have returned error")
}
assert_eq!(buf.as_slice(), []);
}

@@ -161,9 +158,8 @@ mod tests {
assert_eq!(buf.le_u8().unwrap(), 0x01);
assert_eq!(buf.le_u16().unwrap(), 65);
assert_eq!(buf.le_u32().unwrap(), 0xcafebabe);
match buf.tail(5) {
Ok(_) => panic!("This should have returned error"),
Err(_) => (),
if buf.tail(5).is_ok() {
panic!("This should have returned error")
}
assert_eq!(buf.tail(2).unwrap(), [0xc, 0xd]);
}

@@ -199,24 +199,20 @@ mod tests {
buf.le_u64(0xcafebabecafebabe).unwrap();
// Now the buffer is fully filled up, so no further puts will happen

match buf.le_u8(1) {
Ok(_) => panic!("Should return error"),
_ => (),
if buf.le_u8(1).is_ok() {
panic!("Should return error")
}

match buf.le_u16(65) {
Ok(_) => panic!("Should return error"),
_ => (),
if buf.le_u16(65).is_ok() {
panic!("Should return error")
}

match buf.le_u32(0xcafebabe) {
Ok(_) => panic!("Should return error"),
_ => (),
if buf.le_u32(0xcafebabe).is_ok() {
panic!("Should return error")
}

match buf.le_u64(0xcafebabecafebabe) {
Ok(_) => panic!("Should return error"),
_ => (),
if buf.le_u64(0xcafebabecafebabe).is_ok() {
panic!("Should return error")
}
}

@@ -267,9 +263,8 @@ mod tests {

buf.le_u16(65).unwrap();
let new_slice: [u8; 5] = [0xaa, 0xbb, 0xcc, 0xdd, 0xee];
match buf.copy_from_slice(&new_slice) {
Ok(_) => panic!("This should have returned error"),
Err(_) => (),
if buf.copy_from_slice(&new_slice).is_ok() {
panic!("This should have returned error")
}
}

@@ -296,9 +291,8 @@ mod tests {

buf.le_u16(65).unwrap();
let new_slice: [u8; 6] = [0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff];
match buf.prepend(&new_slice) {
Ok(_) => panic!("Prepend should return error"),
Err(_) => (),
if buf.prepend(&new_slice).is_ok() {
panic!("Prepend should return error")
}
}
@@ -121,7 +121,7 @@ impl ClusterType for EchoCluster {
) -> Result<(), IMStatusCode> {
match num::FromPrimitive::from_u16(attr.attr_id) {
Some(Attributes::AttWriteList) => {
attr_list_write(attr, data, |op, data| self.write_attr_list(op, data))
attr_list_write(attr, data, |op, data| self.write_attr_list(&op, data))
}
_ => self.base.write_attribute_from_tlv(attr.attr_id, data),
}

@@ -132,7 +132,7 @@ impl ClusterType for EchoCluster {
.cmd
.path
.leaf
.map(|c| num::FromPrimitive::from_u32(c))
.map(num::FromPrimitive::from_u32)
.ok_or(IMStatusCode::UnsupportedCommand)?
.ok_or(IMStatusCode::UnsupportedCommand)?;
match cmd {

@@ -206,7 +206,7 @@ impl EchoCluster {

fn write_attr_list(
&mut self,
op: ListOperation,
op: &ListOperation,
data: &TLVElement,
) -> Result<(), IMStatusCode> {
let tc_handle = TestChecker::get().unwrap();

@@ -224,16 +224,16 @@ impl EchoCluster {
}
ListOperation::EditItem(index) => {
let data = data.u16().map_err(|_| IMStatusCode::Failure)?;
if tc.write_list[index as usize].is_some() {
tc.write_list[index as usize] = Some(data);
if tc.write_list[*index as usize].is_some() {
tc.write_list[*index as usize] = Some(data);
Ok(())
} else {
Err(IMStatusCode::InvalidAction)
}
}
ListOperation::DeleteItem(index) => {
if tc.write_list[index as usize].is_some() {
tc.write_list[index as usize] = None;
if tc.write_list[*index as usize].is_some() {
tc.write_list[*index as usize] = None;
Ok(())
} else {
Err(IMStatusCode::InvalidAction)

@@ -94,6 +94,7 @@ impl ImEngine {
pid: 11,
hw_ver: 12,
sw_ver: 13,
serial_no: "aabbccdd".to_string(),
};
let dev_att = Box::new(DummyDevAtt {});
let fabric_mgr = Arc::new(FabricMgr::new().unwrap());

@@ -136,7 +137,7 @@ impl ImEngine {
pub fn process<'a>(&mut self, input: &ImInput, data_out: &'a mut [u8]) -> (u8, &'a mut [u8]) {
let mut new_exch = Exchange::new(1, 0, exchange::Role::Responder);
// Choose whether to use a new exchange, or use the one from the ImEngine configuration
let mut exch = self.exch.as_mut().unwrap_or_else(|| &mut new_exch);
let exch = self.exch.as_mut().unwrap_or(&mut new_exch);

let mut sess_mgr: SessionMgr = Default::default();

@@ -153,12 +154,9 @@ impl ImEngine {
);
let sess_idx = sess_mgr.clone_session(&clone_data).unwrap();
let sess = sess_mgr.get_session_handle(sess_idx);
let exch_ctx = ExchangeCtx {
exch: &mut exch,
sess,
};
let mut rx = Slab::<PacketPool>::new(Packet::new_rx().unwrap()).unwrap();
let tx = Slab::<PacketPool>::new(Packet::new_tx().unwrap()).unwrap();
let exch_ctx = ExchangeCtx { exch, sess };
let mut rx = Slab::<PacketPool>::try_new(Packet::new_rx().unwrap()).unwrap();
let tx = Slab::<PacketPool>::try_new(Packet::new_tx().unwrap()).unwrap();
// Create fake rx packet
rx.set_proto_id(0x01);
rx.set_proto_opcode(input.action as u8);

@@ -204,10 +204,10 @@ fn read_cluster_id_write_attr(im: &ImEngine, endpoint: u16) -> AttrValue {
let node = im.dm.node.read().unwrap();
let echo = node.get_cluster(endpoint, echo_cluster::ID).unwrap();

*echo
.base()
echo.base()
.read_attribute_raw(echo_cluster::Attributes::AttWrite as u16)
.unwrap()
.clone()
}

fn read_cluster_id_data_ver(im: &ImEngine, endpoint: u16) -> u32 {

@@ -238,6 +238,7 @@ fn test_read_wc_endpoint_wc_attribute() {
let attr_list_tlvs = get_tlvs(
&mut buf,
&[
GlobalElements::FeatureMap as u16,
GlobalElements::AttributeList as u16,
echo_cluster::Attributes::Att1 as u16,
echo_cluster::Attributes::Att2 as u16,

@@ -247,6 +248,14 @@ fn test_read_wc_endpoint_wc_attribute() {
);

let expected = &[
attr_data!(
GenericPath::new(
Some(0),
Some(echo_cluster::ID),
Some(GlobalElements::FeatureMap as u32),
),
ElementType::U8(0)
),
attr_data!(
GenericPath::new(
Some(0),

@@ -279,6 +288,14 @@ fn test_read_wc_endpoint_wc_attribute() {
),
ElementType::U32(echo_cluster::ATTR_CUSTOM_VALUE)
),
attr_data!(
GenericPath::new(
Some(1),
Some(echo_cluster::ID),
Some(GlobalElements::FeatureMap as u32),
),
ElementType::U8(0)
),
attr_data!(
GenericPath::new(
Some(1),

@@ -83,7 +83,7 @@ enum WriteResponse<'a> {
// Helper for handling Write Attribute sequences
fn handle_timed_write_reqs(
input: &[AttrData],
expected: WriteResponse,
expected: &WriteResponse,
timeout: u16,
delay: u16,
) -> DataModel {

@@ -159,10 +159,10 @@ fn test_timed_write_fail_and_success() {
];

// Test with incorrect handling
handle_timed_write_reqs(input, WriteResponse::TransactionError, 400, 500);
handle_timed_write_reqs(input, &WriteResponse::TransactionError, 400, 500);

// Test with correct handling
let dm = handle_timed_write_reqs(input, WriteResponse::TransactionSuccess(expected), 400, 0);
let dm = handle_timed_write_reqs(input, &WriteResponse::TransactionSuccess(expected), 400, 0);
assert_eq!(
AttrValue::Uint16(val0),
dm.read_attribute_raw(

@@ -190,7 +190,7 @@ enum TimedInvResponse<'a> {
// Helper for handling Invoke Command sequences
fn handle_timed_commands(
input: &[CmdData],
expected: TimedInvResponse,
expected: &TimedInvResponse,
timeout: u16,
delay: u16,
set_timed_request: bool,

@@ -222,7 +222,7 @@ fn handle_timed_commands(
Some(OpCode::StatusResponse)
);
let status_resp = StatusResp::from_tlv(&root).unwrap();
assert_eq!(status_resp.status, e);
assert_eq!(status_resp.status, *e);
}
}
dm

@@ -237,7 +237,7 @@ fn test_timed_cmd_success() {
let expected = &[echo_resp!(0, 10), echo_resp!(1, 30)];
handle_timed_commands(
input,
TimedInvResponse::TransactionSuccess(expected),
&TimedInvResponse::TransactionSuccess(expected),
400,
0,
true,

@@ -252,7 +252,7 @@ fn test_timed_cmd_timeout() {
let input = &[echo_req!(0, 5), echo_req!(1, 10)];
handle_timed_commands(
input,
TimedInvResponse::TransactionError(IMStatusCode::Timeout),
&TimedInvResponse::TransactionError(IMStatusCode::Timeout),
400,
500,
true,

@@ -267,7 +267,7 @@ fn test_timed_cmd_timedout_mismatch() {
let input = &[echo_req!(0, 5), echo_req!(1, 10)];
handle_timed_commands(
input,
TimedInvResponse::TransactionError(IMStatusCode::TimedRequestMisMatch),
&TimedInvResponse::TransactionError(IMStatusCode::TimedRequestMisMatch),
400,
0,
false,

@@ -276,7 +276,7 @@ fn test_timed_cmd_timedout_mismatch() {
let input = &[echo_req!(0, 5), echo_req!(1, 10)];
handle_timed_commands(
input,
TimedInvResponse::TransactionError(IMStatusCode::TimedRequestMisMatch),
&TimedInvResponse::TransactionError(IMStatusCode::TimedRequestMisMatch),
0,
0,
true,
@@ -136,8 +136,8 @@ fn handle_data(action: OpCode, data_in: &[u8], data_out: &mut [u8]) -> (DataMode
exch: &mut exch,
sess,
};
let mut rx = Slab::<PacketPool>::new(Packet::new_rx().unwrap()).unwrap();
let tx = Slab::<PacketPool>::new(Packet::new_tx().unwrap()).unwrap();
let mut rx = Slab::<PacketPool>::try_new(Packet::new_rx().unwrap()).unwrap();
let tx = Slab::<PacketPool>::try_new(Packet::new_tx().unwrap()).unwrap();
// Create fake rx packet
rx.set_proto_id(0x01);
rx.set_proto_opcode(action as u8);

@@ -47,7 +47,7 @@ impl Default for TlvArgs {
fn parse_tlvargs(ast: &DeriveInput) -> TlvArgs {
let mut tlvargs: TlvArgs = Default::default();

if ast.attrs.len() > 0 {
if !ast.attrs.is_empty() {
if let List(MetaList {
path,
paren_token: _,

@@ -87,7 +87,7 @@ fn parse_tlvargs(ast: &DeriveInput) -> TlvArgs {
}

fn parse_tag_val(field: &syn::Field) -> Option<u8> {
if field.attrs.len() > 0 {
if !field.attrs.is_empty() {
if let List(MetaList {
path,
paren_token: _,

@@ -110,8 +110,8 @@ fn parse_tag_val(field: &syn::Field) -> Option<u8> {
fn gen_totlv_for_struct(
fields: &syn::FieldsNamed,
struct_name: &proc_macro2::Ident,
tlvargs: TlvArgs,
generics: syn::Generics,
tlvargs: &TlvArgs,
generics: &syn::Generics,
) -> TokenStream {
let mut tag_start = tlvargs.start;
let datatype = format_ident!("start_{}", tlvargs.datatype);

@@ -127,7 +127,7 @@ fn gen_totlv_for_struct(
// keys.push(quote! { #literal_key_str });
idents.push(&field.ident);
// types.push(type_name.to_token_stream());
if let Some(a) = parse_tag_val(&field) {
if let Some(a) = parse_tag_val(field) {
tags.push(a);
} else {
tags.push(tag_start);

@@ -152,10 +152,10 @@ fn gen_totlv_for_struct(

/// Generate a ToTlv implementation for an enum
fn gen_totlv_for_enum(
data_enum: syn::DataEnum,
data_enum: &syn::DataEnum,
enum_name: &proc_macro2::Ident,
tlvargs: TlvArgs,
generics: syn::Generics,
tlvargs: &TlvArgs,
generics: &syn::Generics,
) -> TokenStream {
let mut tag_start = tlvargs.start;

@@ -232,9 +232,9 @@ pub fn derive_totlv(item: TokenStream) -> TokenStream {
..
}) = ast.data
{
gen_totlv_for_struct(fields, name, tlvargs, generics)
gen_totlv_for_struct(fields, name, &tlvargs, &generics)
} else if let syn::Data::Enum(data_enum) = ast.data {
gen_totlv_for_enum(data_enum, name, tlvargs, generics)
gen_totlv_for_enum(&data_enum, name, &tlvargs, &generics)
} else {
panic!(
"Derive ToTLV - Only supported Struct for now {:?}",

@@ -248,7 +248,7 @@ fn gen_fromtlv_for_struct(
fields: &syn::FieldsNamed,
struct_name: &proc_macro2::Ident,
tlvargs: TlvArgs,
generics: syn::Generics,
generics: &syn::Generics,
) -> TokenStream {
let mut tag_start = tlvargs.start;
let lifetime = tlvargs.lifetime;

@@ -260,7 +260,7 @@ fn gen_fromtlv_for_struct(

for field in fields.named.iter() {
let type_name = &field.ty;
if let Some(a) = parse_tag_val(&field) {
if let Some(a) = parse_tag_val(field) {
// TODO: The current limitation with this is that a hard-coded integer
// value has to be mentioned in the tagval attribute. This is because
// our tags vector is for integers, and pushing an 'identifier' on it

@@ -330,10 +330,10 @@ fn gen_fromtlv_for_struct(

/// Generate a FromTlv implementation for an enum
fn gen_fromtlv_for_enum(
data_enum: syn::DataEnum,
data_enum: &syn::DataEnum,
enum_name: &proc_macro2::Ident,
tlvargs: TlvArgs,
generics: syn::Generics,
generics: &syn::Generics,
) -> TokenStream {
let mut tag_start = tlvargs.start;
let lifetime = tlvargs.lifetime;

@@ -422,9 +422,9 @@ pub fn derive_fromtlv(item: TokenStream) -> TokenStream {
..
}) = ast.data
{
gen_fromtlv_for_struct(fields, name, tlvargs, generics)
gen_fromtlv_for_struct(fields, name, tlvargs, &generics)
} else if let syn::Data::Enum(data_enum) = ast.data {
gen_fromtlv_for_enum(data_enum, name, tlvargs, generics)
gen_fromtlv_for_enum(&data_enum, name, tlvargs, &generics)
} else {
panic!(
"Derive FromTLV - Only supported Struct for now {:?}",