Compare commits


147 commits

Author SHA1 Message Date
Kedar Sovani
97c9619dba
Merge pull request #94 from ssnover/zeroconf
Add feature zeroconf in order to support avahi mDNS registration via zeroconf crate
2023-10-06 22:33:59 +05:30
Kedar Sovani
89f2bc4d98
Merge pull request #112 from ivmarkov/ci
Fix nightly CI
2023-10-06 22:33:14 +05:30
Kedar Sovani
7b55e7fbfb
Merge pull request #110 from jasta/fix-ipv6-mcast
[mdns] Fix multicast routing error on esp32 (and likely other platforms)
2023-10-06 22:32:52 +05:30
ivmarkov
152419472b Fix nightly CI 2023-10-06 14:24:49 +00:00
Josh Guilfoyle
8faa31a63a Manually fix clippy error related to unused import in conditional branch 2023-10-03 20:13:27 -07:00
Josh Guilfoyle
d84402f571 [mdns] Fix multicast routing error on esp32 (and likely other platforms)
According to the RFC
(https://datatracker.ietf.org/doc/html/rfc2553#section-3.3), it is
necessary to disambiguate link-local addresses with the interface index
(in the scope_id field).  Lacking this field, newer versions of lwip that
support proper IPv6 scopes will yield EHOSTUNREACH (Host unreachable).
Other implementations like on Linux and OS X will likely be affected by
the lack of this field for more complex networking setups.

Fixes #100

Run cargo fmt again

Run cargo clippy again

Revert "Run cargo clippy again"

This reverts commit e3bba1f6367172d9ecd07c8c8fb7263cda40e8f6.
2023-10-03 20:07:00 -07:00
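The scope_id requirement explained in the commit message above can be illustrated with a short, self-contained Rust sketch (hypothetical, plain `std::net` code rather than the crate's actual transport layer); the interface index used here is a placeholder that a real application would look up via `if_nametoindex` or its platform's netif API:

```rust
use std::net::{Ipv6Addr, SocketAddrV6, UdpSocket};

fn main() -> std::io::Result<()> {
    // mDNS IPv6 multicast group (ff02::fb) and port 5353.
    let mdns_group = Ipv6Addr::new(0xff02, 0, 0, 0, 0, 0, 0, 0xfb);

    // Hypothetical interface index; 0 would mean "unspecified", which is the
    // ambiguity RFC 2553 section 3.3 warns about for link-local destinations.
    let interface_index: u32 = 2;

    let socket = UdpSocket::bind("[::]:0")?;
    socket.join_multicast_v6(&mdns_group, interface_index)?;

    // The 4th argument is scope_id: it pins the link-local destination to one
    // interface. Leaving it at 0 is what yields EHOSTUNREACH on IPv6 stacks
    // that enforce scopes (such as newer lwip builds).
    let dest = SocketAddrV6::new(mdns_group, 5353, 0, interface_index);
    socket.send_to(b"mdns probe", dest)?;

    Ok(())
}
```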
Kedar Sovani
e39fd18b73
Merge pull request #104 from ivmarkov/main
Fix CI
2023-10-01 22:52:57 +05:30
ivmarkov
29a5c14590 Fix CI 2023-10-01 07:11:15 +00:00
Kedar Sovani
0fe4ae906f
Merge pull request #93 from ssnover/std-box-dyn-error
Add a boxed dyn error to make error info easier to access on hosted systems
2023-09-25 22:35:58 +05:30
Kedar Sovani
7ef08ad559
Merge pull request #97 from ssnover/tlv-parse-cert-extensions
Handle non-consecutive tag numbers in Sigma3 certificate extensions
2023-09-25 12:24:43 +05:30
Kedar Sovani
d02f0ba834
Merge pull request #99 from ivmarkov/main
#98 with signed CLA
2023-09-25 12:19:02 +05:30
ivmarkov
793870c6cc #98 with signed CLA 2023-09-25 06:17:51 +00:00
Shane Snover
7caf1febe3 Pull in the change for unsoundness in lifetime to get the build working 2023-09-24 20:04:33 -06:00
Shane Snover
c4dcfa540a Merge branch 'main' into zeroconf 2023-09-24 19:42:51 -06:00
Shane Snover
53b8c9ffd7 Bump the zeroconf version 2023-09-24 19:05:23 -06:00
Shane Snover
f8cd402153 Use the unordered parameter to the tlv derive macro 2023-09-19 22:55:34 -06:00
Shane Snover
5cda85898b Add comment explaining the difference between ICAC1_SUCCESS and ICAC2_SUCCESS 2023-09-17 17:49:15 -06:00
Shane Snover
629feea4ec Oops too much of the structure 2023-09-17 17:48:06 -06:00
Shane Snover
ed227f77cd Add ICAC cert test vector which exercises the non-consecutive extensions tag parsing 2023-09-17 17:15:42 -06:00
Shane Snover
21536dd10e Add test to verify the cert can be deserialized out of order, serialized again, and then when deserialized it matches 2023-09-17 17:07:40 -06:00
Shane Snover
5e81647291 Implement FromTLV for Extensions manually 2023-09-17 16:42:37 -06:00
Shane Snover
c064fb12a4 Update with service name 2023-09-17 11:23:44 -06:00
Kedar Sovani
8b73cbd71a
Merge pull request #95 from ssnover/fix-armfailsafe-command-arg-types
Update the types of the arguments for General Commissioning cluster's ArmFailSafe command to match spec
2023-09-15 11:52:58 +05:30
Shane Snover
c74c46d3ed Update the types of the arguments for General Commissioning cluster's ArmFailSafe command to match spec 2023-09-12 18:58:11 -06:00
Shane Snover
b93875658b Update with fork for zeroconf 2023-09-12 18:23:42 -06:00
Shane Snover
65661f13db Implement linux mdns with avahi/zeroconf 2023-09-06 23:43:09 -06:00
Kedar Sovani
320d1ec989
Merge pull request #88 from ivmarkov/session-eviction
Handle out of sessions and out of exchanges
2023-08-31 11:45:12 +05:30
ssnover
c443a72b42 Add a boxed dyn error to make error info easier to access on hosted systems 2023-08-29 15:04:58 -06:00
Kedar Sovani
188fe1b5af
Merge pull request #92 from KorvinSzanto/patch-1
Fix typo in target for esp32c6
2023-08-28 07:19:51 +05:30
Korvin Szanto
addd38c4a6
Fix typo in target for esp32c6 2023-08-27 12:36:02 -07:00
Kedar Sovani
3f48e2d9c9
Merge pull request #91 from ivmarkov/qr
no_std QR code rendering
2023-08-24 17:39:01 +05:30
ivmarkov
b89539c8c6 no_std QR code rendering 2023-08-20 18:56:12 +00:00
ivmarkov
e171e33510 Handle out of sessions and out of exchanges 2023-08-17 05:39:56 +00:00
Kedar Sovani
4c347c0c0b
Merge pull request #87 from ssnover/fix-documentation-page-example
Fix the example section of the cargo generated documentation
2023-08-16 11:54:31 +05:30
ssnover
817138b355 Use ignore for now until this example is fixed 2023-08-15 13:41:13 -06:00
ssnover
d49288d437 Put the example code in code tags 2023-08-15 13:28:26 -06:00
Kedar Sovani
bd166e4597
Merge pull request #85 from kedars/bugfix/ios_fixes_part2
Multiple fixes for working with iOS
2023-08-12 19:11:26 +05:30
Kedar Sovani
e305ec1d89 Bump-up crate version 2023-08-09 15:24:33 +05:30
Kedar Sovani
0319ece0ab Bump up the stack utilisation 2023-08-09 13:35:23 +05:30
Kedar Sovani
46ef8ef596 ACL: For Writes, perform all ACL checks before any *write* begins 2023-08-09 12:15:00 +05:30
Kedar Sovani
18979feeca ACL: Targets in ACL entries are NULLable 2023-08-09 12:15:00 +05:30
Kedar Sovani
4bb0831168 BasicInfo: Add ProductName/VendorName/NodeLabel 2023-08-09 12:14:55 +05:30
Kedar Sovani
ce3bf6b6fb
Merge pull request #84 from ivmarkov/main
Fix CI by addressing the Clippy warnings
2023-08-04 06:43:55 +05:30
ivmarkov
7a601b191e Clippy 2023-08-03 14:30:25 +00:00
Kedar Sovani
227bb77ba1
Merge pull request #81 from kedars/bugfix/multiple_ios_fixes
Multiple fixes for iOS support
2023-08-01 17:38:33 +05:30
Kedar Sovani
21f0bb4e3a Fix tests for data model updates 2023-08-01 17:33:19 +05:30
Kedar Sovani
faf3c60946 Placeholder clusters that show in 'server-list' to make iOS happy 2023-08-01 14:01:42 +05:30
Kedar Sovani
96ceaa4263 Weird: Disable #[allow(dead_code)], as it was causing incorrect behaviour
For some reason, this caused the 'start' attribute to be ignored in the
tlvargs. Need to investigate further.
2023-08-01 14:01:42 +05:30
Kedar Sovani
eceef8b207 CASE: Support ICAC Optional in AddNocReq and AddTrusted RCA in CASE 2023-08-01 14:01:42 +05:30
Kedar Sovani
dfd2f7e56e ASN1: Handle special case for Not-After == 0 2023-08-01 14:01:42 +05:30
Kedar Sovani
6bec796bad GenComm: BasicCommInfo Attribute should return the entire structure 2023-08-01 14:01:42 +05:30
Kedar Sovani
e02b316030
Merge pull request #80 from kedars/bugfix/ios_support
Support Attributes of Nw Commissioning Cluster
2023-08-01 14:01:05 +05:30
Kedar Sovani
50f18dbbee
Merge pull request #82 from ivmarkov/main
Do not hold on to RefCell borrows across await points
2023-08-01 14:00:49 +05:30
ivmarkov
f53f3b789d Do not hold on to RefCell borrows across await points 2023-08-01 06:49:42 +00:00
Kedar Sovani
7f8ea83403 NwCommissioning: Include mandatory Attributes of NwCommissioning Cluster 2023-07-29 15:14:35 +05:30
Kedar Sovani
54e64014a5 DataModel: Quality discriminants easier to align with the names in the spec 2023-07-29 14:46:14 +05:30
Kedar Sovani
ede024cf71
Merge pull request #79 from ivmarkov/adjustments
Adjust the authors header
2023-07-28 11:59:32 +05:30
ivmarkov
29127e7e07 Adjust the authors header 2023-07-28 06:22:13 +00:00
Kedar Sovani
062f669369
Merge pull request #78 from cheat-sc/fix-a-typo
mdns: builtin: Fix a typo
2023-07-25 09:10:49 +05:30
Kedar Sovani
13c2504d4c
Merge pull request #77 from thekuwayama/fix__quality
modify: Quality bits into separated flags
2023-07-25 09:10:02 +05:30
Kedar Sovani
1fccb18464
Merge pull request #75 from kedars/bugfix/osx_support
Fix broken build on OS-X
2023-07-25 09:09:48 +05:30
Kedar Sovani
2b00a886a7
Merge pull request #76 from ivmarkov/main
cargo fmt and clippy; build and test most features; publish actions
2023-07-25 09:09:34 +05:30
Shohei Maruyama
ae72d1cd31 mdns: builtin: Fix a typo
This commit fixes just a typo.

Signed-off-by: Shohei Maruyama <cheat.sc.linux@outlook.com>
2023-07-25 03:53:10 +09:00
Kedar Sovani
ded50dd780 Fix broken build on OS-X 2023-07-24 11:54:16 +05:30
thekuwayama
b3224d2b40 add SN Quality 2023-07-23 22:41:53 +09:00
thekuwayama
5a25904a07 modify: Quality bits into separated flags 2023-07-23 18:49:28 +09:00
ivmarkov
d2d5571755 Fix badges 2023-07-23 07:25:38 +00:00
ivmarkov
50b2433fb5 Address review feedback 2023-07-23 07:13:54 +00:00
ivmarkov
92b24920ce cargo fmt and clippy; build and test most features; publish actions 2023-07-22 16:29:50 +00:00
Kedar Sovani
b73c65d8b6
Merge pull request #74 from ivmarkov/main
Rename matter(-iot) to rs-matter
2023-07-22 18:08:56 +05:30
ivmarkov
bafedb022b Rename matter(-iot) to rs-matter; matter_macro_derive to rs-matter-macros; tlv_tool to just tlv 2023-07-22 10:31:29 +00:00
Kedar Sovani
6bbac0b6e9
Merge pull request #72 from ivmarkov/sequential-embassy-net
no_std + async support
2023-07-22 15:11:29 +05:30
ivmarkov
91e13292da Remove the note referring to the no_std and sequential branches 2023-07-22 07:00:53 +00:00
ivmarkov
916f2148f8 Simplify API by combining Matter, Transport and TransportRunner; simplify Mdns and Psm runners 2023-07-22 05:50:02 +00:00
ivmarkov
71b9a578d0 Remove embassy-net features that matter-rs is not using 2023-07-21 12:15:12 +00:00
ivmarkov
263279e714 Make multicast ipv6 optional 2023-07-21 12:15:12 +00:00
ivmarkov
aa2d5dfe20 Compatibility with embassy-net fixed multicast support 2023-07-21 12:15:12 +00:00
ivmarkov
24cdf079a6 New helper APIs in Transport 2023-07-21 12:15:12 +00:00
ivmarkov
0d73ba74ee UDP stack based on embassy-net 2023-07-21 12:15:12 +00:00
ivmarkov
0eecce5f8d UDP stack based on embassy-net 2023-07-21 12:15:12 +00:00
Kedar Sovani
762438ca8e on_off_light: Save ACLs and Fabrics to PSM 2023-07-21 12:15:12 +00:00
ivmarkov
9576fd8d9a Fix #60 2023-07-21 12:15:12 +00:00
ivmarkov
7f9ccbc38d Sequential Exchange API 2023-07-21 12:15:12 +00:00
ivmarkov
44e01a5881 Configurable parts_list in descriptor 2023-07-21 12:15:12 +00:00
ivmarkov
831853630b Add from/to TLV for i16, i32 and i64 2023-07-21 12:15:12 +00:00
ivmarkov
879f816438 More comments for tailoring the example for no_std 2023-07-21 12:15:12 +00:00
ivmarkov
5b9fd502c7 Fix the no_std build 2023-07-21 12:15:12 +00:00
ivmarkov
62aa69202f Workaround broken join_multicast_v4 on ESP-IDF 2023-07-21 12:15:12 +00:00
ivmarkov
e8babedd87 Support for ESP-IDF build 2023-07-21 12:15:12 +00:00
ivmarkov
488ef5b9f0 Proper mDNS responder 2023-07-21 12:15:12 +00:00
ivmarkov
b882aad1ff Clippy 2023-07-21 12:15:12 +00:00
ivmarkov
c0d1b85d9d Default mDns impl 2023-07-21 12:15:11 +00:00
ivmarkov
de3d3de004 Make Matter covariant over its lifetime 2023-07-21 12:15:11 +00:00
ivmarkov
1b879f1a5b Simplify main user-facing API 2023-07-21 12:15:11 +00:00
ivmarkov
8e9d8887da Fix a bug in mDNS 2023-07-21 12:15:11 +00:00
ivmarkov
b94484b67e Make sure nix is not brought in no-std compiles 2023-07-21 12:15:11 +00:00
ivmarkov
2cde37899d Make the example working again 2023-07-21 12:15:11 +00:00
ivmarkov
443324a764 More inlines 2023-07-21 12:15:11 +00:00
ivmarkov
931e30601e Clippy 2023-07-21 12:15:11 +00:00
ivmarkov
357eb73c6f Control memory by removing implicit copy 2023-07-21 12:15:11 +00:00
ivmarkov
1e6cd69de8 built-in mDNS; memory optimizations 2023-07-21 12:15:11 +00:00
ivmarkov
bd61c95c7d no_std needs default features switched off for several crates 2023-07-21 12:15:11 +00:00
ivmarkov
870ae6f21c Move MATTER_PORT outside of STD-only udp module 2023-07-21 12:15:11 +00:00
ivmarkov
592d1ee028 Just use time-rs in no_std mode 2023-07-21 12:15:11 +00:00
ivmarkov
a4b8b53014 Builds for STD with ESP IDF 2023-07-21 12:15:10 +00:00
ivmarkov
9d59c79674 Colorizing is now no_std compatible 2023-07-21 12:12:55 +00:00
ivmarkov
e741cab89d More crypto fixes 2023-07-21 12:12:55 +00:00
ivmarkov
695869f13a Fix compilation errors in crypto 2023-07-21 12:12:55 +00:00
ivmarkov
06b0fcd6f5 Fix no_std errors 2023-07-21 12:12:55 +00:00
ivmarkov
89014ed7f2 Remove heapless::String from QR API 2023-07-21 12:12:55 +00:00
imarkov
974ac4d1d8 Optional feature to capture stacktrace on error 2023-07-21 12:12:55 +00:00
ivmarkov
3dccc0d710 Persistence - trace info 2023-07-21 12:12:55 +00:00
ivmarkov
934ecb9165 Persistence bugfixing 2023-07-21 12:12:55 +00:00
ivmarkov
86e01a0a1b Simple persistence via TLV 2023-07-21 12:12:55 +00:00
ivmarkov
4b39884f6f Bugfix: unnecessary struct container 2023-07-21 12:12:55 +00:00
ivmarkov
e8e847cea6 Bugfix: subscription_id was not sent 2023-07-21 12:12:55 +00:00
ivmarkov
076ba06e07 Bugfix: missing descriptor cluster 2023-07-21 12:12:55 +00:00
ivmarkov
635be2c35a Error log on arm failure 2023-07-21 12:12:55 +00:00
ivmarkov
2a57ecbd87 Bugfix: only report devtype for the queried endpoint 2023-07-21 12:12:55 +00:00
ivmarkov
09a523fc50 TX packets are reused; need way to reset them 2023-07-21 12:12:55 +00:00
ivmarkov
2fc4e6ddcf Root cert buffer too short 2023-07-21 12:12:55 +00:00
ivmarkov
9964466138 MRP standalone ack messages should not be acknowledged 2023-07-21 12:12:55 +00:00
ivmarkov
f804c21c0b Bugfix: fabric adding wrongly started at index 0 2023-07-21 12:12:55 +00:00
ivmarkov
f9536be1e3 Bugfix: two separate failsafe instances were used 2023-07-21 12:12:55 +00:00
ivmarkov
b2805570ea Restore transaction completion code 2023-07-21 12:12:55 +00:00
ivmarkov
9a23a2af2d Bugfix: arm failsafe was reporting wrong status 2023-07-21 12:12:55 +00:00
ivmarkov
7ef7e93eb4 Heap-allocated packets not necessary; no_std and no-alloc build supported end-to-end 2023-07-21 12:12:55 +00:00
ivmarkov
8b3bb9527c Comm with chip-tool 2023-07-21 12:12:54 +00:00
ivmarkov
36011c2e3c Actually add the bonjour feature 2023-07-21 12:12:54 +00:00
ivmarkov
eb3c9cdfb1 Cleanup a bit the mDns story 2023-07-21 12:12:54 +00:00
ivmarkov
2ea31432d5 On-off example now buildable 2023-07-21 12:12:54 +00:00
ivmarkov
faf5af3e1f no_std printing of QR code (kind of...) 2023-07-21 12:12:54 +00:00
ivmarkov
d558c73f8d Cleanup the dependencies as much as possible 2023-07-21 12:12:53 +00:00
ivmarkov
d934912007 Fix compilation error since the introduction of UtcCalendar 2023-07-21 12:09:56 +00:00
ivmarkov
688d7ea8d5 More ergonomic api when STD is available 2023-07-21 12:09:56 +00:00
ivmarkov
505fa39e82 Create new secure channel sessions without async-channel 2023-07-21 12:09:56 +00:00
ivmarkov
d9c99d73ee Chrono dep made optional 2023-07-21 12:09:56 +00:00
ivmarkov
bd87ac4ab3 Linux & MacOS mDNS services now implement the Mdns trait 2023-07-21 12:09:56 +00:00
ivmarkov
b4b549bb10 Fix several no_std incompatibilities 2023-07-21 12:09:56 +00:00
ivmarkov
bcbac965cd Remove allocations from Cert handling 2023-07-21 12:09:56 +00:00
ivmarkov
89aab6f444 Remove allocations from Base38 and QR calc 2023-07-21 12:09:56 +00:00
ivmarkov
fcc87bfaf4 Long reads and subscriptions reintroduced 2023-07-21 12:09:56 +00:00
ivmarkov
817d55aecc Start reintroducing long reads and subscriptions from mainline 2023-07-21 12:09:56 +00:00
ivmarkov
40f353c92e Support for no_std
Support for no_std

Further no_std compat
2023-07-21 12:09:56 +00:00
Kedar Sovani
4b85887f33
Merge pull request #70 from simlay/refactor-ci-to-use-matrix-for-crypto-backends
Refactored CIs to use a matrix for cryptography backend feature flags.
2023-07-20 17:27:11 +05:30
Sebastian Imlay
15497a611a Renamed Test-Linux-OpenSSL to Test-Linux 2023-07-16 02:22:14 -04:00
Sebastian Imlay
a8dce54478 Refactored CIs to use a matrix for cryptographic backend feature flags. 2023-07-16 02:16:03 -04:00
190 changed files with 17054 additions and 11722 deletions

@@ -1,26 +0,0 @@
name: Build-TLV-Tool
on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
env:
  CARGO_TERM_COLOR: always
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Build
        run: cd tools/tlv_tool; cargo build --verbose
      - name: Archive artifacts
        uses: actions/upload-artifact@v2
        with:
          name: tlv_tool
          path: tools/tlv_tool/target/debug/tlv_tool

.github/workflows/ci-tlv-tool.yml (new file)

@@ -0,0 +1,39 @@
name: CITLVTool
on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
  schedule:
    - cron: "20 7 * * *"
  workflow_dispatch:
env:
  CARGO_TERM_COLOR: always
jobs:
  build_tlv_tool:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Fmt
        run: cargo fmt -- --check
        working-directory: tools/tlv
      - name: Clippy
        run: cargo clippy --no-deps -- -Dwarnings
        working-directory: tools/tlv
      - name: Build
        run: cargo build
        working-directory: tools/tlv
      - name: Archive artifacts
        uses: actions/upload-artifact@v2
        with:
          name: tlv
          path: tools/tlv/target/debug/tlv

.github/workflows/ci.yml (new file)

@@ -0,0 +1,48 @@
name: CI
on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
  schedule:
    - cron: "50 6 * * *"
  workflow_dispatch:
env:
  RUST_TOOLCHAIN: nightly
  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  CARGO_TERM_COLOR: always
jobs:
  build_and_test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        crypto-backend: ['rustcrypto', 'mbedtls', 'openssl']
        features: ['', 'alloc', 'os']
        toolchain: ['stable', 'nightly']
    steps:
      - name: Rust
        if: matrix.toolchain == 'nightly'
        uses: dtolnay/rust-toolchain@v1
        with:
          toolchain: ${{ env.RUST_TOOLCHAIN }}
          components: rustfmt, clippy, rust-src
      - name: Checkout
        uses: actions/checkout@v3
      - name: Fmt
        run: cargo +${{ matrix.toolchain == 'nightly' && env.RUST_TOOLCHAIN || 'stable'}} fmt -- --check
      - name: Clippy
        run: cargo +${{ matrix.toolchain == 'nightly' && env.RUST_TOOLCHAIN || 'stable'}} clippy --no-deps --no-default-features --features ${{matrix.crypto-backend}},${{matrix.features}},${{ matrix.toolchain == 'nightly' && 'nightly' || ''}} -- -Dwarnings
      - name: Build
        run: cargo +${{ matrix.toolchain == 'nightly' && env.RUST_TOOLCHAIN || 'stable'}} build --no-default-features --features ${{matrix.crypto-backend}},${{matrix.features}},${{ matrix.toolchain == 'nightly' && 'nightly' || ''}}
      - name: Test
        if: matrix.features == 'os'
        run: cargo +${{ matrix.toolchain == 'nightly' && env.RUST_TOOLCHAIN || 'stable'}} test --no-default-features --features ${{matrix.crypto-backend}},${{matrix.features}},${{ matrix.toolchain == 'nightly' && 'nightly' || ''}} -- --test-threads=1

.github/workflows/publish-dry-run.yml (new file)

@@ -0,0 +1,17 @@
name: PublishDryRun
on: workflow_dispatch
env:
  CRATE_NAME: rs-matter
jobs:
  publish_dry_run:
    name: PublishDryRun
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: PublishDryRun
        run: cargo publish -p rs-matter --dry-run

@@ -0,0 +1,14 @@
name: PublishMacrosDryRun
on: workflow_dispatch
jobs:
  publish_macros_dry_run:
    name: PublishMacrosDryRun
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: PublishDryRun-Macros
        run: cargo publish -p rs-matter-macros --dry-run

.github/workflows/publish-macros.yml (new file)

@@ -0,0 +1,17 @@
name: PublishMacros
on: workflow_dispatch
jobs:
  publish_macros:
    name: PublishMacros
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Login
        run: cargo login ${{ secrets.CRATES_IO_TOKEN }}
      - name: Publish-Macros
        run: cargo publish -p rs-matter-macros

.github/workflows/publish.yml (new file)

@@ -0,0 +1,32 @@
name: Publish
on: workflow_dispatch
env:
  CRATE_NAME: rs-matter
jobs:
  publish:
    name: Publish
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Login
        run: cargo login ${{ secrets.CRATES_IO_TOKEN }}
      - name: Publish
        run: cargo publish -p rs-matter
      - name: Get the crate version from cargo
        run: |
          version=$(cd rs-matter; cargo metadata --format-version=1 --no-deps | jq -r ".packages[] | select(.name == \"${{env.CRATE_NAME}}\") | .version")
          echo "crate_version=$version" >> $GITHUB_ENV
          echo "${{env.CRATE_NAME}} version: $version"
      - name: Tag the new release
        uses: rickstaa/action-create-tag@v1
        with:
          tag: v${{env.crate_version}}
          message: "Release v${{env.crate_version}}"

@@ -1,22 +0,0 @@
name: Test-Linux-mbedTLS
on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
env:
  CARGO_TERM_COLOR: always
jobs:
  build_and_test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Build
        run: cd matter; cargo build --verbose --no-default-features --features crypto_mbedtls
      - name: Run tests
        run: cd matter; cargo test --verbose --no-default-features --features crypto_mbedtls -- --test-threads=1

@@ -1,22 +0,0 @@
name: Test-Linux-OpenSSL
on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
env:
  CARGO_TERM_COLOR: always
jobs:
  build_and_test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Build
        run: cd matter; cargo build --verbose --no-default-features --features crypto_openssl
      - name: Run tests
        run: cd matter; cargo test --verbose --no-default-features --features crypto_openssl -- --test-threads=1

@@ -1,22 +0,0 @@
name: Test-Linux-RustCrypto
on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]
env:
  CARGO_TERM_COLOR: always
jobs:
  build_and_test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Build
        run: cd matter; cargo build --verbose --no-default-features --features crypto_rustcrypto
      - name: Run tests
        run: cd matter; cargo test --verbose --no-default-features --features crypto_rustcrypto -- --test-threads=1

.gitignore

@@ -1,3 +1,4 @@
target
Cargo.lock
.vscode
.embuild

@@ -1,4 +1,17 @@
[workspace]
members = ["matter", "matter_macro_derive", "boxslab", "tools/tlv_tool"]
resolver = "2"
members = ["rs-matter", "rs-matter-macros"]
exclude = ["examples/*"]
exclude = ["examples/*", "tools/tlv"]
# For compatibility with ESP IDF
[patch.crates-io]
polling = { git = "https://github.com/esp-rs-compat/polling" }
socket2 = { git = "https://github.com/esp-rs-compat/socket2" }
[profile.release]
opt-level = 3
[profile.dev]
debug = true
opt-level = 3

@@ -1,59 +1,50 @@
# matter-rs: The Rust Implementation of Matter
# rs-matter: The Rust Implementation of Matter
![experimental](https://img.shields.io/badge/status-Experimental-red) [![license](https://img.shields.io/badge/license-Apache2-green.svg)](https://raw.githubusercontent.com/project-chip/matter-rs/main/LICENSE)
[![Test Linux (OpenSSL)](https://github.com/project-chip/matter-rs/actions/workflows/test-linux-openssl.yml/badge.svg)](https://github.com/project-chip/matter-rs/actions/workflows/test-linux-openssl.yml)
[![Test Linux (mbedTLS)](https://github.com/project-chip/matter-rs/actions/workflows/test-linux-mbedtls.yml/badge.svg)](https://github.com/project-chip/matter-rs/actions/workflows/test-linux-mbedtls.yml)
## Important Note
All development work is now ongoing in two other branches ([no_std](https://github.com/project-chip/matter-rs/tree/no_std) and [sequential](https://github.com/project-chip/matter-rs/tree/sequential) - explained below). The plan is one of these two branches to become the new `main`.
We highly encourage users to try out both of these branches (there is a working `onoff_light` example in both) and provide feedback.
### [no_std](https://github.com/project-chip/matter-rs/tree/no_std)
The purpose of this branch - as the name suggests - is to introduce `no_std` compatibility to the `matter-rs` library, so that it is possible to target constrained environments like MCUs which more often than not have no support for the Rust Standard library (threads, network sockets, filesystem and so on).
We have been successful in this endeavour. The library now only requires Rust `core` and runs on e.g. ESP32 baremental Rust targets.
When `matter-rs` is used on targets that do not support the Rust Standard Library, user is expected to provide the following:
- A `rand` function that can fill a `&[u8]` slice with random data
- An `epoch` function (a "current time" utility); note that since this utility is only used for measuring timeouts, it is OK to provide a function that e.g. measures elapsed millis since system boot, rather than something that tries to adhere to the UNIX epoch (1/1/1970)
- An MCU-specific UDP stack that the user would need to connect to the `matter-rs` library
Besides just having `no_std` compatibility, the `no_std` branch does not need an allocator. I.e. all structures internal to the `matter-rs` librarty are statically allocated.
Last but not least, the `no_std` branch by itself does **not** do any IO. In other words, it is "compute only" (as in, "give me a network packet and I'll produce one or more that you have to send; how you receive/send those is up to you"). Ditto for persisting fabrics and ACLs - it is up to the user to listen the matter stack for changes to those and persist.
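To make these requirements concrete, here is a minimal sketch of the two user-supplied hooks, assuming plain function-pointer shapes (`fn(&mut [u8])` for `rand`, `fn() -> core::time::Duration` for `epoch`), matching how the `onoff_light` example further down in this diff hands `epoch` and `rand` to `Matter::new`. The `hal_trng_fill` and `millis_since_boot` helpers are hypothetical placeholders for a platform HAL, not part of `rs-matter`:

```rust
use core::time::Duration;

// Hypothetical platform primitives; a real port would call into its HAL here.
fn hal_trng_fill(buf: &mut [u8]) {
    // Placeholder: read the hardware RNG into `buf`.
    buf.fill(0x42);
}

fn millis_since_boot() -> u64 {
    // Placeholder: read a monotonic tick counter.
    0
}

/// The `rand` hook: fill a byte slice with random data.
fn mcu_rand(buf: &mut [u8]) {
    hal_trng_fill(buf);
}

/// The `epoch` hook: only used for timeout bookkeeping, so elapsed time
/// since boot is acceptable, as noted above.
fn mcu_epoch() -> Duration {
    Duration::from_millis(millis_since_boot())
}

fn main() {
    // These two functions would be handed to Matter::new(...) in place of the
    // sys_epoch / sys_rand used by the std-based onoff_light example below.
    let mut nonce = [0u8; 16];
    mcu_rand(&mut nonce);
    let _uptime = mcu_epoch();
}
```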
### [sequential](https://github.com/project-chip/matter-rs/tree/sequential)
The `sequential` branch builds on top of the work implemented in the `no_std` branch by utilizing code implemented as `async` functions and methods. Committing to `async` has multiple benefits:
- (Internal for the library) We were able to turn several explicit state machines into implicit ones (after all, `async` is primarily about generating state machines automatically based on "sequential" user codee that uses the async/await language constructs - hence the name of the branch)
- (External, for the user) The ergonomics of the Exchange API in this branch (in other words, the "transport aspect of the Matter CSA spec) is much better, approaching that of dealing with regular TCP/IP sockets in the Rust Standard Library. This is only possible by utilizing async functions and methods, because - let's not forget - `matter-rs` needs to run on MCUs where native threading and task scheduling capabilities might not even exist, hence "sequentially-looking" request/response interaction can only be expressed asynchronously, or with explicit state machines.
- Certain pending concepts are much easier to implement via async functions and methods:
- Re-sending packets which were not acknowledged by the receiver yet (the MRP protocol as per the Matter spec)
- The "initiator" side of an exchange (think client clusters)
- This branch provides facilities to implement asynchronous read, write and invoke handling for server clusters, which is beneficial in certain scenarios (i.e. brdige devices)
The `async` metaphor however comes with a bit higher memory usage, due to not enough optimizations being implemented yet in the rust language when the async code is transpiled to state machines.
![experimental](https://img.shields.io/badge/status-Experimental-red)
[![license](https://img.shields.io/badge/license-Apache2-green.svg)](https://raw.githubusercontent.com/project-chip/matter-rs/main/LICENSE)
[![CI](https://github.com/project-chip/matter-rs/actions/workflows/ci.yml/badge.svg)](https://github.com/project-chip/matter-rs/actions/workflows/ci.yml)
[![CI - TLV](https://github.com/project-chip/matter-rs/actions/workflows/ci-tlv-tool.yml/badge.svg)](https://github.com/project-chip/matter-rs/actions/workflows/ci-tlv-tool.yml)
[![crates.io](https://img.shields.io/crates/v/rs-matter.svg)](https://crates.io/crates/rs-matter)
[![Matrix](https://img.shields.io/matrix/matter-rs:matrix.org?label=join%20matrix&color=BEC5C9&logo=matrix)](https://matrix.to/#/#matter-rs:matrix.org)
## Build
Building the library:
### Building the library
```
$ cargo build
```
Building the example:
### Building and running the example (Linux, MacOS X)
```
$ RUST_LOG="matter" cargo run --example onoff_light
$ cargo run --example onoff_light
```
With the chip-tool (the current tool for testing Matter) use the Ethernet commissioning mechanism:
### Building the example (Espressif's ESP-IDF)
* Install all build prerequisites described [here](https://github.com/esp-rs/esp-idf-template#prerequisites)
* Build with the following command line:
```
export MCU=esp32; export CARGO_TARGET_XTENSA_ESP32_ESPIDF_LINKER=ldproxy; export RUSTFLAGS="-C default-linker-libraries"; export WIFI_SSID=ssid;export WIFI_PASS=pass; cargo build --example onoff_light --no-default-features --features esp-idf --target xtensa-esp32-espidf -Zbuild-std=std,panic_abort
```
* If you are building for a different Espressif MCU, change the `MCU` variable, the `xtensa-esp32-espidf` target and the name of the `CARGO_TARGET_<esp-idf-target-uppercase>_LINKER` variable to match your MCU and its Rust target. Available Espressif MCUs and targets are:
* esp32 / xtensa-esp32-espidf
* esp32s2 / xtensa-esp32s2-espidf
* esp32s3 / xtensa-esp32s3-espidf
* esp32c3 / riscv32imc-esp-espidf
* esp32c5 / riscv32imc-esp-espidf
* esp32c6 / riscv32imac-esp-espidf
* Put in `WIFI_SSID` / `WIFI_PASS` the SSID & password for your wireless router
* Flash using the `espflash` utility described in the build prerequsites' link above
### Building the example (ESP32-XX baremetal or RP2040)
Coming soon!
## Test
With the `chip-tool` (the current tool for testing Matter) use the Ethernet commissioning mechanism:
```
$ chip-tool pairing code 12344321 <Pairing-Code>

@@ -1,9 +0,0 @@
[package]
name = "boxslab"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
bitmaps={version="3.2.0", features=[]}

@@ -1,237 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::{
mem::MaybeUninit,
ops::{Deref, DerefMut},
sync::Mutex,
};
// TODO: why is max bitmap size 64 a correct max size? Could we match
// boxslabs instead or store used/not used inside the box slabs themselves?
const MAX_BITMAP_SIZE: usize = 64;
pub struct Bitmap {
inner: bitmaps::Bitmap<MAX_BITMAP_SIZE>,
max_size: usize,
}
impl Bitmap {
pub fn new(max_size: usize) -> Self {
assert!(max_size <= MAX_BITMAP_SIZE);
Bitmap {
inner: bitmaps::Bitmap::new(),
max_size,
}
}
pub fn set(&mut self, index: usize) -> bool {
assert!(index < self.max_size);
self.inner.set(index, true)
}
pub fn reset(&mut self, index: usize) -> bool {
assert!(index < self.max_size);
self.inner.set(index, false)
}
pub fn first_false_index(&self) -> Option<usize> {
match self.inner.first_false_index() {
Some(idx) if idx < self.max_size => Some(idx),
_ => None,
}
}
pub fn is_empty(&self) -> bool {
self.inner.is_empty()
}
pub fn is_full(&self) -> bool {
self.first_false_index().is_none()
}
}
#[macro_export]
macro_rules! box_slab {
($name:ident,$t:ty,$v:expr) => {
use std::mem::MaybeUninit;
use std::sync::Once;
use $crate::{BoxSlab, Slab, SlabPool};
pub struct $name;
impl SlabPool for $name {
type SlabType = $t;
fn get_slab() -> &'static Slab<Self> {
const MAYBE_INIT: MaybeUninit<$t> = MaybeUninit::uninit();
static mut SLAB_POOL: [MaybeUninit<$t>; $v] = [MAYBE_INIT; $v];
static mut SLAB_SPACE: Option<Slab<$name>> = None;
static mut INIT: Once = Once::new();
unsafe {
INIT.call_once(|| {
SLAB_SPACE = Some(Slab::<$name>::init(&mut SLAB_POOL, $v));
});
SLAB_SPACE.as_ref().unwrap()
}
}
}
};
}
pub trait SlabPool {
type SlabType: 'static;
fn get_slab() -> &'static Slab<Self>
where
Self: Sized;
}
pub struct Inner<T: 'static + SlabPool> {
pool: &'static mut [MaybeUninit<T::SlabType>],
map: Bitmap,
}
// TODO: Instead of a mutex, we should replace this with a CAS loop
pub struct Slab<T: 'static + SlabPool>(Mutex<Inner<T>>);
impl<T: SlabPool> Slab<T> {
pub fn init(pool: &'static mut [MaybeUninit<T::SlabType>], size: usize) -> Self {
Self(Mutex::new(Inner {
pool,
map: Bitmap::new(size),
}))
}
pub fn try_new(new_object: T::SlabType) -> Option<BoxSlab<T>> {
let slab = T::get_slab();
let mut inner = slab.0.lock().unwrap();
if let Some(index) = inner.map.first_false_index() {
inner.map.set(index);
inner.pool[index].write(new_object);
let cell_ptr = unsafe { &mut *inner.pool[index].as_mut_ptr() };
Some(BoxSlab {
data: cell_ptr,
index,
})
} else {
None
}
}
pub fn free(&self, index: usize) {
let mut inner = self.0.lock().unwrap();
inner.map.reset(index);
let old_value = std::mem::replace(&mut inner.pool[index], MaybeUninit::uninit());
let _old_value = unsafe { old_value.assume_init() };
// This will drop the old_value
}
}
pub struct BoxSlab<T: 'static + SlabPool> {
// Because the data is a reference within the MaybeUninit, we don't have a mechanism
// to go out to the MaybeUninit from this reference. Hence this index
index: usize,
// TODO: We should figure out a way to get rid of the index too
data: &'static mut T::SlabType,
}
impl<T: 'static + SlabPool> Drop for BoxSlab<T> {
fn drop(&mut self) {
T::get_slab().free(self.index);
}
}
impl<T: SlabPool> Deref for BoxSlab<T> {
type Target = T::SlabType;
fn deref(&self) -> &Self::Target {
self.data
}
}
impl<T: SlabPool> DerefMut for BoxSlab<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.data
}
}
#[cfg(test)]
mod tests {
use std::{ops::Deref, sync::Arc};
pub struct Test {
val: Arc<u32>,
}
box_slab!(TestSlab, Test, 3);
#[test]
fn simple_alloc_free() {
{
let a = Slab::<TestSlab>::try_new(Test { val: Arc::new(10) }).unwrap();
assert_eq!(*a.val.deref(), 10);
let inner = TestSlab::get_slab().0.lock().unwrap();
assert!(!inner.map.is_empty());
}
// Validates that the 'Drop' got executed
let inner = TestSlab::get_slab().0.lock().unwrap();
assert!(inner.map.is_empty());
println!("Box Size {}", std::mem::size_of::<Box<Test>>());
println!("BoxSlab Size {}", std::mem::size_of::<BoxSlab<TestSlab>>());
}
#[test]
fn alloc_full_block() {
{
let a = Slab::<TestSlab>::try_new(Test { val: Arc::new(10) }).unwrap();
let b = Slab::<TestSlab>::try_new(Test { val: Arc::new(11) }).unwrap();
let c = Slab::<TestSlab>::try_new(Test { val: Arc::new(12) }).unwrap();
// Test that at overflow, we return None
assert!(Slab::<TestSlab>::try_new(Test { val: Arc::new(13) }).is_none(),);
assert_eq!(*b.val.deref(), 11);
{
let inner = TestSlab::get_slab().0.lock().unwrap();
// Test that the bitmap is marked as full
assert!(inner.map.is_full());
}
// Purposefully drop, to test that new allocation is possible
std::mem::drop(b);
let d = Slab::<TestSlab>::try_new(Test { val: Arc::new(21) }).unwrap();
assert_eq!(*d.val.deref(), 21);
// Ensure older allocations are still valid
assert_eq!(*a.val.deref(), 10);
assert_eq!(*c.val.deref(), 12);
}
// Validates that the 'Drop' got executed - test that the bitmap is empty
let inner = TestSlab::get_slab().0.lock().unwrap();
assert!(inner.map.is_empty());
}
#[test]
fn test_drop_logic() {
let root = Arc::new(10);
{
let _a = Slab::<TestSlab>::try_new(Test { val: root.clone() }).unwrap();
let _b = Slab::<TestSlab>::try_new(Test { val: root.clone() }).unwrap();
let _c = Slab::<TestSlab>::try_new(Test { val: root.clone() }).unwrap();
assert_eq!(Arc::strong_count(&root), 4);
}
// Test that Drop was correctly called on all the members of the pool
assert_eq!(Arc::strong_count(&root), 1);
}
}

@@ -15,8 +15,8 @@
* limitations under the License.
*/
use matter::data_model::sdm::dev_att::{DataType, DevAttDataFetcher};
use matter::error::Error;
use rs_matter::data_model::sdm::dev_att::{DataType, DevAttDataFetcher};
use rs_matter::error::{Error, ErrorCode};
pub struct HardCodedDevAtt {}
@@ -159,7 +159,7 @@ impl DevAttDataFetcher for HardCodedDevAtt {
data.copy_from_slice(src);
Ok(src.len())
} else {
Err(Error::NoSpace)
Err(ErrorCode::NoSpace.into())
}
}
}

@@ -15,40 +15,367 @@
* limitations under the License.
*/
use core::borrow::Borrow;
use core::pin::pin;
use embassy_futures::select::select3;
use log::info;
use rs_matter::core::{CommissioningData, Matter};
use rs_matter::data_model::cluster_basic_information::BasicInfoConfig;
use rs_matter::data_model::cluster_on_off;
use rs_matter::data_model::device_types::DEV_TYPE_ON_OFF_LIGHT;
use rs_matter::data_model::objects::*;
use rs_matter::data_model::root_endpoint;
use rs_matter::data_model::system_model::descriptor;
use rs_matter::error::Error;
use rs_matter::mdns::{MdnsRunBuffers, MdnsService};
use rs_matter::secure_channel::spake2p::VerifierData;
use rs_matter::transport::core::RunBuffers;
use rs_matter::transport::network::{Ipv4Addr, Ipv6Addr, NetworkStack};
use rs_matter::utils::select::EitherUnwrap;
mod dev_att;
use matter::core::{self, CommissioningData};
use matter::data_model::cluster_basic_information::BasicInfoConfig;
use matter::data_model::device_types::device_type_add_on_off_light;
use matter::secure_channel::spake2p::VerifierData;
fn main() {
env_logger::init();
let comm_data = CommissioningData {
// TODO: Hard-coded for now
verifier: VerifierData::new_with_pw(123456),
discriminator: 250,
};
#[cfg(feature = "std")]
fn main() -> Result<(), Error> {
let thread = std::thread::Builder::new()
.stack_size(160 * 1024)
.spawn(run)
.unwrap();
// vid/pid should match those in the DAC
let dev_info = BasicInfoConfig {
thread.join().unwrap()
}
// NOTE (no_std): For no_std, name this entry point according to your MCU platform
#[cfg(not(feature = "std"))]
#[no_mangle]
fn app_main() {
run().unwrap();
}
fn run() -> Result<(), Error> {
initialize_logger();
info!(
"Matter memory: mDNS={}, Matter={}, MdnsBuffers={}, RunBuffers={}",
core::mem::size_of::<MdnsService>(),
core::mem::size_of::<Matter>(),
core::mem::size_of::<MdnsRunBuffers>(),
core::mem::size_of::<RunBuffers>(),
);
let dev_det = BasicInfoConfig {
vid: 0xFFF1,
pid: 0x8000,
hw_ver: 2,
sw_ver: 1,
sw_ver_str: "1".to_string(),
serial_no: "aabbccdd".to_string(),
device_name: "OnOff Light".to_string(),
sw_ver_str: "1",
serial_no: "aabbccdd",
device_name: "OnOff Light",
product_name: "Light123",
vendor_name: "Vendor PQR",
};
let dev_att = Box::new(dev_att::HardCodedDevAtt::new());
let mut matter = core::Matter::new(dev_info, dev_att, comm_data).unwrap();
let dm = matter.get_data_model();
let (ipv4_addr, ipv6_addr, interface) = initialize_network()?;
let dev_att = dev_att::HardCodedDevAtt::new();
#[cfg(feature = "std")]
let epoch = rs_matter::utils::epoch::sys_epoch;
#[cfg(feature = "std")]
let rand = rs_matter::utils::rand::sys_rand;
// NOTE (no_std): For no_std, provide your own function here
#[cfg(not(feature = "std"))]
let epoch = rs_matter::utils::epoch::dummy_epoch;
// NOTE (no_std): For no_std, provide your own function here
#[cfg(not(feature = "std"))]
let rand = rs_matter::utils::rand::dummy_rand;
let mdns = MdnsService::new(
0,
"rs-matter-demo",
ipv4_addr.octets(),
Some((ipv6_addr.octets(), interface)),
&dev_det,
rs_matter::MATTER_PORT,
);
info!("mDNS initialized");
let matter = Matter::new(
// vid/pid should match those in the DAC
&dev_det,
&dev_att,
&mdns,
epoch,
rand,
rs_matter::MATTER_PORT,
);
info!("Matter initialized");
#[cfg(all(feature = "std", not(target_os = "espidf")))]
let mut psm = rs_matter::persist::Psm::new(&matter, std::env::temp_dir().join("rs-matter"))?;
let handler = HandlerCompat(handler(&matter));
// When using a custom UDP stack, remove the network stack initialization below
// and call `Matter::run_piped()` instead, by utilizing the TX & RX `Pipe` structs
// to push/pull your UDP packets from/to the Matter stack.
// Ditto for `MdnsService`.
//
// When using the `embassy-net` feature (as opposed to the Rust Standard Library network stack),
// this initialization would be more complex.
let stack = NetworkStack::new();
let mut mdns_buffers = MdnsRunBuffers::new();
let mut mdns_runner = pin!(mdns.run(&stack, &mut mdns_buffers));
let mut buffers = RunBuffers::new();
let runner = matter.run(
&stack,
&mut buffers,
CommissioningData {
// TODO: Hard-coded for now
verifier: VerifierData::new_with_pw(123456, *matter.borrow()),
discriminator: 250,
},
&handler,
);
info!(
"Matter transport runner memory: {}",
core::mem::size_of_val(&runner)
);
let mut runner = pin!(runner);
#[cfg(all(feature = "std", not(target_os = "espidf")))]
let mut psm_runner = pin!(psm.run());
#[cfg(not(all(feature = "std", not(target_os = "espidf"))))]
let mut psm_runner = pin!(core::future::pending());
let runner = select3(&mut runner, &mut mdns_runner, &mut psm_runner);
#[cfg(feature = "std")]
async_io::block_on(runner).unwrap()?;
// NOTE (no_std): For no_std, replace with your own more efficient no_std executor,
// because the executor used below is a simple busy-loop poller
#[cfg(not(feature = "std"))]
embassy_futures::block_on(&mut runner).unwrap()?;
Ok(())
}
const NODE: Node<'static> = Node {
id: 0,
endpoints: &[
root_endpoint::endpoint(0),
Endpoint {
id: 1,
device_type: DEV_TYPE_ON_OFF_LIGHT,
clusters: &[descriptor::CLUSTER, cluster_on_off::CLUSTER],
},
],
};
fn handler<'a>(matter: &'a Matter<'a>) -> impl Metadata + NonBlockingHandler + 'a {
(
NODE,
root_endpoint::handler(0, matter)
.chain(
1,
descriptor::ID,
descriptor::DescriptorCluster::new(*matter.borrow()),
)
.chain(
1,
cluster_on_off::ID,
cluster_on_off::OnOffCluster::new(*matter.borrow()),
),
)
}
// NOTE (no_std): For no_std, implement here your own way of initializing the logger
#[cfg(all(not(feature = "std"), not(target_os = "espidf")))]
#[inline(never)]
fn initialize_logger() {}
// NOTE (no_std): For no_std, implement here your own way of initializing the network
#[cfg(all(not(feature = "std"), not(target_os = "espidf")))]
#[inline(never)]
fn initialize_network() -> Result<(Ipv4Addr, Ipv6Addr, u32), Error> {
Ok((Ipv4Addr::UNSPECIFIED, Ipv6Addr::UNSPECIFIED, 0))
}
#[cfg(all(feature = "std", not(target_os = "espidf")))]
#[inline(never)]
fn initialize_logger() {
env_logger::init_from_env(
env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"),
);
}
#[cfg(all(feature = "std", not(target_os = "espidf")))]
#[inline(never)]
fn initialize_network() -> Result<(Ipv4Addr, Ipv6Addr, u32), Error> {
use log::error;
use nix::{net::if_::InterfaceFlags, sys::socket::SockaddrIn6};
use rs_matter::error::ErrorCode;
let interfaces = || {
nix::ifaddrs::getifaddrs().unwrap().filter(|ia| {
ia.flags
.contains(InterfaceFlags::IFF_UP | InterfaceFlags::IFF_BROADCAST)
&& !ia
.flags
.intersects(InterfaceFlags::IFF_LOOPBACK | InterfaceFlags::IFF_POINTOPOINT)
})
};
// A quick and dirty way to get a network interface that has a link-local IPv6 address assigned as well as a non-loopback IPv4
// Most likely, this is the interface we need
// (as opposed to all the docker and libvirt interfaces that might be assigned on the machine and which seem by default to be IPv4 only)
let (iname, ip, ipv6) = interfaces()
.filter_map(|ia| {
ia.address
.and_then(|addr| addr.as_sockaddr_in6().map(SockaddrIn6::ip))
.filter(|ip| ip.octets()[..2] == [0xfe, 0x80])
.map(|ipv6| (ia.interface_name, ipv6))
})
.filter_map(|(iname, ipv6)| {
interfaces()
.filter(|ia2| ia2.interface_name == iname)
.find_map(|ia2| {
ia2.address
.and_then(|addr| addr.as_sockaddr_in().map(|addr| addr.ip().into()))
.map(|ip| (iname.clone(), ip, ipv6))
})
})
.next()
.ok_or_else(|| {
error!("Cannot find network interface suitable for mDNS broadcasting");
ErrorCode::Network
})?;
info!(
"Will use network interface {} with {}/{} for mDNS",
iname, ip, ipv6
);
Ok((ip, ipv6, 0 as _))
}
#[cfg(target_os = "espidf")]
#[inline(never)]
fn initialize_logger() {
esp_idf_svc::log::EspLogger::initialize_default();
}
#[cfg(target_os = "espidf")]
#[inline(never)]
fn initialize_network() -> Result<(Ipv4Addr, Ipv6Addr, u32), Error> {
use core::time::Duration;
use embedded_svc::wifi::{AuthMethod, ClientConfiguration, Configuration};
use esp_idf_hal::prelude::Peripherals;
use esp_idf_svc::handle::RawHandle;
use esp_idf_svc::wifi::{BlockingWifi, EspWifi};
use esp_idf_svc::{eventloop::EspSystemEventLoop, nvs::EspDefaultNvsPartition};
use esp_idf_sys::{
self as _, esp, esp_ip6_addr_t, esp_netif_create_ip6_linklocal, esp_netif_get_ip6_linklocal,
}; // If using the `binstart` feature of `esp-idf-sys`, always keep this module imported
const SSID: &'static str = env!("WIFI_SSID");
const PASSWORD: &'static str = env!("WIFI_PASS");
#[allow(clippy::needless_update)]
{
let mut node = dm.node.write().unwrap();
let endpoint = device_type_add_on_off_light(&mut node).unwrap();
println!("Added OnOff Light Device type at endpoint id: {}", endpoint);
println!("Data Model now is: {}", node);
// VFS is necessary for poll-based async IO
esp_idf_sys::esp!(unsafe {
esp_idf_sys::esp_vfs_eventfd_register(&esp_idf_sys::esp_vfs_eventfd_config_t {
max_fds: 5,
..Default::default()
})
})?;
}
matter.start_daemon().unwrap();
let peripherals = Peripherals::take().unwrap();
let sys_loop = EspSystemEventLoop::take()?;
let nvs = EspDefaultNvsPartition::take()?;
let mut wifi = EspWifi::new(peripherals.modem, sys_loop.clone(), Some(nvs))?;
let mut bwifi = BlockingWifi::wrap(&mut wifi, sys_loop)?;
let wifi_configuration: Configuration = Configuration::Client(ClientConfiguration {
ssid: SSID.into(),
bssid: None,
auth_method: AuthMethod::WPA2Personal,
password: PASSWORD.into(),
channel: None,
});
bwifi.set_configuration(&wifi_configuration)?;
bwifi.start()?;
info!("Wifi started");
bwifi.connect()?;
info!("Wifi connected");
esp!(unsafe {
esp_netif_create_ip6_linklocal(bwifi.wifi_mut().sta_netif_mut().handle() as _)
})?;
bwifi.wait_netif_up()?;
info!("Wifi netif up");
let ip_info = wifi.sta_netif().get_ip_info()?;
let mut ipv6: esp_ip6_addr_t = Default::default();
info!("Waiting for IPv6 address");
while esp!(unsafe { esp_netif_get_ip6_linklocal(wifi.sta_netif().handle() as _, &mut ipv6) })
.is_err()
{
info!("Waiting...");
std::thread::sleep(Duration::from_secs(2));
}
info!("Wifi DHCP info: {:?}, IPv6: {:?}", ip_info, ipv6.addr);
let ipv4_octets = ip_info.ip.octets();
let ipv6_octets = [
ipv6.addr[0].to_le_bytes()[0],
ipv6.addr[0].to_le_bytes()[1],
ipv6.addr[0].to_le_bytes()[2],
ipv6.addr[0].to_le_bytes()[3],
ipv6.addr[1].to_le_bytes()[0],
ipv6.addr[1].to_le_bytes()[1],
ipv6.addr[1].to_le_bytes()[2],
ipv6.addr[1].to_le_bytes()[3],
ipv6.addr[2].to_le_bytes()[0],
ipv6.addr[2].to_le_bytes()[1],
ipv6.addr[2].to_le_bytes()[2],
ipv6.addr[2].to_le_bytes()[3],
ipv6.addr[3].to_le_bytes()[0],
ipv6.addr[3].to_le_bytes()[1],
ipv6.addr[3].to_le_bytes()[2],
ipv6.addr[3].to_le_bytes()[3],
];
let interface = wifi.sta_netif().get_index();
// Not OK of course, but for a demo this is good enough
// Wifi will continue to be available and working in the background
core::mem::forget(wifi);
Ok((ipv4_octets.into(), ipv6_octets.into(), interface))
}

@@ -15,8 +15,8 @@
* limitations under the License.
*/
use matter::data_model::sdm::dev_att::{DataType, DevAttDataFetcher};
use matter::error::Error;
use rs_matter::data_model::sdm::dev_att::{DataType, DevAttDataFetcher};
use rs_matter::error::Error;
pub struct HardCodedDevAtt {}
@@ -159,7 +159,7 @@ impl DevAttDataFetcher for HardCodedDevAtt {
data.copy_from_slice(src);
Ok(src.len())
} else {
Err(Error::NoSpace)
Err(ErrorCode::NoSpace.into())
}
}
}

@@ -15,55 +15,56 @@
* limitations under the License.
*/
mod dev_att;
use matter::core::{self, CommissioningData};
use matter::data_model::cluster_basic_information::BasicInfoConfig;
use matter::data_model::cluster_media_playback::{Commands, MediaPlaybackCluster};
use matter::data_model::device_types::DEV_TYPE_ON_SMART_SPEAKER;
use matter::secure_channel::spake2p::VerifierData;
// TODO
// mod dev_att;
// use rs_matter::core::{self, CommissioningData};
// use rs_matter::data_model::cluster_basic_information::BasicInfoConfig;
// use rs_matter::data_model::cluster_media_playback::{Commands, MediaPlaybackCluster};
// use rs_matter::data_model::device_types::DEV_TYPE_ON_SMART_SPEAKER;
// use rs_matter::secure_channel::spake2p::VerifierData;
fn main() {
env_logger::init();
let comm_data = CommissioningData {
// TODO: Hard-coded for now
verifier: VerifierData::new_with_pw(123456),
discriminator: 250,
};
// env_logger::init();
// let comm_data = CommissioningData {
// // TODO: Hard-coded for now
// verifier: VerifierData::new_with_pw(123456),
// discriminator: 250,
// };
// vid/pid should match those in the DAC
let dev_info = BasicInfoConfig {
vid: 0xFFF1,
pid: 0x8002,
hw_ver: 2,
sw_ver: 1,
sw_ver_str: "1".to_string(),
serial_no: "aabbccdd".to_string(),
device_name: "Smart Speaker".to_string(),
};
let dev_att = Box::new(dev_att::HardCodedDevAtt::new());
// // vid/pid should match those in the DAC
// let dev_info = BasicInfoConfig {
// vid: 0xFFF1,
// pid: 0x8002,
// hw_ver: 2,
// sw_ver: 1,
// sw_ver_str: "1".to_string(),
// serial_no: "aabbccdd".to_string(),
// device_name: "Smart Speaker".to_string(),
// };
// let dev_att = Box::new(dev_att::HardCodedDevAtt::new());
let mut matter = core::Matter::new(dev_info, dev_att, comm_data).unwrap();
let dm = matter.get_data_model();
{
let mut node = dm.node.write().unwrap();
// let mut matter = core::Matter::new(dev_info, dev_att, comm_data).unwrap();
// let dm = matter.get_data_model();
// {
// let mut node = dm.node.write().unwrap();
let endpoint_audio = node.add_endpoint(DEV_TYPE_ON_SMART_SPEAKER).unwrap();
let mut media_playback_cluster = MediaPlaybackCluster::new().unwrap();
// let endpoint_audio = node.add_endpoint(DEV_TYPE_ON_SMART_SPEAKER).unwrap();
// let mut media_playback_cluster = MediaPlaybackCluster::new().unwrap();
// Add some callbacks
let play_callback = Box::new(|| log::info!("Comamnd [Play] handled with callback."));
let pause_callback = Box::new(|| log::info!("Comamnd [Pause] handled with callback."));
let stop_callback = Box::new(|| log::info!("Comamnd [Stop] handled with callback."));
let start_over_callback =
Box::new(|| log::info!("Comamnd [StartOver] handled with callback."));
media_playback_cluster.add_callback(Commands::Play, play_callback);
media_playback_cluster.add_callback(Commands::Pause, pause_callback);
media_playback_cluster.add_callback(Commands::Stop, stop_callback);
media_playback_cluster.add_callback(Commands::StartOver, start_over_callback);
// // Add some callbacks
// let play_callback = Box::new(|| log::info!("Comamnd [Play] handled with callback."));
// let pause_callback = Box::new(|| log::info!("Comamnd [Pause] handled with callback."));
// let stop_callback = Box::new(|| log::info!("Comamnd [Stop] handled with callback."));
// let start_over_callback =
// Box::new(|| log::info!("Comamnd [StartOver] handled with callback."));
// media_playback_cluster.add_callback(Commands::Play, play_callback);
// media_playback_cluster.add_callback(Commands::Pause, pause_callback);
// media_playback_cluster.add_callback(Commands::Stop, stop_callback);
// media_playback_cluster.add_callback(Commands::StartOver, start_over_callback);
node.add_cluster(endpoint_audio, media_playback_cluster)
.unwrap();
println!("Added Speaker type at endpoint id: {}", endpoint_audio)
}
matter.start_daemon().unwrap();
// node.add_cluster(endpoint_audio, media_playback_cluster)
// .unwrap();
// println!("Added Speaker type at endpoint id: {}", endpoint_audio)
// }
// matter.start_daemon().unwrap();
}

@@ -0,0 +1,70 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use rs_matter::core::{self, CommissioningData};
use rs_matter::data_model::cluster_basic_information::BasicInfoConfig;
use rs_matter::data_model::cluster_media_playback::{Commands, MediaPlaybackCluster};
use rs_matter::data_model::device_types::DEV_TYPE_ON_SMART_SPEAKER;
use rs_matter::secure_channel::spake2p::VerifierData;
mod dev_att;
fn main() {
env_logger::init();
let comm_data = CommissioningData {
// TODO: Hard-coded for now
verifier: VerifierData::new_with_pw(123456),
discriminator: 250,
};
// vid/pid should match those in the DAC
let dev_info = BasicInfoConfig {
vid: 0xFFF1,
pid: 0x8002,
hw_ver: 2,
sw_ver: 1,
sw_ver_str: "1".to_string(),
serial_no: "aabbccdd".to_string(),
device_name: "Smart Speaker".to_string(),
};
let dev_att = Box::new(dev_att::HardCodedDevAtt::new());
let mut matter = core::Matter::new(dev_info, dev_att, comm_data).unwrap();
let dm = matter.get_data_model();
{
let mut node = dm.node.write().unwrap();
let endpoint_audio = node.add_endpoint(DEV_TYPE_ON_SMART_SPEAKER).unwrap();
let mut media_playback_cluster = MediaPlaybackCluster::new().unwrap();
// Add some callbacks
let play_callback = Box::new(|| log::info!("Comamnd [Play] handled with callback."));
let pause_callback = Box::new(|| log::info!("Comamnd [Pause] handled with callback."));
let stop_callback = Box::new(|| log::info!("Comamnd [Stop] handled with callback."));
let start_over_callback =
Box::new(|| log::info!("Comamnd [StartOver] handled with callback."));
media_playback_cluster.add_callback(Commands::Play, play_callback);
media_playback_cluster.add_callback(Commands::Pause, pause_callback);
media_playback_cluster.add_callback(Commands::Stop, stop_callback);
media_playback_cluster.add_callback(Commands::StartOver, start_over_callback);
node.add_cluster(endpoint_audio, media_playback_cluster)
.unwrap();
println!("Added Speaker type at endpoint id: {}", endpoint_audio)
}
matter.start_daemon().unwrap();
}

@@ -1,83 +0,0 @@
[package]
name = "matter-iot"
version = "0.1.0"
edition = "2018"
authors = ["Kedar Sovani <kedars@gmail.com>"]
description = "Native RUST implementation of the Matter (Smart-Home) ecosystem"
repository = "https://github.com/kedars/matter-rs"
readme = "README.md"
keywords = ["matter", "smart", "smart-home", "IoT", "ESP32"]
categories = ["embedded", "network-programming"]
license = "MIT"
[lib]
name = "matter"
path = "src/lib.rs"
[features]
default = ["crypto_mbedtls"]
crypto_openssl = ["openssl", "foreign-types", "hmac", "sha2"]
crypto_mbedtls = ["mbedtls"]
crypto_esp_mbedtls = ["esp-idf-sys"]
crypto_rustcrypto = ["sha2", "hmac", "pbkdf2", "hkdf", "aes", "ccm", "p256", "elliptic-curve", "crypto-bigint", "x509-cert"]
[dependencies]
boxslab = { path = "../boxslab" }
matter_macro_derive = { path = "../matter_macro_derive" }
bitflags = "1.3"
byteorder = "1.4.3"
heapless = { version = "0.7.16", features = ["x86-sync-pool"] }
generic-array = "0.14.6"
num = "0.4"
num-derive = "0.3.3"
num-traits = "0.2.15"
log = { version = "0.4.17", features = ["max_level_debug", "release_max_level_debug"] }
env_logger = { version = "0.10.0", default-features = false, features = [] }
rand = "0.8.5"
esp-idf-sys = { version = "0.32", features = ["binstart"], optional = true }
subtle = "2.4.1"
colored = "2.0.0"
smol = "1.3.0"
owning_ref = "0.4.1"
safemem = "0.3.3"
chrono = { version = "0.4.23", default-features = false, features = ["clock", "std"] }
async-channel = "1.8"
# crypto
openssl = { git = "https://github.com/sfackler/rust-openssl", optional = true }
foreign-types = { version = "0.3.2", optional = true }
mbedtls = { version = "0.9", optional = true }
sha2 = { version = "0.10", default-features = false, optional = true }
hmac = { version = "0.12", optional = true }
pbkdf2 = { version = "0.12", optional = true }
hkdf = { version = "0.12", optional = true }
aes = { version = "0.8", optional = true }
ccm = { version = "0.5", default-features = false, features = ["alloc"], optional = true }
p256 = { version = "0.13.0", default-features = false, features = ["arithmetic", "ecdh", "ecdsa"], optional = true }
elliptic-curve = { version = "0.13.2", optional = true }
crypto-bigint = { version = "0.4", default-features = false, optional = true }
# Note: requires std
x509-cert = { version = "0.2.0", default-features = false, features = ["pem", "std"], optional = true }
# to compute the check digit
verhoeff = "1"
# print QR code
qrcode = { version = "0.12", default-features = false }
[target.'cfg(target_os = "macos")'.dependencies]
astro-dnssd = "0.3"
# MDNS support
[target.'cfg(target_os = "linux")'.dependencies]
lazy_static = "1.4.0"
libmdns = { version = "0.7.4" }
[[example]]
name = "onoff_light"
path = "../examples/onoff_light/src/main.rs"
[[example]]
name = "speaker"
path = "../examples/speaker/src/main.rs"

@@ -1,225 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//! Base38 encoding and decoding functions.
use crate::error::Error;
const BASE38_CHARS: [char; 38] = [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I',
'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '-', '.',
];
const UNUSED: u8 = 255;
// map of base38 character to numeric value
// subtract 45 from the character's ASCII value, then index into this array, if possible
const DECODE_BASE38: [u8; 46] = [
36, // '-', =45
37, // '.', =46
UNUSED, // '/', =47
0, // '0', =48
1, // '1', =49
2, // '2', =50
3, // '3', =51
4, // '4', =52
5, // '5', =53
6, // '6', =54
7, // '7', =55
8, // '8', =56
9, // '9', =57
UNUSED, // ':', =58
UNUSED, // ';', =59
UNUSED, // '<', =60
UNUSED, // '=', =61
UNUSED, // '>', =62
UNUSED, // '?', =63
UNUSED, // '@', =64
10, // 'A', =65
11, // 'B', =66
12, // 'C', =67
13, // 'D', =68
14, // 'E', =69
15, // 'F', =70
16, // 'G', =71
17, // 'H', =72
18, // 'I', =73
19, // 'J', =74
20, // 'K', =75
21, // 'L', =76
22, // 'M', =77
23, // 'N', =78
24, // 'O', =79
25, // 'P', =80
26, // 'Q', =81
27, // 'R', =82
28, // 'S', =83
29, // 'T', =84
30, // 'U', =85
31, // 'V', =86
32, // 'W', =87
33, // 'X', =88
34, // 'Y', =89
35, // 'Z', =90
];
const BASE38_CHARACTERS_NEEDED_IN_NBYTES_CHUNK: [u8; 3] = [2, 4, 5];
const RADIX: u32 = BASE38_CHARS.len() as u32;
/// Encode a byte array into a base38 string.
///
/// # Arguments
/// * `bytes` - byte array to encode
/// * `length` - optional length of the byte array to encode. If not specified, the entire byte array is encoded.
pub fn encode(bytes: &[u8], length: Option<usize>) -> String {
let mut offset = 0;
let mut result = String::new();
// if length is specified, use it, otherwise use the length of the byte array
// if length is specified but is greater than the length of the byte array, use the length of the byte array
let b_len = bytes.len();
let length = length.map(|l| l.min(b_len)).unwrap_or(b_len);
while offset < length {
let remaining = length - offset;
match remaining.cmp(&2) {
std::cmp::Ordering::Greater => {
result.push_str(&encode_base38(
((bytes[offset + 2] as u32) << 16)
| ((bytes[offset + 1] as u32) << 8)
| (bytes[offset] as u32),
5,
));
offset += 3;
}
std::cmp::Ordering::Equal => {
result.push_str(&encode_base38(
((bytes[offset + 1] as u32) << 8) | (bytes[offset] as u32),
4,
));
break;
}
std::cmp::Ordering::Less => {
result.push_str(&encode_base38(bytes[offset] as u32, 2));
break;
}
}
}
result
}
fn encode_base38(mut value: u32, char_count: u8) -> String {
let mut result = String::new();
for _ in 0..char_count {
let remainder = value % 38;
result.push(BASE38_CHARS[remainder as usize]);
value = (value - remainder) / 38;
}
result
}
/// Decode a base38-encoded string into a byte slice
///
/// # Arguments
/// * `base38_str` - base38-encoded string to decode
///
/// Fails if the string contains invalid characters
pub fn decode(base38_str: &str) -> Result<Vec<u8>, Error> {
let mut result = Vec::new();
let mut base38_characters_number: usize = base38_str.len();
let mut decoded_base38_characters: usize = 0;
while base38_characters_number > 0 {
let base38_characters_in_chunk: usize;
let bytes_in_decoded_chunk: usize;
if base38_characters_number >= BASE38_CHARACTERS_NEEDED_IN_NBYTES_CHUNK[2] as usize {
base38_characters_in_chunk = BASE38_CHARACTERS_NEEDED_IN_NBYTES_CHUNK[2] as usize;
bytes_in_decoded_chunk = 3;
} else if base38_characters_number == BASE38_CHARACTERS_NEEDED_IN_NBYTES_CHUNK[1] as usize {
base38_characters_in_chunk = BASE38_CHARACTERS_NEEDED_IN_NBYTES_CHUNK[1] as usize;
bytes_in_decoded_chunk = 2;
} else if base38_characters_number == BASE38_CHARACTERS_NEEDED_IN_NBYTES_CHUNK[0] as usize {
base38_characters_in_chunk = BASE38_CHARACTERS_NEEDED_IN_NBYTES_CHUNK[0] as usize;
bytes_in_decoded_chunk = 1;
} else {
return Err(Error::InvalidData);
}
let mut value = 0u32;
for i in (1..=base38_characters_in_chunk).rev() {
let mut base38_chars = base38_str.chars();
let v = decode_char(base38_chars.nth(decoded_base38_characters + i - 1).unwrap())?;
value = value * RADIX + v as u32;
}
decoded_base38_characters += base38_characters_in_chunk;
base38_characters_number -= base38_characters_in_chunk;
for _i in 0..bytes_in_decoded_chunk {
result.push(value as u8);
value >>= 8;
}
if value > 0 {
// encoded value is too big to represent a correct chunk of size 1, 2 or 3 bytes
return Err(Error::InvalidArgument);
}
}
Ok(result)
}
fn decode_char(c: char) -> Result<u8, Error> {
let c = c as u8;
if !(45..=90).contains(&c) {
return Err(Error::InvalidData);
}
let c = DECODE_BASE38[c as usize - 45];
if c == UNUSED {
return Err(Error::InvalidData);
}
Ok(c)
}
#[cfg(test)]
mod tests {
use super::*;
const ENCODED: &str = "-MOA57ZU02IT2L2BJ00";
const DECODED: [u8; 11] = [
0x88, 0xff, 0xa7, 0x91, 0x50, 0x40, 0x00, 0x47, 0x51, 0xdd, 0x02,
];
#[test]
fn can_base38_encode() {
assert_eq!(encode(&DECODED, None), ENCODED);
assert_eq!(encode(&DECODED, Some(11)), ENCODED);
// length is greater than the length of the byte array
assert_eq!(encode(&DECODED, Some(12)), ENCODED);
}
#[test]
fn can_base38_decode() {
assert_eq!(decode(ENCODED).expect("can not decode base38"), DECODED);
}
}
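For orientation, here is a hedged usage sketch of the encode/decode pair defined above, round-tripping the same test vector as the unit tests. The function name is illustrative; it assumes access to this module's encode, decode and Error items.
fn base38_roundtrip() -> Result<(), Error> {
    let payload = [0x88u8, 0xff, 0xa7, 0x91, 0x50, 0x40, 0x00, 0x47, 0x51, 0xdd, 0x02];
    // Encode the whole slice (length None), yielding "-MOA57ZU02IT2L2BJ00"
    let encoded = encode(&payload, None);
    // Decode back; invalid characters surface as Error::InvalidData
    let decoded = decode(&encoded)?;
    assert_eq!(decoded.as_slice(), &payload[..]);
    Ok(())
}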


@ -1,110 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::{
acl::AclMgr,
data_model::{
cluster_basic_information::BasicInfoConfig, core::DataModel,
sdm::dev_att::DevAttDataFetcher,
},
error::*,
fabric::FabricMgr,
interaction_model::InteractionModel,
mdns::Mdns,
pairing::{print_pairing_code_and_qr, DiscoveryCapabilities},
secure_channel::{core::SecureChannel, pake::PaseMgr, spake2p::VerifierData},
transport,
};
use std::sync::Arc;
/// Device Commissioning Data
pub struct CommissioningData {
/// The data like password or verifier that is required to authenticate
pub verifier: VerifierData,
/// The 12-bit discriminator used to differentiate between multiple devices
pub discriminator: u16,
}
/// The primary Matter Object
pub struct Matter {
transport_mgr: transport::mgr::Mgr,
data_model: DataModel,
fabric_mgr: Arc<FabricMgr>,
}
impl Matter {
/// Creates a new Matter object
///
/// # Parameters
/// * dev_att: An object that implements the trait [DevAttDataFetcher]. Any Matter device
/// requires a set of device attestation certificates and keys. It is the responsibility of
/// this object to return the device attestation details when queried.
pub fn new(
dev_det: BasicInfoConfig,
dev_att: Box<dyn DevAttDataFetcher>,
dev_comm: CommissioningData,
) -> Result<Box<Matter>, Error> {
let mdns = Mdns::get()?;
mdns.set_values(dev_det.vid, dev_det.pid, &dev_det.device_name);
let fabric_mgr = Arc::new(FabricMgr::new()?);
let open_comm_window = fabric_mgr.is_empty();
if open_comm_window {
print_pairing_code_and_qr(&dev_det, &dev_comm, DiscoveryCapabilities::default());
}
let acl_mgr = Arc::new(AclMgr::new()?);
let mut pase = PaseMgr::new();
let data_model =
DataModel::new(dev_det, dev_att, fabric_mgr.clone(), acl_mgr, pase.clone())?;
let mut matter = Box::new(Matter {
transport_mgr: transport::mgr::Mgr::new()?,
data_model,
fabric_mgr,
});
let interaction_model =
Box::new(InteractionModel::new(Box::new(matter.data_model.clone())));
matter.transport_mgr.register_protocol(interaction_model)?;
if open_comm_window {
pase.enable_pase_session(dev_comm.verifier, dev_comm.discriminator)?;
}
let secure_channel = Box::new(SecureChannel::new(pase, matter.fabric_mgr.clone()));
matter.transport_mgr.register_protocol(secure_channel)?;
Ok(matter)
}
/// Returns an Arc to [DataModel]
///
/// The Data Model is where you express the type of your device. Typically,
/// once you get this reference, you acquire the write lock and add your device
/// types, clusters, attributes, and commands to the data model.
pub fn get_data_model(&self) -> DataModel {
self.data_model.clone()
}
/// Starts the Matter daemon
///
/// This call does NOT return
///
/// This call starts the Matter daemon that starts communication with other Matter
/// devices on the network.
pub fn start_daemon(&mut self) -> Result<(), Error> {
self.transport_mgr.start()
}
}
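As a rough sketch of the startup flow the doc comments above describe (not the crate's canonical example), a caller that already owns the device details, attestation fetcher and commissioning data would do roughly the following; the function name is illustrative.
fn run_device(
    dev_det: BasicInfoConfig,
    dev_att: Box<dyn DevAttDataFetcher>,
    dev_comm: CommissioningData,
) -> Result<(), Error> {
    // Build the Matter object; this registers the Interaction Model and Secure
    // Channel protocols and may open the commissioning window.
    let mut matter = Matter::new(dev_det, dev_att, dev_comm)?;
    // Grab the data model here to add endpoints/clusters before starting.
    let _dm = matter.get_data_model();
    // Blocks and drives the transport; does not return on success.
    matter.start_daemon()
}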


@ -1,57 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use log::error;
use crate::error::Error;
use super::CryptoKeyPair;
pub struct KeyPairDummy {}
impl KeyPairDummy {
pub fn new() -> Result<Self, Error> {
Ok(Self {})
}
}
impl CryptoKeyPair for KeyPairDummy {
fn get_csr<'a>(&self, _out_csr: &'a mut [u8]) -> Result<&'a [u8], Error> {
error!("This API should never get called");
Err(Error::Invalid)
}
fn get_public_key(&self, _pub_key: &mut [u8]) -> Result<usize, Error> {
error!("This API should never get called");
Err(Error::Invalid)
}
fn get_private_key(&self, _pub_key: &mut [u8]) -> Result<usize, Error> {
error!("This API should never get called");
Err(Error::Invalid)
}
fn derive_secret(self, _peer_pub_key: &[u8], _secret: &mut [u8]) -> Result<usize, Error> {
error!("This API should never get called");
Err(Error::Invalid)
}
fn sign_msg(&self, _msg: &[u8], _signature: &mut [u8]) -> Result<usize, Error> {
error!("This API should never get called");
Err(Error::Invalid)
}
fn verify_msg(&self, _msg: &[u8], _signature: &[u8]) -> Result<(), Error> {
error!("This API should never get called");
Err(Error::Invalid)
}
}


@ -1,114 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use super::objects::*;
use crate::error::*;
use num_derive::FromPrimitive;
pub const ID: u32 = 0x0028;
#[derive(FromPrimitive)]
pub enum Attributes {
DMRevision = 0,
VendorId = 2,
ProductId = 4,
HwVer = 7,
SwVer = 9,
SwVerString = 0xa,
SerialNo = 0x0f,
}
#[derive(Default)]
pub struct BasicInfoConfig {
pub vid: u16,
pub pid: u16,
pub hw_ver: u16,
pub sw_ver: u32,
pub sw_ver_str: String,
pub serial_no: String,
/// Device name; up to 32 characters
pub device_name: String,
}
pub struct BasicInfoCluster {
base: Cluster,
}
impl BasicInfoCluster {
pub fn new(cfg: BasicInfoConfig) -> Result<Box<Self>, Error> {
let mut cluster = Box::new(BasicInfoCluster {
base: Cluster::new(ID)?,
});
let attrs = [
Attribute::new(
Attributes::DMRevision as u16,
AttrValue::Uint8(1),
Access::RV,
Quality::FIXED,
),
Attribute::new(
Attributes::VendorId as u16,
AttrValue::Uint16(cfg.vid),
Access::RV,
Quality::FIXED,
),
Attribute::new(
Attributes::ProductId as u16,
AttrValue::Uint16(cfg.pid),
Access::RV,
Quality::FIXED,
),
Attribute::new(
Attributes::HwVer as u16,
AttrValue::Uint16(cfg.hw_ver),
Access::RV,
Quality::FIXED,
),
Attribute::new(
Attributes::SwVer as u16,
AttrValue::Uint32(cfg.sw_ver),
Access::RV,
Quality::FIXED,
),
Attribute::new(
Attributes::SwVerString as u16,
AttrValue::Utf8(cfg.sw_ver_str),
Access::RV,
Quality::FIXED,
),
Attribute::new(
Attributes::SerialNo as u16,
AttrValue::Utf8(cfg.serial_no),
Access::RV,
Quality::FIXED,
),
];
cluster.base.add_attributes(&attrs[..])?;
Ok(cluster)
}
}
impl ClusterType for BasicInfoCluster {
fn base(&self) -> &Cluster {
&self.base
}
fn base_mut(&mut self) -> &mut Cluster {
&mut self.base
}
}
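A small sketch of the configuration this cluster consumes; the function name and literal values below are placeholders, not real vendor/product identifiers.
fn demo_basic_info() -> Result<Box<BasicInfoCluster>, Error> {
    let cfg = BasicInfoConfig {
        vid: 0xFFF1,            // placeholder vendor id
        pid: 0x8000,            // placeholder product id
        hw_ver: 1,
        sw_ver: 1,
        sw_ver_str: "1.0".into(),
        serial_no: "0000-0000".into(),
        device_name: "Demo Device".into(),
    };
    // The returned boxed cluster is typically added to endpoint 0 via node.add_cluster(0, ...)
    BasicInfoCluster::new(cfg)
}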


@ -1,128 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use super::objects::*;
use crate::{
cmd_enter,
error::*,
interaction_model::{command::CommandReq, core::IMStatusCode},
};
use log::info;
use num_derive::FromPrimitive;
pub const ID: u32 = 0x0006;
pub enum Attributes {
OnOff = 0x0,
}
#[derive(FromPrimitive)]
pub enum Commands {
Off = 0x0,
On = 0x01,
Toggle = 0x02,
}
fn attr_on_off_new() -> Attribute {
// OnOff, Value: false
Attribute::new(
Attributes::OnOff as u16,
AttrValue::Bool(false),
Access::RV,
Quality::PERSISTENT,
)
}
pub struct OnOffCluster {
base: Cluster,
}
impl OnOffCluster {
pub fn new() -> Result<Box<Self>, Error> {
let mut cluster = Box::new(OnOffCluster {
base: Cluster::new(ID)?,
});
cluster.base.add_attribute(attr_on_off_new())?;
Ok(cluster)
}
}
impl ClusterType for OnOffCluster {
fn base(&self) -> &Cluster {
&self.base
}
fn base_mut(&mut self) -> &mut Cluster {
&mut self.base
}
fn handle_command(&mut self, cmd_req: &mut CommandReq) -> Result<(), IMStatusCode> {
let cmd = cmd_req
.cmd
.path
.leaf
.map(num::FromPrimitive::from_u32)
.ok_or(IMStatusCode::UnsupportedCommand)?
.ok_or(IMStatusCode::UnsupportedCommand)?;
match cmd {
Commands::Off => {
cmd_enter!("Off");
let value = self
.base
.read_attribute_raw(Attributes::OnOff as u16)
.unwrap();
if AttrValue::Bool(true) == *value {
self.base
.write_attribute_raw(Attributes::OnOff as u16, AttrValue::Bool(false))
.map_err(|_| IMStatusCode::Failure)?;
}
cmd_req.trans.complete();
Err(IMStatusCode::Success)
}
Commands::On => {
cmd_enter!("On");
let value = self
.base
.read_attribute_raw(Attributes::OnOff as u16)
.unwrap();
if AttrValue::Bool(false) == *value {
self.base
.write_attribute_raw(Attributes::OnOff as u16, AttrValue::Bool(true))
.map_err(|_| IMStatusCode::Failure)?;
}
cmd_req.trans.complete();
Err(IMStatusCode::Success)
}
Commands::Toggle => {
cmd_enter!("Toggle");
let value = match self
.base
.read_attribute_raw(Attributes::OnOff as u16)
.unwrap()
{
&AttrValue::Bool(v) => v,
_ => false,
};
self.base
.write_attribute_raw(Attributes::OnOff as u16, AttrValue::Bool(!value))
.map_err(|_| IMStatusCode::Failure)?;
cmd_req.trans.complete();
Err(IMStatusCode::Success)
}
}
}
}
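For context, the cluster above is attached to an endpoint the same way the device-type helpers further down this diff do it. A minimal sketch, assuming `node` is a write-locked data-model Node and the surrounding function returns Result<_, Error>:
let endpoint = node.add_endpoint(DEV_TYPE_ON_OFF_LIGHT)?;   // device type defined in device_types.rs
node.add_cluster(endpoint, OnOffCluster::new()?)?;          // handles the Off/On/Toggle commands above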


@ -1,44 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::{
data_model::objects::{Cluster, ClusterType},
error::Error,
};
const CLUSTER_NETWORK_COMMISSIONING_ID: u32 = 0x0031;
pub struct TemplateCluster {
base: Cluster,
}
impl ClusterType for TemplateCluster {
fn base(&self) -> &Cluster {
&self.base
}
fn base_mut(&mut self) -> &mut Cluster {
&mut self.base
}
}
impl TemplateCluster {
pub fn new() -> Result<Box<Self>, Error> {
Ok(Box::new(Self {
base: Cluster::new(CLUSTER_NETWORK_COMMISSIONING_ID)?,
}))
}
}


@ -1,394 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use self::subscribe::SubsCtx;
use super::{
cluster_basic_information::BasicInfoConfig,
device_types::device_type_add_root_node,
objects::{self, *},
sdm::dev_att::DevAttDataFetcher,
system_model::descriptor::DescriptorCluster,
};
use crate::{
acl::{AccessReq, Accessor, AccessorSubjects, AclMgr, AuthMode},
error::*,
fabric::FabricMgr,
interaction_model::{
command::CommandReq,
core::{IMStatusCode, OpCode},
messages::{
ib::{self, AttrData, DataVersionFilter},
msg::{self, InvReq, ReadReq, WriteReq},
GenericPath,
},
InteractionConsumer, Transaction,
},
secure_channel::pake::PaseMgr,
tlv::{self, FromTLV, TLVArray, TLVWriter, TagType, ToTLV},
transport::{
proto_demux::ResponseRequired,
session::{Session, SessionMode},
},
};
use log::{error, info};
use std::sync::{Arc, RwLock};
#[derive(Clone)]
pub struct DataModel {
pub node: Arc<RwLock<Box<Node>>>,
acl_mgr: Arc<AclMgr>,
}
impl DataModel {
pub fn new(
dev_details: BasicInfoConfig,
dev_att: Box<dyn DevAttDataFetcher>,
fabric_mgr: Arc<FabricMgr>,
acl_mgr: Arc<AclMgr>,
pase_mgr: PaseMgr,
) -> Result<Self, Error> {
let dm = DataModel {
node: Arc::new(RwLock::new(Node::new()?)),
acl_mgr: acl_mgr.clone(),
};
{
let mut node = dm.node.write()?;
node.set_changes_cb(Box::new(dm.clone()));
device_type_add_root_node(
&mut node,
dev_details,
dev_att,
fabric_mgr,
acl_mgr,
pase_mgr,
)?;
}
Ok(dm)
}
// Encode a write attribute from a path that may or may not be wildcard
fn handle_write_attr_path(
node: &mut Node,
accessor: &Accessor,
attr_data: &AttrData,
tw: &mut TLVWriter,
) {
let gen_path = attr_data.path.to_gp();
let mut encoder = AttrWriteEncoder::new(tw, TagType::Anonymous);
encoder.set_path(gen_path);
// The unsupported pieces of the wildcard path
if attr_data.path.cluster.is_none() {
encoder.encode_status(IMStatusCode::UnsupportedCluster, 0);
return;
}
if attr_data.path.attr.is_none() {
encoder.encode_status(IMStatusCode::UnsupportedAttribute, 0);
return;
}
// Get the data
let write_data = match &attr_data.data {
EncodeValue::Closure(_) | EncodeValue::Value(_) => {
error!("Not supported");
return;
}
EncodeValue::Tlv(t) => t,
};
if gen_path.is_wildcard() {
// This is a wildcard path, skip error
// This is required because there could be access control errors too that need
// to be taken care of.
encoder.skip_error();
}
let mut attr = AttrDetails {
// will be updated in the loop below
attr_id: 0,
list_index: attr_data.path.list_index,
fab_filter: false,
fab_idx: accessor.fab_idx,
};
let result = node.for_each_cluster_mut(&gen_path, |path, c| {
if attr_data.data_ver.is_some() && Some(c.base().get_dataver()) != attr_data.data_ver {
encoder.encode_status(IMStatusCode::DataVersionMismatch, 0);
return Ok(());
}
attr.attr_id = path.leaf.unwrap_or_default() as u16;
encoder.set_path(*path);
let mut access_req = AccessReq::new(accessor, path, Access::WRITE);
let r = match Cluster::write_attribute(c, &mut access_req, write_data, &attr) {
Ok(_) => IMStatusCode::Success,
Err(e) => e,
};
encoder.encode_status(r, 0);
Ok(())
});
if let Err(e) = result {
// We hit this only if this is a non-wildcard path and some parts of the path are missing
encoder.encode_status(e, 0);
}
}
// Handle command from a path that may or may not be wildcard
fn handle_command_path(node: &mut Node, cmd_req: &mut CommandReq) {
let wildcard = cmd_req.cmd.path.is_wildcard();
let path = cmd_req.cmd.path;
let result = node.for_each_cluster_mut(&path, |path, c| {
cmd_req.cmd.path = *path;
let result = c.handle_command(cmd_req);
if let Err(e) = result {
// It is likely that we might have to do an 'Access' aware traversal
// if there are other conditions in the wildcard scenario that shouldn't be
// encoded as CmdStatus
if !(wildcard && e == IMStatusCode::UnsupportedCommand) {
let invoke_resp = ib::InvResp::status_new(cmd_req.cmd, e, 0);
let _ = invoke_resp.to_tlv(cmd_req.resp, TagType::Anonymous);
}
}
Ok(())
});
if !wildcard {
if let Err(e) = result {
// We hit this only if this is a non-wildcard path
let invoke_resp = ib::InvResp::status_new(cmd_req.cmd, e, 0);
let _ = invoke_resp.to_tlv(cmd_req.resp, TagType::Anonymous);
}
}
}
fn sess_to_accessor(&self, sess: &Session) -> Accessor {
match sess.get_session_mode() {
SessionMode::Case(c) => {
let mut subject =
AccessorSubjects::new(sess.get_peer_node_id().unwrap_or_default());
for i in c.cat_ids {
if i != 0 {
let _ = subject.add_catid(i);
}
}
Accessor::new(c.fab_idx, subject, AuthMode::Case, self.acl_mgr.clone())
}
SessionMode::Pase => Accessor::new(
0,
AccessorSubjects::new(1),
AuthMode::Pase,
self.acl_mgr.clone(),
),
SessionMode::PlainText => Accessor::new(
0,
AccessorSubjects::new(1),
AuthMode::Invalid,
self.acl_mgr.clone(),
),
}
}
/// Returns true if the path matches the cluster path and the data version is a match
fn data_filter_matches(
filters: &Option<&TLVArray<DataVersionFilter>>,
path: &GenericPath,
data_ver: u32,
) -> bool {
if let Some(filters) = *filters {
for filter in filters.iter() {
// TODO: No handling of 'node' comparison yet
if Some(filter.path.endpoint) == path.endpoint
&& Some(filter.path.cluster) == path.cluster
&& filter.data_ver == data_ver
{
return true;
}
}
}
false
}
}
pub mod read;
pub mod subscribe;
/// Type of Resume Request
enum ResumeReq {
Subscribe(subscribe::SubsCtx),
Read(read::ResumeReadReq),
}
impl objects::ChangeConsumer for DataModel {
fn endpoint_added(&self, id: EndptId, endpoint: &mut Endpoint) -> Result<(), Error> {
endpoint.add_cluster(DescriptorCluster::new(id, self.clone())?)?;
Ok(())
}
}
impl InteractionConsumer for DataModel {
fn consume_write_attr(
&self,
write_req: &WriteReq,
trans: &mut Transaction,
tw: &mut TLVWriter,
) -> Result<(), Error> {
let accessor = self.sess_to_accessor(trans.session);
tw.start_array(TagType::Context(msg::WriteRespTag::WriteResponses as u8))?;
let mut node = self.node.write().unwrap();
for attr_data in write_req.write_requests.iter() {
DataModel::handle_write_attr_path(&mut node, &accessor, &attr_data, tw);
}
tw.end_container()?;
Ok(())
}
fn consume_read_attr(
&self,
rx_buf: &[u8],
trans: &mut Transaction,
tw: &mut TLVWriter,
) -> Result<(), Error> {
let mut resume_from = None;
let root = tlv::get_root_node(rx_buf)?;
let req = ReadReq::from_tlv(&root)?;
self.handle_read_req(&req, trans, tw, &mut resume_from)?;
if resume_from.is_some() {
// This is a multi-hop read transaction, remember this read request
let resume = read::ResumeReadReq::new(rx_buf, &resume_from)?;
if !trans.exch.is_data_none() {
error!("Exchange data already set, and multi-hop read");
return Err(Error::InvalidState);
}
trans.exch.set_data_boxed(Box::new(ResumeReq::Read(resume)));
}
Ok(())
}
fn consume_invoke_cmd(
&self,
inv_req_msg: &InvReq,
trans: &mut Transaction,
tw: &mut TLVWriter,
) -> Result<(), Error> {
let mut node = self.node.write().unwrap();
if let Some(inv_requests) = &inv_req_msg.inv_requests {
// Array of InvokeResponse IBs
tw.start_array(TagType::Context(msg::InvRespTag::InvokeResponses as u8))?;
for i in inv_requests.iter() {
let data = if let Some(data) = i.data.unwrap_tlv() {
data
} else {
continue;
};
info!("Invoke Commmand Handler executing: {:?}", i.path);
let mut cmd_req = CommandReq {
cmd: i.path,
data,
trans,
resp: tw,
};
DataModel::handle_command_path(&mut node, &mut cmd_req);
}
tw.end_container()?;
}
Ok(())
}
fn consume_status_report(
&self,
req: &msg::StatusResp,
trans: &mut Transaction,
tw: &mut TLVWriter,
) -> Result<(OpCode, ResponseRequired), Error> {
if let Some(mut resume) = trans.exch.take_data_boxed::<ResumeReq>() {
let result = match *resume {
ResumeReq::Read(ref mut read) => self.handle_resume_read(read, trans, tw)?,
ResumeReq::Subscribe(ref mut ctx) => ctx.handle_status_report(trans, tw, self)?,
};
trans.exch.set_data_boxed(resume);
Ok(result)
} else {
// Nothing to do for now
trans.complete();
info!("Received status report with status {:?}", req.status);
Ok((OpCode::Reserved, ResponseRequired::No))
}
}
fn consume_subscribe(
&self,
rx_buf: &[u8],
trans: &mut Transaction,
tw: &mut TLVWriter,
) -> Result<(OpCode, ResponseRequired), Error> {
if !trans.exch.is_data_none() {
error!("Exchange data already set!");
return Err(Error::InvalidState);
}
let ctx = SubsCtx::new(rx_buf, trans, tw, self)?;
trans
.exch
.set_data_boxed(Box::new(ResumeReq::Subscribe(ctx)));
Ok((OpCode::ReportData, ResponseRequired::Yes))
}
}
/// Encoder for generating a response to a write request
pub struct AttrWriteEncoder<'a, 'b, 'c> {
tw: &'a mut TLVWriter<'b, 'c>,
tag: TagType,
path: GenericPath,
skip_error: bool,
}
impl<'a, 'b, 'c> AttrWriteEncoder<'a, 'b, 'c> {
pub fn new(tw: &'a mut TLVWriter<'b, 'c>, tag: TagType) -> Self {
Self {
tw,
tag,
path: Default::default(),
skip_error: false,
}
}
pub fn skip_error(&mut self) {
self.skip_error = true;
}
pub fn set_path(&mut self, path: GenericPath) {
self.path = path;
}
}
impl<'a, 'b, 'c> Encoder for AttrWriteEncoder<'a, 'b, 'c> {
fn encode(&mut self, _value: EncodeValue) {
// Only status encodes for AttrWriteResponse
}
fn encode_status(&mut self, status: IMStatusCode, cluster_status: u16) {
if self.skip_error && status != IMStatusCode::Success {
// Don't encode errors
return;
}
let resp = ib::AttrStatus::new(&self.path, status, cluster_status);
let _ = resp.to_tlv(self.tw, self.tag);
}
}


@ -1,319 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::{
acl::{AccessReq, Accessor},
data_model::{core::DataModel, objects::*},
error::*,
interaction_model::{
core::{IMStatusCode, OpCode},
messages::{
ib::{self, DataVersionFilter},
msg::{self, ReadReq, ReportDataTag::MoreChunkedMsgs, ReportDataTag::SupressResponse},
GenericPath,
},
Transaction,
},
tlv::{self, FromTLV, TLVArray, TLVWriter, TagType, ToTLV},
transport::{packet::Packet, proto_demux::ResponseRequired},
utils::writebuf::WriteBuf,
wb_shrink, wb_unshrink,
};
use log::error;
/// Encoder for generating a response to a read request
pub struct AttrReadEncoder<'a, 'b, 'c> {
tw: &'a mut TLVWriter<'b, 'c>,
data_ver: u32,
path: GenericPath,
skip_error: bool,
data_ver_filters: Option<&'a TLVArray<'a, DataVersionFilter>>,
is_buffer_full: bool,
}
impl<'a, 'b, 'c> AttrReadEncoder<'a, 'b, 'c> {
pub fn new(tw: &'a mut TLVWriter<'b, 'c>) -> Self {
Self {
tw,
data_ver: 0,
skip_error: false,
path: Default::default(),
data_ver_filters: None,
is_buffer_full: false,
}
}
pub fn skip_error(&mut self, skip: bool) {
self.skip_error = skip;
}
pub fn set_data_ver(&mut self, data_ver: u32) {
self.data_ver = data_ver;
}
pub fn set_data_ver_filters(&mut self, filters: &'a TLVArray<'a, DataVersionFilter>) {
self.data_ver_filters = Some(filters);
}
pub fn set_path(&mut self, path: GenericPath) {
self.path = path;
}
pub fn is_buffer_full(&self) -> bool {
self.is_buffer_full
}
}
impl<'a, 'b, 'c> Encoder for AttrReadEncoder<'a, 'b, 'c> {
fn encode(&mut self, value: EncodeValue) {
let resp = ib::AttrResp::Data(ib::AttrData::new(
Some(self.data_ver),
ib::AttrPath::new(&self.path),
value,
));
let anchor = self.tw.get_tail();
if resp.to_tlv(self.tw, TagType::Anonymous).is_err() {
self.is_buffer_full = true;
self.tw.rewind_to(anchor);
}
}
fn encode_status(&mut self, status: IMStatusCode, cluster_status: u16) {
if !self.skip_error {
let resp =
ib::AttrResp::Status(ib::AttrStatus::new(&self.path, status, cluster_status));
let _ = resp.to_tlv(self.tw, TagType::Anonymous);
}
}
}
/// State to maintain when a Read Request needs to be resumed
/// resumed - the next chunk of the read needs to be returned
#[derive(Default)]
pub struct ResumeReadReq {
/// The Read Request Attribute Path that caused chunking, and this is the path
/// that needs to be resumed.
pub pending_req: Option<Packet<'static>>,
/// The Attribute that couldn't be encoded because our buffer got full. The next chunk
/// will start encoding from this attribute onwards.
/// Note that, given wildcard reads, one PendingPath in the member above can generate
/// multiple encode paths. Hence this has to be maintained separately.
pub resume_from: Option<GenericPath>,
}
impl ResumeReadReq {
pub fn new(rx_buf: &[u8], resume_from: &Option<GenericPath>) -> Result<Self, Error> {
let mut packet = Packet::new_rx()?;
let dst = packet.as_borrow_slice();
let src_len = rx_buf.len();
dst[..src_len].copy_from_slice(rx_buf);
packet.get_parsebuf()?.set_len(src_len);
Ok(ResumeReadReq {
pending_req: Some(packet),
resume_from: *resume_from,
})
}
}
impl DataModel {
pub fn read_attribute_raw(
&self,
endpoint: EndptId,
cluster: ClusterId,
attr: AttrId,
) -> Result<AttrValue, IMStatusCode> {
let node = self.node.read().unwrap();
let cluster = node.get_cluster(endpoint, cluster)?;
cluster.base().read_attribute_raw(attr).map(|a| a.clone())
}
/// Encode a read attribute from a path that may or may not be wildcard
///
/// If the buffer gets full while generating the read response, we will return
/// an Err(path), where the path is the path that we should resume from, for the next chunk.
/// This facilitates chunk management
fn handle_read_attr_path(
node: &Node,
accessor: &Accessor,
attr_encoder: &mut AttrReadEncoder,
attr_details: &mut AttrDetails,
resume_from: &mut Option<GenericPath>,
) -> Result<(), Error> {
let mut status = Ok(());
let path = attr_encoder.path;
// Skip error reporting for wildcard paths, don't for concrete paths
attr_encoder.skip_error(path.is_wildcard());
let result = node.for_each_attribute(&path, |path, c| {
// Ignore processing if data filter matches.
// For a wildcard attribute, this may end up happening unnecessarily for all attributes, although
// a single skip for the cluster is sufficient. That requires us to replace this for_each with a
// for_each_cluster
let cluster_data_ver = c.base().get_dataver();
if Self::data_filter_matches(&attr_encoder.data_ver_filters, path, cluster_data_ver) {
return Ok(());
}
// The resume_from indicates that this is the next chunk of a previous Read Request. In such cases, we
// need to skip until we hit this path.
if let Some(r) = resume_from {
// If resume_from is valid, and we haven't hit the resume_from yet, skip encoding
if r != path {
return Ok(());
} else {
// Else, wipe out the resume_from so subsequent paths can be encoded
*resume_from = None;
}
}
attr_details.attr_id = path.leaf.unwrap_or_default() as u16;
// Overwrite the previous path with the concrete path
attr_encoder.set_path(*path);
// Set the cluster's data version
attr_encoder.set_data_ver(cluster_data_ver);
let mut access_req = AccessReq::new(accessor, path, Access::READ);
Cluster::read_attribute(c, &mut access_req, attr_encoder, attr_details);
if attr_encoder.is_buffer_full() {
// Buffer is full, next time resume from this attribute
*resume_from = Some(*path);
status = Err(Error::NoSpace);
}
Ok(())
});
if let Err(e) = result {
// We hit this only if this is a non-wildcard path
attr_encoder.encode_status(e, 0);
}
status
}
/// Process an array of Attribute Read Requests
///
/// When the API returns, a chunked read is in progress if *resume_from is Some(x); otherwise
/// the read is complete
pub(super) fn handle_read_attr_array(
&self,
read_req: &ReadReq,
trans: &mut Transaction,
old_tw: &mut TLVWriter,
resume_from: &mut Option<GenericPath>,
) -> Result<(), Error> {
let old_wb = old_tw.get_buf();
// Note, this function may be called from multiple places: a) an actual read
// request, b) a resumed read request, c) a subscribe request or d) a resumed subscribe
// request. Hopefully RESERVE_SIZE is sufficient to address all those scenarios.
//
// This is the amount of space we reserve for other things to be attached towards
// the end
const RESERVE_SIZE: usize = 24;
let mut new_wb = wb_shrink!(old_wb, RESERVE_SIZE);
let mut tw = TLVWriter::new(&mut new_wb);
let mut attr_encoder = AttrReadEncoder::new(&mut tw);
if let Some(filters) = &read_req.dataver_filters {
attr_encoder.set_data_ver_filters(filters);
}
if let Some(attr_requests) = &read_req.attr_requests {
let accessor = self.sess_to_accessor(trans.session);
let mut attr_details = AttrDetails::new(accessor.fab_idx, read_req.fabric_filtered);
let node = self.node.read().unwrap();
attr_encoder
.tw
.start_array(TagType::Context(msg::ReportDataTag::AttributeReports as u8))?;
let mut result = Ok(());
for attr_path in attr_requests.iter() {
attr_encoder.set_path(attr_path.to_gp());
// Extract the attr_path fields into various structures
attr_details.list_index = attr_path.list_index;
result = DataModel::handle_read_attr_path(
&node,
&accessor,
&mut attr_encoder,
&mut attr_details,
resume_from,
);
if result.is_err() {
break;
}
}
// Now that all the read reports are captured, let's use the old_tw that is
// the full writebuf, and hopefully has all the necessary space to store this
wb_unshrink!(old_wb, new_wb);
old_tw.end_container()?; // Finish the AttrReports
if result.is_err() {
// If there was an error, indicate chunking. The resume_read_req would have been
// already populated in the loop above.
old_tw.bool(TagType::Context(MoreChunkedMsgs as u8), true)?;
} else {
// A None resume_from indicates no chunking
*resume_from = None;
}
}
Ok(())
}
/// Handle a read request
///
/// This could be called from an actual read request or a resumed read request. Subscription
/// requests do not come to this function.
/// When the API returns, a chunked read is in progress if *resume_from is Some(x); otherwise
/// the read is complete
pub fn handle_read_req(
&self,
read_req: &ReadReq,
trans: &mut Transaction,
tw: &mut TLVWriter,
resume_from: &mut Option<GenericPath>,
) -> Result<(OpCode, ResponseRequired), Error> {
tw.start_struct(TagType::Anonymous)?;
self.handle_read_attr_array(read_req, trans, tw, resume_from)?;
if resume_from.is_none() {
tw.bool(TagType::Context(SupressResponse as u8), true)?;
// Mark transaction complete, if not chunked
trans.complete();
}
tw.end_container()?;
Ok((OpCode::ReportData, ResponseRequired::Yes))
}
/// Handle a resumed read request
pub fn handle_resume_read(
&self,
resume_read_req: &mut ResumeReadReq,
trans: &mut Transaction,
tw: &mut TLVWriter,
) -> Result<(OpCode, ResponseRequired), Error> {
if let Some(packet) = resume_read_req.pending_req.as_mut() {
let rx_buf = packet.get_parsebuf()?.as_borrow_slice();
let root = tlv::get_root_node(rx_buf)?;
let req = ReadReq::from_tlv(&root)?;
self.handle_read_req(&req, trans, tw, &mut resume_read_req.resume_from)
} else {
// No pending req, is that even possible?
error!("This shouldn't have happened");
Ok((OpCode::Reserved, ResponseRequired::No))
}
}
}
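The chunking contract described above can be summarized in a short caller-side sketch, mirroring consume_read_attr earlier in this diff. The names rx_buf, read_req, trans, tw and dm are assumed to be in scope: a None resume_from after the call means the read finished, a Some(path) means a follow-up chunk must resume from that path.
let mut resume_from = None;
let _ = dm.handle_read_req(&read_req, trans, tw, &mut resume_from)?;
if resume_from.is_some() {
    // Remember the original request so the next chunk can pick up where we stopped;
    // the caller stashes this on the exchange (see consume_read_attr in core.rs).
    let _pending = ResumeReadReq::new(rx_buf, &resume_from)?;
}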


@ -1,142 +0,0 @@
/*
*
* Copyright (c) 2023 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::sync::atomic::{AtomicU32, Ordering};
use crate::{
error::Error,
interaction_model::{
core::OpCode,
messages::{
msg::{self, SubscribeReq, SubscribeResp},
GenericPath,
},
},
tlv::{self, get_root_node_struct, FromTLV, TLVWriter, TagType, ToTLV},
transport::proto_demux::ResponseRequired,
};
use super::{read::ResumeReadReq, DataModel, Transaction};
static SUBS_ID: AtomicU32 = AtomicU32::new(1);
#[derive(PartialEq)]
enum SubsState {
Confirming,
Confirmed,
}
pub struct SubsCtx {
state: SubsState,
id: u32,
resume_read_req: Option<ResumeReadReq>,
}
impl SubsCtx {
pub fn new(
rx_buf: &[u8],
trans: &mut Transaction,
tw: &mut TLVWriter,
dm: &DataModel,
) -> Result<Self, Error> {
let root = get_root_node_struct(rx_buf)?;
let req = SubscribeReq::from_tlv(&root)?;
let mut ctx = SubsCtx {
state: SubsState::Confirming,
// TODO
id: SUBS_ID.fetch_add(1, Ordering::SeqCst),
resume_read_req: None,
};
let mut resume_from = None;
ctx.do_read(&req, trans, tw, dm, &mut resume_from)?;
if resume_from.is_some() {
// This is a multi-hop read transaction, remember this read request
ctx.resume_read_req = Some(ResumeReadReq::new(rx_buf, &resume_from)?);
}
Ok(ctx)
}
pub fn handle_status_report(
&mut self,
trans: &mut Transaction,
tw: &mut TLVWriter,
dm: &DataModel,
) -> Result<(OpCode, ResponseRequired), Error> {
if self.state != SubsState::Confirming {
// Not relevant for us
trans.complete();
return Err(Error::Invalid);
}
// Is there a previous resume read pending
if self.resume_read_req.is_some() {
let mut resume_read_req = self.resume_read_req.take().unwrap();
if let Some(packet) = resume_read_req.pending_req.as_mut() {
let rx_buf = packet.get_parsebuf()?.as_borrow_slice();
let root = tlv::get_root_node(rx_buf)?;
let req = SubscribeReq::from_tlv(&root)?;
self.do_read(&req, trans, tw, dm, &mut resume_read_req.resume_from)?;
if resume_read_req.resume_from.is_some() {
// More chunks are pending, setup resume_read_req again
self.resume_read_req = Some(resume_read_req);
}
return Ok((OpCode::ReportData, ResponseRequired::Yes));
}
}
// Reaching this point implies that the read is now complete
self.confirm_subscription(trans, tw)
}
fn confirm_subscription(
&mut self,
trans: &mut Transaction,
tw: &mut TLVWriter,
) -> Result<(OpCode, ResponseRequired), Error> {
self.state = SubsState::Confirmed;
// TODO
let resp = SubscribeResp::new(self.id, 40);
resp.to_tlv(tw, TagType::Anonymous)?;
trans.complete();
Ok((OpCode::SubscriptResponse, ResponseRequired::Yes))
}
fn do_read(
&mut self,
req: &SubscribeReq,
trans: &mut Transaction,
tw: &mut TLVWriter,
dm: &DataModel,
resume_from: &mut Option<GenericPath>,
) -> Result<(), Error> {
let read_req = req.to_read_req();
tw.start_struct(TagType::Anonymous)?;
tw.u32(
TagType::Context(msg::ReportDataTag::SubscriptionId as u8),
self.id,
)?;
dm.handle_read_attr_array(&read_req, trans, tw, resume_from)?;
tw.end_container()?;
Ok(())
}
}


@ -1,85 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use super::cluster_basic_information::BasicInfoCluster;
use super::cluster_basic_information::BasicInfoConfig;
use super::cluster_on_off::OnOffCluster;
use super::objects::*;
use super::sdm::admin_commissioning::AdminCommCluster;
use super::sdm::dev_att::DevAttDataFetcher;
use super::sdm::general_commissioning::GenCommCluster;
use super::sdm::noc::NocCluster;
use super::sdm::nw_commissioning::NwCommCluster;
use super::system_model::access_control::AccessControlCluster;
use crate::acl::AclMgr;
use crate::error::*;
use crate::fabric::FabricMgr;
use crate::secure_channel::pake::PaseMgr;
use std::sync::Arc;
use std::sync::RwLockWriteGuard;
pub const DEV_TYPE_ROOT_NODE: DeviceType = DeviceType {
dtype: 0x0016,
drev: 1,
};
type WriteNode<'a> = RwLockWriteGuard<'a, Box<Node>>;
pub fn device_type_add_root_node(
node: &mut WriteNode,
dev_info: BasicInfoConfig,
dev_att: Box<dyn DevAttDataFetcher>,
fabric_mgr: Arc<FabricMgr>,
acl_mgr: Arc<AclMgr>,
pase_mgr: PaseMgr,
) -> Result<EndptId, Error> {
// Add the root endpoint
let endpoint = node.add_endpoint(DEV_TYPE_ROOT_NODE)?;
if endpoint != 0 {
// Somehow endpoint 0 was already added, this shouldn't be the case
return Err(Error::Invalid);
};
// Add the mandatory clusters
node.add_cluster(0, BasicInfoCluster::new(dev_info)?)?;
let general_commissioning = GenCommCluster::new()?;
let failsafe = general_commissioning.failsafe();
node.add_cluster(0, general_commissioning)?;
node.add_cluster(0, NwCommCluster::new()?)?;
node.add_cluster(0, AdminCommCluster::new(pase_mgr)?)?;
node.add_cluster(
0,
NocCluster::new(dev_att, fabric_mgr, acl_mgr.clone(), failsafe)?,
)?;
node.add_cluster(0, AccessControlCluster::new(acl_mgr)?)?;
Ok(endpoint)
}
const DEV_TYPE_ON_OFF_LIGHT: DeviceType = DeviceType {
dtype: 0x0100,
drev: 2,
};
pub const DEV_TYPE_ON_SMART_SPEAKER: DeviceType = DeviceType {
dtype: 0x0022,
drev: 2,
};
pub fn device_type_add_on_off_light(node: &mut WriteNode) -> Result<EndptId, Error> {
let endpoint = node.add_endpoint(DEV_TYPE_ON_OFF_LIGHT)?;
node.add_cluster(endpoint, OnOffCluster::new()?)?;
Ok(endpoint)
}
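DEV_TYPE_ON_SMART_SPEAKER above has no helper of its own in this file; a hypothetical companion to device_type_add_on_off_light could look like the sketch below (the speaker example at the top of this diff instead adds its clusters manually).
pub fn device_type_add_smart_speaker(node: &mut WriteNode) -> Result<EndptId, Error> {
    // Only the endpoint is created here; clusters such as Media Playback are added by the caller.
    node.add_endpoint(DEV_TYPE_ON_SMART_SPEAKER)
}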


@ -1,348 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::{
acl::AccessReq,
data_model::objects::{Access, AttrValue, Attribute, EncodeValue, Quality},
error::*,
interaction_model::{command::CommandReq, core::IMStatusCode},
// TODO: This layer shouldn't really depend on the TLV layer, should create an abstraction layer
tlv::{Nullable, TLVElement, TLVWriter, TagType},
};
use log::error;
use num_derive::FromPrimitive;
use rand::Rng;
use std::fmt::{self, Debug};
use super::{AttrId, ClusterId, Encoder};
pub const ATTRS_PER_CLUSTER: usize = 10;
pub const CMDS_PER_CLUSTER: usize = 8;
#[derive(FromPrimitive, Debug)]
pub enum GlobalElements {
_ClusterRevision = 0xFFFD,
FeatureMap = 0xFFFC,
AttributeList = 0xFFFB,
_EventList = 0xFFFA,
_ClientGenCmd = 0xFFF9,
ServerGenCmd = 0xFFF8,
FabricIndex = 0xFE,
}
// TODO: What if, instead of creating this, we just passed the AttrData/AttrPath to the read/write
// methods?
/// The Attribute Details structure records the details about the attribute under consideration.
/// Typically this structure is progressively built as we proceed through the request processing.
pub struct AttrDetails {
/// Fabric Filtering Activated
pub fab_filter: bool,
/// The current Fabric Index
pub fab_idx: u8,
/// List Index, if any
pub list_index: Option<Nullable<u16>>,
/// The actual attribute ID
pub attr_id: AttrId,
}
impl AttrDetails {
pub fn new(fab_idx: u8, fab_filter: bool) -> Self {
Self {
fab_filter,
fab_idx,
list_index: None,
attr_id: 0,
}
}
}
pub trait ClusterType {
// TODO: 5 methods are going to be quite expensive for the vtables of all the clusters
fn base(&self) -> &Cluster;
fn base_mut(&mut self) -> &mut Cluster;
fn read_custom_attribute(&self, _encoder: &mut dyn Encoder, _attr: &AttrDetails) {}
fn handle_command(&mut self, cmd_req: &mut CommandReq) -> Result<(), IMStatusCode> {
let cmd = cmd_req.cmd.path.leaf.map(|a| a as u16);
println!("Received command: {:?}", cmd);
Err(IMStatusCode::UnsupportedCommand)
}
/// Write an attribute
///
/// Note that if this method is overridden, you must handle the write for all attributes, even those
/// that are not 'custom'. This is different from how the read_custom_attribute() method is handled.
/// The reason is that you may want to handle an attribute write request even when it targets a
/// standard attribute type like u16, u32, etc.
///
/// If you wish to update the standard attribute in the data model database, you must call the
/// write_attribute_from_tlv() method from the base cluster, as is shown here in the default case
fn write_attribute(
&mut self,
attr: &AttrDetails,
data: &TLVElement,
) -> Result<(), IMStatusCode> {
self.base_mut().write_attribute_from_tlv(attr.attr_id, data)
}
}
pub struct Cluster {
pub(super) id: ClusterId,
attributes: Vec<Attribute>,
data_ver: u32,
}
impl Cluster {
pub fn new(id: ClusterId) -> Result<Cluster, Error> {
let mut c = Cluster {
id,
attributes: Vec::with_capacity(ATTRS_PER_CLUSTER),
data_ver: rand::thread_rng().gen_range(0..0xFFFFFFFF),
};
c.add_default_attributes()?;
Ok(c)
}
pub fn id(&self) -> ClusterId {
self.id
}
pub fn get_dataver(&self) -> u32 {
self.data_ver
}
pub fn set_feature_map(&mut self, map: u32) -> Result<(), Error> {
self.write_attribute_raw(GlobalElements::FeatureMap as u16, AttrValue::Uint32(map))
.map_err(|_| Error::Invalid)?;
Ok(())
}
fn add_default_attributes(&mut self) -> Result<(), Error> {
// Default feature map is 0
self.add_attribute(Attribute::new(
GlobalElements::FeatureMap as u16,
AttrValue::Uint32(0),
Access::RV,
Quality::NONE,
))?;
self.add_attribute(Attribute::new(
GlobalElements::AttributeList as u16,
AttrValue::Custom,
Access::RV,
Quality::NONE,
))
}
pub fn add_attributes(&mut self, attrs: &[Attribute]) -> Result<(), Error> {
if self.attributes.len() + attrs.len() <= self.attributes.capacity() {
self.attributes.extend_from_slice(attrs);
Ok(())
} else {
Err(Error::NoSpace)
}
}
pub fn add_attribute(&mut self, attr: Attribute) -> Result<(), Error> {
if self.attributes.len() < self.attributes.capacity() {
self.attributes.push(attr);
Ok(())
} else {
Err(Error::NoSpace)
}
}
fn get_attribute_index(&self, attr_id: AttrId) -> Option<usize> {
self.attributes.iter().position(|c| c.id == attr_id)
}
fn get_attribute(&self, attr_id: AttrId) -> Result<&Attribute, Error> {
let index = self
.get_attribute_index(attr_id)
.ok_or(Error::AttributeNotFound)?;
Ok(&self.attributes[index])
}
fn get_attribute_mut(&mut self, attr_id: AttrId) -> Result<&mut Attribute, Error> {
let index = self
.get_attribute_index(attr_id)
.ok_or(Error::AttributeNotFound)?;
Ok(&mut self.attributes[index])
}
// Returns a slice of attributes, with either a single attribute or all (wildcard)
pub fn get_wildcard_attribute(
&self,
attribute: Option<AttrId>,
) -> Result<(&[Attribute], bool), IMStatusCode> {
if let Some(a) = attribute {
if let Some(i) = self.get_attribute_index(a) {
Ok((&self.attributes[i..i + 1], false))
} else {
Err(IMStatusCode::UnsupportedAttribute)
}
} else {
Ok((&self.attributes[..], true))
}
}
pub fn read_attribute(
c: &dyn ClusterType,
access_req: &mut AccessReq,
encoder: &mut dyn Encoder,
attr: &AttrDetails,
) {
let mut error = IMStatusCode::Success;
let base = c.base();
let a = if let Ok(a) = base.get_attribute(attr.attr_id) {
a
} else {
encoder.encode_status(IMStatusCode::UnsupportedAttribute, 0);
return;
};
if !a.access.contains(Access::READ) {
error = IMStatusCode::UnsupportedRead;
}
access_req.set_target_perms(a.access);
if !access_req.allow() {
error = IMStatusCode::UnsupportedAccess;
}
if error != IMStatusCode::Success {
encoder.encode_status(error, 0);
} else if Attribute::is_system_attr(attr.attr_id) {
c.base().read_system_attribute(encoder, a)
} else if a.value != AttrValue::Custom {
encoder.encode(EncodeValue::Value(&a.value))
} else {
c.read_custom_attribute(encoder, attr)
}
}
fn encode_attribute_ids(&self, tag: TagType, tw: &mut TLVWriter) {
let _ = tw.start_array(tag);
for a in &self.attributes {
let _ = tw.u16(TagType::Anonymous, a.id);
}
let _ = tw.end_container();
}
fn read_system_attribute(&self, encoder: &mut dyn Encoder, attr: &Attribute) {
let global_attr: Option<GlobalElements> = num::FromPrimitive::from_u16(attr.id);
if let Some(global_attr) = global_attr {
match global_attr {
GlobalElements::AttributeList => {
encoder.encode(EncodeValue::Closure(&|tag, tw| {
self.encode_attribute_ids(tag, tw)
}));
return;
}
GlobalElements::FeatureMap => {
encoder.encode(EncodeValue::Value(&attr.value));
return;
}
_ => {
error!("This attribute not yet handled {:?}", global_attr);
}
}
}
encoder.encode_status(IMStatusCode::UnsupportedAttribute, 0)
}
pub fn read_attribute_raw(&self, attr_id: AttrId) -> Result<&AttrValue, IMStatusCode> {
let a = self
.get_attribute(attr_id)
.map_err(|_| IMStatusCode::UnsupportedAttribute)?;
Ok(&a.value)
}
pub fn write_attribute(
c: &mut dyn ClusterType,
access_req: &mut AccessReq,
data: &TLVElement,
attr: &AttrDetails,
) -> Result<(), IMStatusCode> {
let base = c.base_mut();
let a = if let Ok(a) = base.get_attribute_mut(attr.attr_id) {
a
} else {
return Err(IMStatusCode::UnsupportedAttribute);
};
if !a.access.contains(Access::WRITE) {
return Err(IMStatusCode::UnsupportedWrite);
}
access_req.set_target_perms(a.access);
if !access_req.allow() {
return Err(IMStatusCode::UnsupportedAccess);
}
c.write_attribute(attr, data)
}
pub fn write_attribute_from_tlv(
&mut self,
attr_id: AttrId,
data: &TLVElement,
) -> Result<(), IMStatusCode> {
let a = self.get_attribute_mut(attr_id)?;
if a.value != AttrValue::Custom {
let mut value = a.value.clone();
value
.update_from_tlv(data)
.map_err(|_| IMStatusCode::Failure)?;
a.set_value(value)
.map(|_| {
self.cluster_changed();
})
.map_err(|_| IMStatusCode::UnsupportedWrite)
} else {
Err(IMStatusCode::UnsupportedAttribute)
}
}
pub fn write_attribute_raw(&mut self, attr_id: AttrId, value: AttrValue) -> Result<(), Error> {
let a = self.get_attribute_mut(attr_id)?;
a.set_value(value).map(|_| {
self.cluster_changed();
})
}
/// This method must be called for any changes to the data model
/// Currently this only increments the data version, but we can reuse the same
/// for raising events too
pub fn cluster_changed(&mut self) {
self.data_ver = self.data_ver.wrapping_add(1);
}
}
impl std::fmt::Display for Cluster {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "id:{}, ", self.id)?;
write!(f, "attrs[")?;
let mut comma = "";
for element in self.attributes.iter() {
write!(f, "{} {}", comma, element)?;
comma = ",";
}
write!(f, " ], ")
}
}
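To illustrate the write_attribute contract documented in the ClusterType trait above, here is a hedged sketch of a custom cluster that intercepts one attribute and delegates the rest to the base implementation. MyCluster and MY_ATTR_ID are illustrative names, not part of this crate.
struct MyCluster {
    base: Cluster,
}

const MY_ATTR_ID: AttrId = 0x0001;   // hypothetical attribute id

impl ClusterType for MyCluster {
    fn base(&self) -> &Cluster {
        &self.base
    }
    fn base_mut(&mut self) -> &mut Cluster {
        &mut self.base
    }
    fn write_attribute(&mut self, attr: &AttrDetails, data: &TLVElement) -> Result<(), IMStatusCode> {
        if attr.attr_id == MY_ATTR_ID {
            // Custom side effects (e.g. applying the value to hardware) would go here.
        }
        // Always update the data-model copy so reads and the data version stay consistent.
        self.base_mut().write_attribute_from_tlv(attr.attr_id, data)
    }
}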


@ -1,123 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::fmt::{Debug, Formatter};
use crate::{
error::Error,
interaction_model::core::IMStatusCode,
tlv::{FromTLV, TLVElement, TLVWriter, TagType, ToTLV},
};
use log::error;
// TODO: Should this return an IMStatusCode error? If yes, the higher layer
// may have already started encoding the 'success' headers, so we might need to manage
// the tw.rewind() in that case, if we add this support
pub type EncodeValueGen<'a> = &'a dyn Fn(TagType, &mut TLVWriter);
#[derive(Copy, Clone)]
/// A structure for encoding various types of values
pub enum EncodeValue<'a> {
/// This indicates a value that is dynamically generated. This variant
/// is typically used in the transmit/to-tlv path where we want to encode a value at
/// run time
Closure(EncodeValueGen<'a>),
/// This indicates a value that is in the TLVElement form. this variant is
/// typically used in the receive/from-tlv path where we don't want to decode the
/// full value but it can be done at the time of its usage
Tlv(TLVElement<'a>),
/// This indicates a static value. This variant is typically used in the transmit/
/// to-tlv path
Value(&'a dyn ToTLV),
}
impl<'a> EncodeValue<'a> {
pub fn unwrap_tlv(self) -> Option<TLVElement<'a>> {
match self {
EncodeValue::Tlv(t) => Some(t),
_ => None,
}
}
}
impl<'a> PartialEq for EncodeValue<'a> {
fn eq(&self, other: &Self) -> bool {
match *self {
EncodeValue::Closure(_) => {
error!("PartialEq not yet supported");
false
}
EncodeValue::Tlv(a) => {
if let EncodeValue::Tlv(b) = *other {
a == b
} else {
false
}
}
// Just claim false for now
EncodeValue::Value(_) => {
error!("PartialEq not yet supported");
false
}
}
}
}
impl<'a> Debug for EncodeValue<'a> {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
match *self {
EncodeValue::Closure(_) => write!(f, "Contains closure"),
EncodeValue::Tlv(t) => write!(f, "{:?}", t),
EncodeValue::Value(_) => write!(f, "Contains EncodeValue"),
}?;
Ok(())
}
}
impl<'a> ToTLV for EncodeValue<'a> {
fn to_tlv(&self, tw: &mut TLVWriter, tag_type: TagType) -> Result<(), Error> {
match self {
EncodeValue::Closure(f) => {
(f)(tag_type, tw);
Ok(())
}
EncodeValue::Tlv(_) => panic!("This looks invalid"),
EncodeValue::Value(v) => v.to_tlv(tw, tag_type),
}
}
}
impl<'a> FromTLV<'a> for EncodeValue<'a> {
fn from_tlv(data: &TLVElement<'a>) -> Result<Self, Error> {
Ok(EncodeValue::Tlv(*data))
}
}
/// An object that can encode EncodeValue into the necessary hierarchical structure
/// as expected by the Interaction Model
pub trait Encoder {
/// Encode a given value
fn encode(&mut self, value: EncodeValue);
/// Encode a status report
fn encode_status(&mut self, status: IMStatusCode, cluster_status: u16);
}
#[derive(ToTLV, Copy, Clone)]
pub struct DeviceType {
pub dtype: u16,
pub drev: u16,
}
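A short sketch of how the three EncodeValue variants documented above are typically constructed; the concrete values are placeholders, and it assumes AttrValue, TagType and the rest are in scope (AttrValue implements ToTLV, as seen in cluster.rs).
// Transmit path, static value: anything implementing ToTLV.
let _static_value = EncodeValue::Value(&AttrValue::Uint16(42));
// Transmit path, generated at encode time: the closure writes directly via the TLVWriter.
let _generated = EncodeValue::Closure(&|tag, tw| {
    let _ = tw.u16(tag, 42);
});
// Receive path: EncodeValue::Tlv is normally produced by from_tlv() rather than by hand.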


@ -1,118 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::{data_model::objects::ClusterType, error::*, interaction_model::core::IMStatusCode};
use std::fmt;
use super::{ClusterId, DeviceType};
pub const CLUSTERS_PER_ENDPT: usize = 9;
pub struct Endpoint {
dev_type: DeviceType,
clusters: Vec<Box<dyn ClusterType>>,
}
pub type BoxedClusters = [Box<dyn ClusterType>];
impl Endpoint {
pub fn new(dev_type: DeviceType) -> Result<Box<Endpoint>, Error> {
Ok(Box::new(Endpoint {
dev_type,
clusters: Vec::with_capacity(CLUSTERS_PER_ENDPT),
}))
}
pub fn add_cluster(&mut self, cluster: Box<dyn ClusterType>) -> Result<(), Error> {
if self.clusters.len() < self.clusters.capacity() {
self.clusters.push(cluster);
Ok(())
} else {
Err(Error::NoSpace)
}
}
pub fn get_dev_type(&self) -> &DeviceType {
&self.dev_type
}
fn get_cluster_index(&self, cluster_id: ClusterId) -> Option<usize> {
self.clusters.iter().position(|c| c.base().id == cluster_id)
}
pub fn get_cluster(&self, cluster_id: ClusterId) -> Result<&dyn ClusterType, Error> {
let index = self
.get_cluster_index(cluster_id)
.ok_or(Error::ClusterNotFound)?;
Ok(self.clusters[index].as_ref())
}
pub fn get_cluster_mut(
&mut self,
cluster_id: ClusterId,
) -> Result<&mut dyn ClusterType, Error> {
let index = self
.get_cluster_index(cluster_id)
.ok_or(Error::ClusterNotFound)?;
Ok(self.clusters[index].as_mut())
}
// Returns a slice of clusters, with either a single cluster or all (wildcard)
pub fn get_wildcard_clusters(
&self,
cluster: Option<ClusterId>,
) -> Result<(&BoxedClusters, bool), IMStatusCode> {
if let Some(c) = cluster {
if let Some(i) = self.get_cluster_index(c) {
Ok((&self.clusters[i..i + 1], false))
} else {
Err(IMStatusCode::UnsupportedCluster)
}
} else {
Ok((self.clusters.as_slice(), true))
}
}
// Returns a slice of clusters, with either a single cluster or all (wildcard)
pub fn get_wildcard_clusters_mut(
&mut self,
cluster: Option<ClusterId>,
) -> Result<(&mut BoxedClusters, bool), IMStatusCode> {
if let Some(c) = cluster {
if let Some(i) = self.get_cluster_index(c) {
Ok((&mut self.clusters[i..i + 1], false))
} else {
Err(IMStatusCode::UnsupportedCluster)
}
} else {
Ok((&mut self.clusters[..], true))
}
}
}
impl std::fmt::Display for Endpoint {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "clusters:[")?;
let mut comma = "";
for element in self.clusters.iter() {
write!(f, "{} {{ {} }}", comma, element.base())?;
comma = ", ";
}
write!(f, "]")
}
}
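A minimal construction sketch, assuming the items above are in scope; the device type and revision values are illustrative placeholders and `build_endpoint` is a hypothetical helper, not an API of this crate:

fn build_endpoint(cluster: Box<dyn ClusterType>) -> Result<Box<Endpoint>, Error> {
    // The DeviceType values here are placeholders for the example
    let mut endpoint = Endpoint::new(DeviceType { dtype: 0x0016, drev: 1 })?;
    // Clusters are stored as boxed trait objects, up to CLUSTERS_PER_ENDPT of them
    endpoint.add_cluster(cluster)?;
    Ok(endpoint)
}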


@@ -1,298 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::{
data_model::objects::{ClusterType, Endpoint},
error::*,
interaction_model::{core::IMStatusCode, messages::GenericPath},
// TODO: This layer shouldn't really depend on the TLV layer, should create an abstraction layer
};
use std::fmt;
use super::{ClusterId, DeviceType, EndptId};
pub trait ChangeConsumer {
fn endpoint_added(&self, id: EndptId, endpoint: &mut Endpoint) -> Result<(), Error>;
}
pub const ENDPTS_PER_ACC: usize = 3;
pub type BoxedEndpoints = [Option<Box<Endpoint>>];
#[derive(Default)]
pub struct Node {
endpoints: [Option<Box<Endpoint>>; ENDPTS_PER_ACC],
changes_cb: Option<Box<dyn ChangeConsumer>>,
}
impl std::fmt::Display for Node {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "node:")?;
for (i, element) in self.endpoints.iter().enumerate() {
if let Some(e) = element {
writeln!(f, "endpoint {}: {}", i, e)?;
}
}
write!(f, "")
}
}
impl Node {
pub fn new() -> Result<Box<Node>, Error> {
let node = Box::default();
Ok(node)
}
pub fn set_changes_cb(&mut self, consumer: Box<dyn ChangeConsumer>) {
self.changes_cb = Some(consumer);
}
pub fn add_endpoint(&mut self, dev_type: DeviceType) -> Result<EndptId, Error> {
let index = self
.endpoints
.iter()
.position(|x| x.is_none())
.ok_or(Error::NoSpace)?;
let mut endpoint = Endpoint::new(dev_type)?;
if let Some(cb) = &self.changes_cb {
cb.endpoint_added(index as EndptId, &mut endpoint)?;
}
self.endpoints[index] = Some(endpoint);
Ok(index as EndptId)
}
pub fn get_endpoint(&self, endpoint_id: EndptId) -> Result<&Endpoint, Error> {
if (endpoint_id as usize) < ENDPTS_PER_ACC {
let endpoint = self.endpoints[endpoint_id as usize]
.as_ref()
.ok_or(Error::EndpointNotFound)?;
Ok(endpoint)
} else {
Err(Error::EndpointNotFound)
}
}
pub fn get_endpoint_mut(&mut self, endpoint_id: EndptId) -> Result<&mut Endpoint, Error> {
if (endpoint_id as usize) < ENDPTS_PER_ACC {
let endpoint = self.endpoints[endpoint_id as usize]
.as_mut()
.ok_or(Error::EndpointNotFound)?;
Ok(endpoint)
} else {
Err(Error::EndpointNotFound)
}
}
pub fn get_cluster_mut(
&mut self,
e: EndptId,
c: ClusterId,
) -> Result<&mut dyn ClusterType, Error> {
self.get_endpoint_mut(e)?.get_cluster_mut(c)
}
pub fn get_cluster(&self, e: EndptId, c: ClusterId) -> Result<&dyn ClusterType, Error> {
self.get_endpoint(e)?.get_cluster(c)
}
pub fn add_cluster(
&mut self,
endpoint_id: EndptId,
cluster: Box<dyn ClusterType>,
) -> Result<(), Error> {
let endpoint_id = endpoint_id as usize;
if endpoint_id < ENDPTS_PER_ACC {
self.endpoints[endpoint_id]
.as_mut()
.ok_or(Error::NoEndpoint)?
.add_cluster(cluster)
} else {
Err(Error::Invalid)
}
}
// Returns a slice of endpoints, with either a single endpoint or all (wildcard)
pub fn get_wildcard_endpoints(
&self,
endpoint: Option<EndptId>,
) -> Result<(&BoxedEndpoints, usize, bool), IMStatusCode> {
if let Some(e) = endpoint {
let e = e as usize;
if self.endpoints.len() <= e || self.endpoints[e].is_none() {
Err(IMStatusCode::UnsupportedEndpoint)
} else {
Ok((&self.endpoints[e..e + 1], e, false))
}
} else {
Ok((&self.endpoints[..], 0, true))
}
}
pub fn get_wildcard_endpoints_mut(
&mut self,
endpoint: Option<EndptId>,
) -> Result<(&mut BoxedEndpoints, usize, bool), IMStatusCode> {
if let Some(e) = endpoint {
let e = e as usize;
if self.endpoints.len() <= e || self.endpoints[e].is_none() {
Err(IMStatusCode::UnsupportedEndpoint)
} else {
Ok((&mut self.endpoints[e..e + 1], e, false))
}
} else {
Ok((&mut self.endpoints[..], 0, true))
}
}
/// Run a closure for all endpoints as specified in the path
///
/// Note that the path is a GenericPath and hence can be a wildcard path. This function
/// only captures the successful invocations and ignores the erroneous ones. This is in
/// line with the expected wildcard behaviour, which implies 'please run this operation
/// on this wildcard path wherever possible'.
///
/// Consequently, if the closure passed here returns an error, that error may not be
/// propagated to the caller when a wildcard path was specified.
pub fn for_each_endpoint<T>(&self, path: &GenericPath, mut f: T) -> Result<(), IMStatusCode>
where
T: FnMut(&GenericPath, &Endpoint) -> Result<(), IMStatusCode>,
{
let mut current_path = *path;
let (endpoints, mut endpoint_id, wildcard) = self.get_wildcard_endpoints(path.endpoint)?;
for e in endpoints.iter() {
if let Some(e) = e {
current_path.endpoint = Some(endpoint_id as EndptId);
f(&current_path, e.as_ref())
.or_else(|e| if !wildcard { Err(e) } else { Ok(()) })?;
}
endpoint_id += 1;
}
Ok(())
}
/// Run a closure for all endpoints (mutable) as specified in the path
///
/// Note that the path is a GenericPath and hence can be a wildcard path. This function
/// only captures the successful invocations and ignores the erroneous ones. This is in
/// line with the expected wildcard behaviour, which implies 'please run this operation
/// on this wildcard path wherever possible'.
///
/// Consequently, if the closure passed here returns an error, that error may not be
/// propagated to the caller when a wildcard path was specified.
pub fn for_each_endpoint_mut<T>(
&mut self,
path: &GenericPath,
mut f: T,
) -> Result<(), IMStatusCode>
where
T: FnMut(&GenericPath, &mut Endpoint) -> Result<(), IMStatusCode>,
{
let mut current_path = *path;
let (endpoints, mut endpoint_id, wildcard) =
self.get_wildcard_endpoints_mut(path.endpoint)?;
for e in endpoints.iter_mut() {
if let Some(e) = e {
current_path.endpoint = Some(endpoint_id as EndptId);
f(&current_path, e.as_mut())
.or_else(|e| if !wildcard { Err(e) } else { Ok(()) })?;
}
endpoint_id += 1;
}
Ok(())
}
/// Run a closure for all clusters as specified in the path
///
/// Note that the path is a GenericPath and hence can be a wildcard path. This function
/// only captures the successful invocations and ignores the erroneous ones. This is in
/// line with the expected wildcard behaviour, which implies 'please run this operation
/// on this wildcard path wherever possible'.
///
/// Consequently, if the closure passed here returns an error, that error may not be
/// propagated to the caller when a wildcard path was specified.
pub fn for_each_cluster<T>(&self, path: &GenericPath, mut f: T) -> Result<(), IMStatusCode>
where
T: FnMut(&GenericPath, &dyn ClusterType) -> Result<(), IMStatusCode>,
{
self.for_each_endpoint(path, |p, e| {
let mut current_path = *p;
let (clusters, wildcard) = e.get_wildcard_clusters(p.cluster)?;
for c in clusters.iter() {
current_path.cluster = Some(c.base().id);
f(&current_path, c.as_ref())
.or_else(|e| if !wildcard { Err(e) } else { Ok(()) })?;
}
Ok(())
})
}
/// Run a closure for all clusters (mutable) as specified in the path
///
/// Note that the path is a GenericPath and hence can be a wildcard path. This function
/// only captures the successful invocations and ignores the erroneous ones. This is in
/// line with the expected wildcard behaviour, which implies 'please run this operation
/// on this wildcard path wherever possible'.
///
/// Consequently, if the closure passed here returns an error, that error may not be
/// propagated to the caller when a wildcard path was specified.
pub fn for_each_cluster_mut<T>(
&mut self,
path: &GenericPath,
mut f: T,
) -> Result<(), IMStatusCode>
where
T: FnMut(&GenericPath, &mut dyn ClusterType) -> Result<(), IMStatusCode>,
{
self.for_each_endpoint_mut(path, |p, e| {
let mut current_path = *p;
let (clusters, wildcard) = e.get_wildcard_clusters_mut(p.cluster)?;
for c in clusters.iter_mut() {
current_path.cluster = Some(c.base().id);
f(&current_path, c.as_mut())
.or_else(|e| if !wildcard { Err(e) } else { Ok(()) })?;
}
Ok(())
})
}
/// Run a closure for all attributes as specified in the path
///
/// Note that the path is a GenericPath and hence can be a wildcard path. This function
/// only captures the successful invocations and ignores the erroneous ones. This is in
/// line with the expected wildcard behaviour, which implies 'please run this operation
/// on this wildcard path wherever possible'.
///
/// Consequently, if the closure passed here returns an error, that error may not be
/// propagated to the caller when a wildcard path was specified.
pub fn for_each_attribute<T>(&self, path: &GenericPath, mut f: T) -> Result<(), IMStatusCode>
where
T: FnMut(&GenericPath, &dyn ClusterType) -> Result<(), IMStatusCode>,
{
self.for_each_cluster(path, |current_path, c| {
let mut current_path = *current_path;
let (attributes, wildcard) = c
.base()
.get_wildcard_attribute(path.leaf.map(|at| at as u16))?;
for a in attributes.iter() {
current_path.leaf = Some(a.id as u32);
f(&current_path, c).or_else(|e| if !wildcard { Err(e) } else { Ok(()) })?;
}
Ok(())
})
}
}
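A traversal sketch, assuming the items above plus GenericPath are in scope as in this file; `list_cluster_ids` is a hypothetical helper that visits every cluster reachable through a fully wildcard path:

fn list_cluster_ids(node: &Node) {
    // endpoint/cluster/leaf all set to None => wildcard over everything
    let path = GenericPath { endpoint: None, cluster: None, leaf: None };
    let _ = node.for_each_cluster(&path, |current_path, cluster| {
        // current_path is filled in with the concrete endpoint/cluster on each call
        log::info!("endpoint {:?}: cluster id {}", current_path.endpoint, cluster.base().id());
        Ok(())
    });
}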


@@ -1,160 +0,0 @@
/*
*
* Copyright (c) 2023 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::cmd_enter;
use crate::data_model::objects::*;
use crate::interaction_model::core::IMStatusCode;
use crate::secure_channel::pake::PaseMgr;
use crate::secure_channel::spake2p::VerifierData;
use crate::tlv::{FromTLV, Nullable, OctetStr, TLVElement};
use crate::{error::*, interaction_model::command::CommandReq};
use log::{error, info};
use num_derive::FromPrimitive;
pub const ID: u32 = 0x003C;
#[derive(FromPrimitive, Debug, Copy, Clone, PartialEq)]
pub enum WindowStatus {
WindowNotOpen = 0,
EnhancedWindowOpen = 1,
BasicWindowOpen = 2,
}
#[derive(FromPrimitive)]
pub enum Attributes {
WindowStatus = 0,
AdminFabricIndex = 1,
AdminVendorId = 2,
}
#[derive(FromPrimitive)]
pub enum Commands {
OpenCommWindow = 0x00,
OpenBasicCommWindow = 0x01,
RevokeComm = 0x02,
}
fn attr_window_status_new() -> Attribute {
Attribute::new(
Attributes::WindowStatus as u16,
AttrValue::Custom,
Access::RV,
Quality::NONE,
)
}
fn attr_admin_fabid_new() -> Attribute {
Attribute::new(
Attributes::AdminFabricIndex as u16,
AttrValue::Custom,
Access::RV,
Quality::NULLABLE,
)
}
fn attr_admin_vid_new() -> Attribute {
Attribute::new(
Attributes::AdminVendorId as u16,
AttrValue::Custom,
Access::RV,
Quality::NULLABLE,
)
}
pub struct AdminCommCluster {
pase_mgr: PaseMgr,
base: Cluster,
}
impl ClusterType for AdminCommCluster {
fn base(&self) -> &Cluster {
&self.base
}
fn base_mut(&mut self) -> &mut Cluster {
&mut self.base
}
fn read_custom_attribute(&self, encoder: &mut dyn Encoder, attr: &AttrDetails) {
match num::FromPrimitive::from_u16(attr.attr_id) {
Some(Attributes::WindowStatus) => {
let status = 1_u8;
encoder.encode(EncodeValue::Value(&status))
}
Some(Attributes::AdminVendorId) => {
let vid = Nullable::NotNull(1_u8);
encoder.encode(EncodeValue::Value(&vid))
}
Some(Attributes::AdminFabricIndex) => {
let vid = Nullable::NotNull(1_u8);
encoder.encode(EncodeValue::Value(&vid))
}
_ => {
error!("Unsupported Attribute: this shouldn't happen");
}
}
}
fn handle_command(&mut self, cmd_req: &mut CommandReq) -> Result<(), IMStatusCode> {
let cmd = cmd_req
.cmd
.path
.leaf
.map(num::FromPrimitive::from_u32)
.ok_or(IMStatusCode::UnsupportedCommand)?
.ok_or(IMStatusCode::UnsupportedCommand)?;
match cmd {
Commands::OpenCommWindow => self.handle_command_opencomm_win(cmd_req),
_ => Err(IMStatusCode::UnsupportedCommand),
}
}
}
impl AdminCommCluster {
pub fn new(pase_mgr: PaseMgr) -> Result<Box<Self>, Error> {
let mut c = Box::new(AdminCommCluster {
pase_mgr,
base: Cluster::new(ID)?,
});
c.base.add_attribute(attr_window_status_new())?;
c.base.add_attribute(attr_admin_fabid_new())?;
c.base.add_attribute(attr_admin_vid_new())?;
Ok(c)
}
fn handle_command_opencomm_win(
&mut self,
cmd_req: &mut CommandReq,
) -> Result<(), IMStatusCode> {
cmd_enter!("Open Commissioning Window");
let req =
OpenCommWindowReq::from_tlv(&cmd_req.data).map_err(|_| IMStatusCode::InvalidCommand)?;
let verifier = VerifierData::new(req.verifier.0, req.iterations, req.salt.0);
self.pase_mgr
.enable_pase_session(verifier, req.discriminator)?;
Err(IMStatusCode::Success)
}
}
#[derive(FromTLV)]
#[tlvargs(lifetime = "'a")]
pub struct OpenCommWindowReq<'a> {
_timeout: u16,
verifier: OctetStr<'a>,
discriminator: u16,
iterations: u32,
salt: OctetStr<'a>,
}
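A wiring sketch, assumed to sit alongside the cluster above; registering on endpoint 0 (the root endpoint), the Node module path, and the helper name are assumptions for the example:

fn register_admin_comm(
    node: &mut crate::data_model::objects::Node,
    pase_mgr: PaseMgr,
) -> Result<(), Error> {
    // The PaseMgr handle lets the cluster open a PASE commissioning window on request
    node.add_cluster(0, AdminCommCluster::new(pase_mgr)?)
}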


@@ -1,278 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::cmd_enter;
use crate::data_model::objects::*;
use crate::data_model::sdm::failsafe::FailSafe;
use crate::interaction_model::core::IMStatusCode;
use crate::interaction_model::messages::ib;
use crate::tlv::{FromTLV, TLVElement, TLVWriter, TagType, ToTLV};
use crate::{error::*, interaction_model::command::CommandReq};
use log::{error, info};
use num_derive::FromPrimitive;
use std::sync::Arc;
#[derive(Clone, Copy)]
#[allow(dead_code)]
enum CommissioningError {
Ok = 0,
ErrValueOutsideRange = 1,
ErrInvalidAuth = 2,
ErrNotCommissioning = 3,
ErrBusyWithOtherAdmin = 4,
}
pub const ID: u32 = 0x0030;
#[derive(FromPrimitive)]
pub enum Attributes {
BreadCrumb = 0,
BasicCommissioningInfo = 1,
RegConfig = 2,
LocationCapability = 3,
}
#[derive(FromPrimitive)]
pub enum Commands {
ArmFailsafe = 0x00,
ArmFailsafeResp = 0x01,
SetRegulatoryConfig = 0x02,
SetRegulatoryConfigResp = 0x03,
CommissioningComplete = 0x04,
CommissioningCompleteResp = 0x05,
}
pub enum RegLocationType {
Indoor = 0,
Outdoor = 1,
IndoorOutdoor = 2,
}
fn attr_bread_crumb_new(bread_crumb: u64) -> Attribute {
Attribute::new(
Attributes::BreadCrumb as u16,
AttrValue::Uint64(bread_crumb),
Access::READ | Access::WRITE | Access::NEED_ADMIN,
Quality::NONE,
)
}
fn attr_reg_config_new(reg_config: RegLocationType) -> Attribute {
Attribute::new(
Attributes::RegConfig as u16,
AttrValue::Uint8(reg_config as u8),
Access::RV,
Quality::NONE,
)
}
fn attr_location_capability_new(reg_config: RegLocationType) -> Attribute {
Attribute::new(
Attributes::LocationCapability as u16,
AttrValue::Uint8(reg_config as u8),
Access::RV,
Quality::FIXED,
)
}
fn attr_comm_info_new() -> Attribute {
Attribute::new(
Attributes::BasicCommissioningInfo as u16,
AttrValue::Custom,
Access::RV,
Quality::FIXED,
)
}
#[derive(FromTLV, ToTLV)]
struct FailSafeParams {
expiry_len: u8,
bread_crumb: u8,
}
pub struct GenCommCluster {
expiry_len: u16,
failsafe: Arc<FailSafe>,
base: Cluster,
}
impl ClusterType for GenCommCluster {
fn base(&self) -> &Cluster {
&self.base
}
fn base_mut(&mut self) -> &mut Cluster {
&mut self.base
}
fn read_custom_attribute(&self, encoder: &mut dyn Encoder, attr: &AttrDetails) {
match num::FromPrimitive::from_u16(attr.attr_id) {
Some(Attributes::BasicCommissioningInfo) => {
encoder.encode(EncodeValue::Closure(&|tag, tw| {
let _ = tw.start_struct(tag);
let _ = tw.u16(TagType::Context(0), self.expiry_len);
let _ = tw.end_container();
}))
}
_ => {
error!("Unsupported Attribute: this shouldn't happen");
}
}
}
fn handle_command(&mut self, cmd_req: &mut CommandReq) -> Result<(), IMStatusCode> {
let cmd = cmd_req
.cmd
.path
.leaf
.map(num::FromPrimitive::from_u32)
.ok_or(IMStatusCode::UnsupportedCommand)?
.ok_or(IMStatusCode::UnsupportedCommand)?;
match cmd {
Commands::ArmFailsafe => self.handle_command_armfailsafe(cmd_req),
Commands::SetRegulatoryConfig => self.handle_command_setregulatoryconfig(cmd_req),
Commands::CommissioningComplete => self.handle_command_commissioningcomplete(cmd_req),
_ => Err(IMStatusCode::UnsupportedCommand),
}
}
}
impl GenCommCluster {
pub fn new() -> Result<Box<Self>, Error> {
let failsafe = Arc::new(FailSafe::new());
let mut c = Box::new(GenCommCluster {
// TODO: Arch-Specific
expiry_len: 120,
failsafe,
base: Cluster::new(ID)?,
});
c.base.add_attribute(attr_bread_crumb_new(0))?;
// TODO: Arch-Specific
c.base
.add_attribute(attr_reg_config_new(RegLocationType::IndoorOutdoor))?;
// TODO: Arch-Specific
c.base
.add_attribute(attr_location_capability_new(RegLocationType::IndoorOutdoor))?;
c.base.add_attribute(attr_comm_info_new())?;
Ok(c)
}
pub fn failsafe(&self) -> Arc<FailSafe> {
self.failsafe.clone()
}
fn handle_command_armfailsafe(&mut self, cmd_req: &mut CommandReq) -> Result<(), IMStatusCode> {
cmd_enter!("ARM Fail Safe");
let p = FailSafeParams::from_tlv(&cmd_req.data)?;
let mut status = CommissioningError::Ok as u8;
if self
.failsafe
.arm(p.expiry_len, cmd_req.trans.session.get_session_mode())
.is_err()
{
status = CommissioningError::ErrBusyWithOtherAdmin as u8;
}
let cmd_data = CommonResponse {
error_code: status,
debug_txt: "".to_owned(),
};
let resp = ib::InvResp::cmd_new(
0,
ID,
Commands::ArmFailsafeResp as u16,
EncodeValue::Value(&cmd_data),
);
let _ = resp.to_tlv(cmd_req.resp, TagType::Anonymous);
cmd_req.trans.complete();
Ok(())
}
fn handle_command_setregulatoryconfig(
&mut self,
cmd_req: &mut CommandReq,
) -> Result<(), IMStatusCode> {
cmd_enter!("Set Regulatory Config");
let country_code = cmd_req
.data
.find_tag(1)
.map_err(|_| IMStatusCode::InvalidCommand)?
.slice()
.map_err(|_| IMStatusCode::InvalidCommand)?;
info!("Received country code: {:?}", country_code);
let cmd_data = CommonResponse {
error_code: 0,
debug_txt: "".to_owned(),
};
let resp = ib::InvResp::cmd_new(
0,
ID,
Commands::SetRegulatoryConfigResp as u16,
EncodeValue::Value(&cmd_data),
);
let _ = resp.to_tlv(cmd_req.resp, TagType::Anonymous);
cmd_req.trans.complete();
Ok(())
}
fn handle_command_commissioningcomplete(
&mut self,
cmd_req: &mut CommandReq,
) -> Result<(), IMStatusCode> {
cmd_enter!("Commissioning Complete");
let mut status: u8 = CommissioningError::Ok as u8;
// Has to be a Case Session
if cmd_req.trans.session.get_local_fabric_idx().is_none() {
status = CommissioningError::ErrInvalidAuth as u8;
}
// AddNOC or UpdateNOC must have happened, and for the same fabric scope as this session
if self
.failsafe
.disarm(cmd_req.trans.session.get_session_mode())
.is_err()
{
status = CommissioningError::ErrInvalidAuth as u8;
}
let cmd_data = CommonResponse {
error_code: status,
debug_txt: "".to_owned(),
};
let resp = ib::InvResp::cmd_new(
0,
ID,
Commands::CommissioningCompleteResp as u16,
EncodeValue::Value(&cmd_data),
);
let _ = resp.to_tlv(cmd_req.resp, TagType::Anonymous);
cmd_req.trans.complete();
Ok(())
}
}
#[derive(FromTLV, ToTLV)]
struct CommonResponse {
error_code: u8,
debug_txt: String,
}
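A small sketch of how the shared fail-safe handle returned by failsafe() is typically consumed by other clusters, mirroring the is_armed() gate used in the next file; the helper name is hypothetical:

fn ensure_failsafe_armed(failsafe: &Arc<FailSafe>) -> Result<(), IMStatusCode> {
    // Commands like CSRRequest are only valid while the fail-safe timer is armed
    if failsafe.is_armed() {
        Ok(())
    } else {
        Err(IMStatusCode::UnsupportedAccess)
    }
}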


@@ -1,612 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use crate::acl::{AclEntry, AclMgr, AuthMode};
use crate::cert::Cert;
use crate::crypto::{self, CryptoKeyPair, KeyPair};
use crate::data_model::objects::*;
use crate::data_model::sdm::dev_att;
use crate::fabric::{Fabric, FabricMgr, MAX_SUPPORTED_FABRICS};
use crate::interaction_model::command::CommandReq;
use crate::interaction_model::core::IMStatusCode;
use crate::interaction_model::messages::ib;
use crate::tlv::{FromTLV, OctetStr, TLVElement, TLVWriter, TagType, ToTLV, UtfStr};
use crate::transport::session::SessionMode;
use crate::utils::writebuf::WriteBuf;
use crate::{cmd_enter, error::*, secure_channel};
use log::{error, info};
use num_derive::FromPrimitive;
use super::dev_att::{DataType, DevAttDataFetcher};
use super::failsafe::FailSafe;
// Node Operational Credentials Cluster
#[derive(Clone, Copy)]
#[allow(dead_code)]
enum NocStatus {
Ok = 0,
InvalidPublicKey = 1,
InvalidNodeOpId = 2,
InvalidNOC = 3,
MissingCsr = 4,
TableFull = 5,
MissingAcl = 6,
MissingIpk = 7,
InsufficientPrivlege = 8,
FabricConflict = 9,
LabelConflict = 10,
InvalidFabricIndex = 11,
}
// Some placeholder value for now
const MAX_CERT_DECLARATION_LEN: usize = 600;
// Some placeholder value for now
const MAX_CSR_LEN: usize = 300;
// As defined in the Matter Spec
const RESP_MAX: usize = 900;
pub const ID: u32 = 0x003E;
#[derive(FromPrimitive)]
pub enum Commands {
AttReq = 0x00,
AttReqResp = 0x01,
CertChainReq = 0x02,
CertChainResp = 0x03,
CSRReq = 0x04,
CSRResp = 0x05,
AddNOC = 0x06,
NOCResp = 0x08,
UpdateFabricLabel = 0x09,
RemoveFabric = 0x0a,
AddTrustedRootCert = 0x0b,
}
#[derive(FromPrimitive)]
pub enum Attributes {
NOCs = 0,
Fabrics = 1,
SupportedFabrics = 2,
CommissionedFabrics = 3,
TrustedRootCerts = 4,
CurrentFabricIndex = 5,
}
pub struct NocCluster {
base: Cluster,
dev_att: Box<dyn DevAttDataFetcher>,
fabric_mgr: Arc<FabricMgr>,
acl_mgr: Arc<AclMgr>,
failsafe: Arc<FailSafe>,
}
struct NocData {
pub key_pair: KeyPair,
pub root_ca: Cert,
}
impl NocData {
pub fn new(key_pair: KeyPair) -> Self {
Self {
key_pair,
root_ca: Cert::default(),
}
}
}
impl NocCluster {
pub fn new(
dev_att: Box<dyn DevAttDataFetcher>,
fabric_mgr: Arc<FabricMgr>,
acl_mgr: Arc<AclMgr>,
failsafe: Arc<FailSafe>,
) -> Result<Box<Self>, Error> {
let mut c = Box::new(Self {
dev_att,
fabric_mgr,
acl_mgr,
failsafe,
base: Cluster::new(ID)?,
});
let attrs = [
Attribute::new(
Attributes::CurrentFabricIndex as u16,
AttrValue::Custom,
Access::RV,
Quality::NONE,
),
Attribute::new(
Attributes::Fabrics as u16,
AttrValue::Custom,
Access::RV | Access::FAB_SCOPED,
Quality::NONE,
),
Attribute::new(
Attributes::SupportedFabrics as u16,
AttrValue::Uint8(MAX_SUPPORTED_FABRICS as u8),
Access::RV,
Quality::FIXED,
),
Attribute::new(
Attributes::CommissionedFabrics as u16,
AttrValue::Custom,
Access::RV,
Quality::NONE,
),
];
c.base.add_attributes(&attrs[..])?;
Ok(c)
}
fn add_acl(&self, fab_idx: u8, admin_subject: u64) -> Result<(), Error> {
let mut acl = AclEntry::new(fab_idx, Privilege::ADMIN, AuthMode::Case);
acl.add_subject(admin_subject)?;
self.acl_mgr.add(acl)
}
fn _handle_command_addnoc(&mut self, cmd_req: &mut CommandReq) -> Result<(), NocStatus> {
let noc_data = cmd_req
.trans
.session
.take_data::<NocData>()
.ok_or(NocStatus::MissingCsr)?;
if !self
.failsafe
.allow_noc_change()
.map_err(|_| NocStatus::InsufficientPrivlege)?
{
error!("AddNOC not allowed by Fail Safe");
return Err(NocStatus::InsufficientPrivlege);
}
// This command's processing may take longer, so send a standalone ACK to the peer to avoid any retransmissions
let ack_send = secure_channel::common::send_mrp_standalone_ack(
cmd_req.trans.exch,
cmd_req.trans.session,
);
if ack_send.is_err() {
error!("Error sending Standalone ACK, falling back to piggybacked ACK");
}
let r = AddNocReq::from_tlv(&cmd_req.data).map_err(|_| NocStatus::InvalidNOC)?;
let noc_value = Cert::new(r.noc_value.0).map_err(|_| NocStatus::InvalidNOC)?;
info!("Received NOC as: {}", noc_value);
let icac_value = if !r.icac_value.0.is_empty() {
let cert = Cert::new(r.icac_value.0).map_err(|_| NocStatus::InvalidNOC)?;
info!("Received ICAC as: {}", cert);
Some(cert)
} else {
None
};
let fabric = Fabric::new(
noc_data.key_pair,
noc_data.root_ca,
icac_value,
noc_value,
r.ipk_value.0,
r.vendor_id,
)
.map_err(|_| NocStatus::TableFull)?;
let fab_idx = self
.fabric_mgr
.add(fabric)
.map_err(|_| NocStatus::TableFull)?;
if self.add_acl(fab_idx, r.case_admin_subject).is_err() {
error!("Failed to add ACL, what to do?");
}
if self.failsafe.record_add_noc(fab_idx).is_err() {
error!("Failed to record NoC in the FailSafe, what to do?");
}
NocCluster::create_nocresponse(cmd_req.resp, NocStatus::Ok, fab_idx, "".to_owned());
cmd_req.trans.complete();
Ok(())
}
fn create_nocresponse(
tw: &mut TLVWriter,
status_code: NocStatus,
fab_idx: u8,
debug_txt: String,
) {
let cmd_data = NocResp {
status_code: status_code as u8,
fab_idx,
debug_txt,
};
let invoke_resp = ib::InvResp::cmd_new(
0,
ID,
Commands::NOCResp as u16,
EncodeValue::Value(&cmd_data),
);
let _ = invoke_resp.to_tlv(tw, TagType::Anonymous);
}
fn handle_command_updatefablabel(
&mut self,
cmd_req: &mut CommandReq,
) -> Result<(), IMStatusCode> {
cmd_enter!("Update Fabric Label");
let req = UpdateFabricLabelReq::from_tlv(&cmd_req.data)
.map_err(|_| IMStatusCode::InvalidDataType)?;
let label = req
.label
.to_string()
.map_err(|_| IMStatusCode::InvalidDataType)?;
let (result, fab_idx) =
if let SessionMode::Case(c) = cmd_req.trans.session.get_session_mode() {
if self.fabric_mgr.set_label(c.fab_idx, label).is_err() {
(NocStatus::LabelConflict, c.fab_idx)
} else {
(NocStatus::Ok, c.fab_idx)
}
} else {
// Update Fabric Label not allowed
(NocStatus::InvalidFabricIndex, 0)
};
NocCluster::create_nocresponse(cmd_req.resp, result, fab_idx, "".to_string());
cmd_req.trans.complete();
Ok(())
}
fn handle_command_rmfabric(&mut self, cmd_req: &mut CommandReq) -> Result<(), IMStatusCode> {
cmd_enter!("Remove Fabric");
let req =
RemoveFabricReq::from_tlv(&cmd_req.data).map_err(|_| IMStatusCode::InvalidCommand)?;
if self.fabric_mgr.remove(req.fab_idx).is_ok() {
let _ = self.acl_mgr.delete_for_fabric(req.fab_idx);
cmd_req.trans.terminate();
} else {
NocCluster::create_nocresponse(
cmd_req.resp,
NocStatus::InvalidFabricIndex,
req.fab_idx,
"".to_string(),
);
}
Ok(())
}
fn handle_command_addnoc(&mut self, cmd_req: &mut CommandReq) -> Result<(), IMStatusCode> {
cmd_enter!("AddNOC");
if let Err(e) = self._handle_command_addnoc(cmd_req) {
//TODO: Fab-idx 0?
NocCluster::create_nocresponse(cmd_req.resp, e, 0, "".to_owned());
cmd_req.trans.complete();
}
Ok(())
}
fn handle_command_attrequest(&mut self, cmd_req: &mut CommandReq) -> Result<(), IMStatusCode> {
cmd_enter!("AttestationRequest");
let req = CommonReq::from_tlv(&cmd_req.data).map_err(|_| IMStatusCode::InvalidCommand)?;
info!("Received Attestation Nonce:{:?}", req.str);
let mut attest_challenge = [0u8; crypto::SYMM_KEY_LEN_BYTES];
attest_challenge.copy_from_slice(cmd_req.trans.session.get_att_challenge());
let cmd_data = |tag: TagType, t: &mut TLVWriter| {
let mut buf: [u8; RESP_MAX] = [0; RESP_MAX];
let mut attest_element = WriteBuf::new(&mut buf, RESP_MAX);
let _ = t.start_struct(tag);
let _ =
add_attestation_element(self.dev_att.as_ref(), req.str.0, &mut attest_element, t);
let _ = add_attestation_signature(
self.dev_att.as_ref(),
&mut attest_element,
&attest_challenge,
t,
);
let _ = t.end_container();
};
let resp = ib::InvResp::cmd_new(
0,
ID,
Commands::AttReqResp as u16,
EncodeValue::Closure(&cmd_data),
);
let _ = resp.to_tlv(cmd_req.resp, TagType::Anonymous);
cmd_req.trans.complete();
Ok(())
}
fn handle_command_certchainrequest(
&mut self,
cmd_req: &mut CommandReq,
) -> Result<(), IMStatusCode> {
cmd_enter!("CertChainRequest");
info!("Received data: {}", cmd_req.data);
let cert_type =
get_certchainrequest_params(&cmd_req.data).map_err(|_| IMStatusCode::InvalidCommand)?;
let mut buf: [u8; RESP_MAX] = [0; RESP_MAX];
let len = self
.dev_att
.get_devatt_data(cert_type, &mut buf)
.map_err(|_| IMStatusCode::Failure)?;
let buf = &buf[0..len];
let cmd_data = CertChainResp {
cert: OctetStr::new(buf),
};
let resp = ib::InvResp::cmd_new(
0,
ID,
Commands::CertChainResp as u16,
EncodeValue::Value(&cmd_data),
);
let _ = resp.to_tlv(cmd_req.resp, TagType::Anonymous);
cmd_req.trans.complete();
Ok(())
}
fn handle_command_csrrequest(&mut self, cmd_req: &mut CommandReq) -> Result<(), IMStatusCode> {
cmd_enter!("CSRRequest");
let req = CommonReq::from_tlv(&cmd_req.data).map_err(|_| IMStatusCode::InvalidCommand)?;
info!("Received CSR Nonce:{:?}", req.str);
if !self.failsafe.is_armed() {
return Err(IMStatusCode::UnsupportedAccess);
}
let noc_keypair = KeyPair::new().map_err(|_| IMStatusCode::Failure)?;
let mut attest_challenge = [0u8; crypto::SYMM_KEY_LEN_BYTES];
attest_challenge.copy_from_slice(cmd_req.trans.session.get_att_challenge());
let cmd_data = |tag: TagType, t: &mut TLVWriter| {
let mut buf: [u8; RESP_MAX] = [0; RESP_MAX];
let mut nocsr_element = WriteBuf::new(&mut buf, RESP_MAX);
let _ = t.start_struct(tag);
let _ = add_nocsrelement(&noc_keypair, req.str.0, &mut nocsr_element, t);
let _ = add_attestation_signature(
self.dev_att.as_ref(),
&mut nocsr_element,
&attest_challenge,
t,
);
let _ = t.end_container();
};
let resp = ib::InvResp::cmd_new(
0,
ID,
Commands::CSRResp as u16,
EncodeValue::Closure(&cmd_data),
);
let _ = resp.to_tlv(cmd_req.resp, TagType::Anonymous);
let noc_data = Box::new(NocData::new(noc_keypair));
// Store this in the session data instead of cluster data, so it gets cleared
// if the session goes away for some reason
cmd_req.trans.session.set_data(noc_data);
cmd_req.trans.complete();
Ok(())
}
fn handle_command_addtrustedrootcert(
&mut self,
cmd_req: &mut CommandReq,
) -> Result<(), IMStatusCode> {
cmd_enter!("AddTrustedRootCert");
if !self.failsafe.is_armed() {
return Err(IMStatusCode::UnsupportedAccess);
}
// This may happen on CASE or PASE. For PASE, the existence of NOC Data is necessary
match cmd_req.trans.session.get_session_mode() {
SessionMode::Case(_) => error!("CASE: AddTrustedRootCert handling pending"), // For a CASE Session, we just return success for now,
SessionMode::Pase => {
let noc_data = cmd_req
.trans
.session
.get_data::<NocData>()
.ok_or(IMStatusCode::Failure)?;
let req =
CommonReq::from_tlv(&cmd_req.data).map_err(|_| IMStatusCode::InvalidCommand)?;
info!("Received Trusted Cert:{:x?}", req.str);
noc_data.root_ca = Cert::new(req.str.0).map_err(|_| IMStatusCode::Failure)?;
}
_ => (),
}
cmd_req.trans.complete();
Err(IMStatusCode::Success)
}
}
impl ClusterType for NocCluster {
fn base(&self) -> &Cluster {
&self.base
}
fn base_mut(&mut self) -> &mut Cluster {
&mut self.base
}
fn handle_command(&mut self, cmd_req: &mut CommandReq) -> Result<(), IMStatusCode> {
let cmd = cmd_req
.cmd
.path
.leaf
.map(num::FromPrimitive::from_u32)
.ok_or(IMStatusCode::UnsupportedCommand)?
.ok_or(IMStatusCode::UnsupportedCommand)?;
match cmd {
Commands::AddNOC => self.handle_command_addnoc(cmd_req),
Commands::CSRReq => self.handle_command_csrrequest(cmd_req),
Commands::AddTrustedRootCert => self.handle_command_addtrustedrootcert(cmd_req),
Commands::AttReq => self.handle_command_attrequest(cmd_req),
Commands::CertChainReq => self.handle_command_certchainrequest(cmd_req),
Commands::UpdateFabricLabel => self.handle_command_updatefablabel(cmd_req),
Commands::RemoveFabric => self.handle_command_rmfabric(cmd_req),
_ => Err(IMStatusCode::UnsupportedCommand),
}
}
fn read_custom_attribute(&self, encoder: &mut dyn Encoder, attr: &AttrDetails) {
match num::FromPrimitive::from_u16(attr.attr_id) {
Some(Attributes::CurrentFabricIndex) => {
encoder.encode(EncodeValue::Value(&attr.fab_idx))
}
Some(Attributes::Fabrics) => encoder.encode(EncodeValue::Closure(&|tag, tw| {
let _ = tw.start_array(tag);
let _ = self.fabric_mgr.for_each(|entry, fab_idx| {
if !attr.fab_filter || attr.fab_idx == fab_idx {
let _ = entry
.get_fabric_desc(fab_idx)
.to_tlv(tw, TagType::Anonymous);
}
});
let _ = tw.end_container();
})),
Some(Attributes::CommissionedFabrics) => {
let count = self.fabric_mgr.used_count() as u8;
encoder.encode(EncodeValue::Value(&count))
}
_ => {
error!("Attribute not supported: this shouldn't happen");
}
}
}
}
fn add_attestation_element(
dev_att: &dyn DevAttDataFetcher,
att_nonce: &[u8],
write_buf: &mut WriteBuf,
t: &mut TLVWriter,
) -> Result<(), Error> {
let mut cert_dec: [u8; MAX_CERT_DECLARATION_LEN] = [0; MAX_CERT_DECLARATION_LEN];
let len = dev_att.get_devatt_data(dev_att::DataType::CertDeclaration, &mut cert_dec)?;
let cert_dec = &cert_dec[0..len];
let epoch = SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs() as u32;
let mut writer = TLVWriter::new(write_buf);
writer.start_struct(TagType::Anonymous)?;
writer.str16(TagType::Context(1), cert_dec)?;
writer.str8(TagType::Context(2), att_nonce)?;
writer.u32(TagType::Context(3), epoch)?;
writer.end_container()?;
t.str16(TagType::Context(0), write_buf.as_borrow_slice())?;
Ok(())
}
fn add_attestation_signature(
dev_att: &dyn DevAttDataFetcher,
attest_element: &mut WriteBuf,
attest_challenge: &[u8],
resp: &mut TLVWriter,
) -> Result<(), Error> {
let dac_key = {
let mut pubkey = [0_u8; crypto::EC_POINT_LEN_BYTES];
let mut privkey = [0_u8; crypto::BIGNUM_LEN_BYTES];
dev_att.get_devatt_data(dev_att::DataType::DACPubKey, &mut pubkey)?;
dev_att.get_devatt_data(dev_att::DataType::DACPrivKey, &mut privkey)?;
KeyPair::new_from_components(&pubkey, &privkey)
}?;
attest_element.copy_from_slice(attest_challenge)?;
let mut signature = [0u8; crypto::EC_SIGNATURE_LEN_BYTES];
dac_key.sign_msg(attest_element.as_borrow_slice(), &mut signature)?;
resp.str8(TagType::Context(1), &signature)
}
fn add_nocsrelement(
noc_keypair: &KeyPair,
csr_nonce: &[u8],
write_buf: &mut WriteBuf,
resp: &mut TLVWriter,
) -> Result<(), Error> {
let mut csr: [u8; MAX_CSR_LEN] = [0; MAX_CSR_LEN];
let csr = noc_keypair.get_csr(&mut csr)?;
let mut writer = TLVWriter::new(write_buf);
writer.start_struct(TagType::Anonymous)?;
writer.str8(TagType::Context(1), csr)?;
writer.str8(TagType::Context(2), csr_nonce)?;
writer.end_container()?;
resp.str8(TagType::Context(0), write_buf.as_borrow_slice())?;
Ok(())
}
#[derive(ToTLV)]
struct CertChainResp<'a> {
cert: OctetStr<'a>,
}
#[derive(ToTLV)]
struct NocResp {
status_code: u8,
fab_idx: u8,
debug_txt: String,
}
#[derive(FromTLV)]
#[tlvargs(lifetime = "'a")]
struct AddNocReq<'a> {
noc_value: OctetStr<'a>,
icac_value: OctetStr<'a>,
ipk_value: OctetStr<'a>,
case_admin_subject: u64,
vendor_id: u16,
}
#[derive(FromTLV)]
#[tlvargs(lifetime = "'a")]
struct CommonReq<'a> {
str: OctetStr<'a>,
}
#[derive(FromTLV)]
#[tlvargs(lifetime = "'a")]
struct UpdateFabricLabelReq<'a> {
label: UtfStr<'a>,
}
#[derive(FromTLV)]
struct CertChainReq {
cert_type: u8,
}
#[derive(FromTLV)]
struct RemoveFabricReq {
fab_idx: u8,
}
fn get_certchainrequest_params(data: &TLVElement) -> Result<DataType, Error> {
let cert_type = CertChainReq::from_tlv(data)?.cert_type;
const CERT_TYPE_DAC: u8 = 1;
const CERT_TYPE_PAI: u8 = 2;
info!("Received Cert Type:{:?}", cert_type);
match cert_type {
CERT_TYPE_DAC => Ok(dev_att::DataType::DAC),
CERT_TYPE_PAI => Ok(dev_att::DataType::PAI),
_ => Err(Error::Invalid),
}
}
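A wiring sketch that combines this cluster with the General Commissioning cluster from the earlier file; it assumes both types are in scope, treats endpoint 0 as the root endpoint, and uses a purely illustrative helper name:

fn register_credential_clusters(
    node: &mut crate::data_model::objects::Node,
    dev_att: Box<dyn DevAttDataFetcher>,
    fabric_mgr: Arc<FabricMgr>,
    acl_mgr: Arc<AclMgr>,
) -> Result<(), Error> {
    let gen_comm = GenCommCluster::new()?;
    // Both clusters share the same fail-safe, so AddNOC/CSRRequest can be gated on ArmFailSafe
    let failsafe = gen_comm.failsafe();
    node.add_cluster(0, gen_comm)?;
    node.add_cluster(0, NocCluster::new(dev_att, fabric_mgr, acl_mgr, failsafe)?)?;
    Ok(())
}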


@@ -1,53 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::{
data_model::objects::{Cluster, ClusterType},
error::Error,
};
pub const ID: u32 = 0x0031;
pub struct NwCommCluster {
base: Cluster,
}
impl ClusterType for NwCommCluster {
fn base(&self) -> &Cluster {
&self.base
}
fn base_mut(&mut self) -> &mut Cluster {
&mut self.base
}
}
enum FeatureMap {
_Wifi = 0x01,
_Thread = 0x02,
Ethernet = 0x04,
}
impl NwCommCluster {
pub fn new() -> Result<Box<Self>, Error> {
let mut c = Box::new(Self {
base: Cluster::new(ID)?,
});
// TODO: Arch-Specific
c.base.set_feature_map(FeatureMap::Ethernet as u32)?;
Ok(c)
}
}


@@ -1,393 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::sync::Arc;
use num_derive::FromPrimitive;
use crate::acl::{self, AclEntry, AclMgr};
use crate::data_model::objects::*;
use crate::error::*;
use crate::interaction_model::core::IMStatusCode;
use crate::interaction_model::messages::ib::{attr_list_write, ListOperation};
use crate::tlv::{FromTLV, TLVElement, TagType, ToTLV};
use log::{error, info};
pub const ID: u32 = 0x001F;
#[derive(FromPrimitive)]
pub enum Attributes {
Acl = 0,
Extension = 1,
SubjectsPerEntry = 2,
TargetsPerEntry = 3,
EntriesPerFabric = 4,
}
pub struct AccessControlCluster {
base: Cluster,
acl_mgr: Arc<AclMgr>,
}
impl AccessControlCluster {
pub fn new(acl_mgr: Arc<AclMgr>) -> Result<Box<Self>, Error> {
let mut c = Box::new(AccessControlCluster {
base: Cluster::new(ID)?,
acl_mgr,
});
c.base.add_attribute(attr_acl_new())?;
c.base.add_attribute(attr_extension_new())?;
c.base.add_attribute(attr_subjects_per_entry_new())?;
c.base.add_attribute(attr_targets_per_entry_new())?;
c.base.add_attribute(attr_entries_per_fabric_new())?;
Ok(c)
}
/// Write the ACL Attribute
///
/// This handles four operations: add item, edit item, delete item and delete list,
/// taking fabric-scoped behaviour into account.
fn write_acl_attr(
&mut self,
op: &ListOperation,
data: &TLVElement,
fab_idx: u8,
) -> Result<(), IMStatusCode> {
info!("Performing ACL operation {:?}", op);
let result = match op {
ListOperation::AddItem | ListOperation::EditItem(_) => {
let mut acl_entry =
AclEntry::from_tlv(data).map_err(|_| IMStatusCode::ConstraintError)?;
info!("ACL {:?}", acl_entry);
// Overwrite the fabric index with our accessing fabric index
acl_entry.fab_idx = Some(fab_idx);
if let ListOperation::EditItem(index) = op {
self.acl_mgr.edit(*index as u8, fab_idx, acl_entry)
} else {
self.acl_mgr.add(acl_entry)
}
}
ListOperation::DeleteItem(index) => self.acl_mgr.delete(*index as u8, fab_idx),
ListOperation::DeleteList => self.acl_mgr.delete_for_fabric(fab_idx),
};
match result {
Ok(_) => Ok(()),
Err(Error::NoSpace) => Err(IMStatusCode::ResourceExhausted),
_ => Err(IMStatusCode::ConstraintError),
}
}
}
impl ClusterType for AccessControlCluster {
fn base(&self) -> &Cluster {
&self.base
}
fn base_mut(&mut self) -> &mut Cluster {
&mut self.base
}
fn read_custom_attribute(&self, encoder: &mut dyn Encoder, attr: &AttrDetails) {
match num::FromPrimitive::from_u16(attr.attr_id) {
Some(Attributes::Acl) => encoder.encode(EncodeValue::Closure(&|tag, tw| {
let _ = tw.start_array(tag);
let _ = self.acl_mgr.for_each_acl(|entry| {
if !attr.fab_filter || Some(attr.fab_idx) == entry.fab_idx {
let _ = entry.to_tlv(tw, TagType::Anonymous);
}
});
let _ = tw.end_container();
})),
Some(Attributes::Extension) => encoder.encode(EncodeValue::Closure(&|tag, tw| {
// Empty for now
let _ = tw.start_array(tag);
let _ = tw.end_container();
})),
_ => {
error!("Attribute not yet supported: this shouldn't happen");
}
}
}
fn write_attribute(
&mut self,
attr: &AttrDetails,
data: &TLVElement,
) -> Result<(), IMStatusCode> {
let result = if let Some(Attributes::Acl) = num::FromPrimitive::from_u16(attr.attr_id) {
attr_list_write(attr, data, |op, data| {
self.write_acl_attr(&op, data, attr.fab_idx)
})
} else {
error!("Attribute not yet supported: this shouldn't happen");
Err(IMStatusCode::NotFound)
};
if result.is_ok() {
self.base.cluster_changed();
}
result
}
}
fn attr_acl_new() -> Attribute {
Attribute::new(
Attributes::Acl as u16,
AttrValue::Custom,
Access::RWFA,
Quality::NONE,
)
}
fn attr_extension_new() -> Attribute {
Attribute::new(
Attributes::Extension as u16,
AttrValue::Custom,
Access::RWFA,
Quality::NONE,
)
}
fn attr_subjects_per_entry_new() -> Attribute {
Attribute::new(
Attributes::SubjectsPerEntry as u16,
AttrValue::Uint16(acl::SUBJECTS_PER_ENTRY as u16),
Access::RV,
Quality::FIXED,
)
}
fn attr_targets_per_entry_new() -> Attribute {
Attribute::new(
Attributes::TargetsPerEntry as u16,
AttrValue::Uint16(acl::TARGETS_PER_ENTRY as u16),
Access::RV,
Quality::FIXED,
)
}
fn attr_entries_per_fabric_new() -> Attribute {
Attribute::new(
Attributes::EntriesPerFabric as u16,
AttrValue::Uint16(acl::ENTRIES_PER_FABRIC as u16),
Access::RV,
Quality::FIXED,
)
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use crate::{
acl::{AclEntry, AclMgr, AuthMode},
data_model::{
core::read::AttrReadEncoder,
objects::{AttrDetails, ClusterType, Privilege},
},
interaction_model::messages::ib::ListOperation,
tlv::{get_root_node_struct, ElementType, TLVElement, TLVWriter, TagType, ToTLV},
utils::writebuf::WriteBuf,
};
use super::AccessControlCluster;
#[test]
/// Add an ACL entry
fn acl_cluster_add() {
let mut buf: [u8; 100] = [0; 100];
let buf_len = buf.len();
let mut writebuf = WriteBuf::new(&mut buf, buf_len);
let mut tw = TLVWriter::new(&mut writebuf);
let acl_mgr = Arc::new(AclMgr::new_with(false).unwrap());
let mut acl = AccessControlCluster::new(acl_mgr.clone()).unwrap();
let new = AclEntry::new(2, Privilege::VIEW, AuthMode::Case);
new.to_tlv(&mut tw, TagType::Anonymous).unwrap();
let data = get_root_node_struct(writebuf.as_borrow_slice()).unwrap();
// Test: the ACL in the TLV has fabric index 2, but the accessing fabric is 1;
// the fabric index in the TLV should be ignored and the entry created with fabric index 1
let result = acl.write_acl_attr(&ListOperation::AddItem, &data, 1);
assert_eq!(result, Ok(()));
let verifier = AclEntry::new(1, Privilege::VIEW, AuthMode::Case);
acl_mgr
.for_each_acl(|a| {
assert_eq!(*a, verifier);
})
.unwrap();
}
#[test]
/// - The listindex used for edit should be relative to the current fabric
fn acl_cluster_edit() {
let mut buf: [u8; 100] = [0; 100];
let buf_len = buf.len();
let mut writebuf = WriteBuf::new(&mut buf, buf_len);
let mut tw = TLVWriter::new(&mut writebuf);
// Add 3 ACLs belonging to fabric index 2, 1 and 2, in that order
let acl_mgr = Arc::new(AclMgr::new_with(false).unwrap());
let mut verifier = [
AclEntry::new(2, Privilege::VIEW, AuthMode::Case),
AclEntry::new(1, Privilege::VIEW, AuthMode::Case),
AclEntry::new(2, Privilege::ADMIN, AuthMode::Case),
];
for i in verifier {
acl_mgr.add(i).unwrap();
}
let mut acl = AccessControlCluster::new(acl_mgr.clone()).unwrap();
let new = AclEntry::new(2, Privilege::VIEW, AuthMode::Case);
new.to_tlv(&mut tw, TagType::Anonymous).unwrap();
let data = get_root_node_struct(writebuf.as_borrow_slice()).unwrap();
// Test: edit Fabric 2's index 1 - with the accessing fabric being 2 - should be allowed
let result = acl.write_acl_attr(&ListOperation::EditItem(1), &data, 2);
// Fabric 2's index 1 is actually our index 2; update the verifier
verifier[2] = new;
assert_eq!(result, Ok(()));
// Also validate in the acl_mgr that the entries are in the right order
let mut index = 0;
acl_mgr
.for_each_acl(|a| {
assert_eq!(*a, verifier[index]);
index += 1;
})
.unwrap();
}
#[test]
/// - The listindex used for delete should be relative to the current fabric
fn acl_cluster_delete() {
// Add 3 ACLs belonging to fabric index 2, 1 and 2, in that order
let acl_mgr = Arc::new(AclMgr::new_with(false).unwrap());
let input = [
AclEntry::new(2, Privilege::VIEW, AuthMode::Case),
AclEntry::new(1, Privilege::VIEW, AuthMode::Case),
AclEntry::new(2, Privilege::ADMIN, AuthMode::Case),
];
for i in input {
acl_mgr.add(i).unwrap();
}
let mut acl = AccessControlCluster::new(acl_mgr.clone()).unwrap();
// The data argument is a don't-care here
let data = TLVElement::new(TagType::Anonymous, ElementType::True);
// Test: delete Fabric 1's index 0
let result = acl.write_acl_attr(&ListOperation::DeleteItem(0), &data, 1);
assert_eq!(result, Ok(()));
let verifier = [input[0], input[2]];
// Also validate in the acl_mgr that the entries are in the right order
let mut index = 0;
acl_mgr
.for_each_acl(|a| {
assert_eq!(*a, verifier[index]);
index += 1;
})
.unwrap();
}
#[test]
/// - acl read with and without fabric filtering
fn acl_cluster_read() {
let mut buf: [u8; 100] = [0; 100];
let buf_len = buf.len();
let mut writebuf = WriteBuf::new(&mut buf, buf_len);
// Add 3 ACLs belonging to fabric index 2, 1 and 2, in that order
let acl_mgr = Arc::new(AclMgr::new_with(false).unwrap());
let input = [
AclEntry::new(2, Privilege::VIEW, AuthMode::Case),
AclEntry::new(1, Privilege::VIEW, AuthMode::Case),
AclEntry::new(2, Privilege::ADMIN, AuthMode::Case),
];
for i in input {
acl_mgr.add(i).unwrap();
}
let acl = AccessControlCluster::new(acl_mgr).unwrap();
// Test 1: all 3 entries are read in the response without fabric filtering
{
let mut tw = TLVWriter::new(&mut writebuf);
let mut encoder = AttrReadEncoder::new(&mut tw);
let attr_details = AttrDetails {
attr_id: 0,
list_index: None,
fab_idx: 1,
fab_filter: false,
};
acl.read_custom_attribute(&mut encoder, &attr_details);
assert_eq!(
&[
21, 53, 1, 36, 0, 0, 55, 1, 24, 54, 2, 21, 36, 1, 1, 36, 2, 2, 54, 3, 24, 54,
4, 24, 36, 254, 2, 24, 21, 36, 1, 1, 36, 2, 2, 54, 3, 24, 54, 4, 24, 36, 254,
1, 24, 21, 36, 1, 5, 36, 2, 2, 54, 3, 24, 54, 4, 24, 36, 254, 2, 24, 24, 24,
24
],
writebuf.as_borrow_slice()
);
}
writebuf.reset(0);
// Test 2: only a single entry is read in the response, with fabric filtering and fabric idx 1
{
let mut tw = TLVWriter::new(&mut writebuf);
let mut encoder = AttrReadEncoder::new(&mut tw);
let attr_details = AttrDetails {
attr_id: 0,
list_index: None,
fab_idx: 1,
fab_filter: true,
};
acl.read_custom_attribute(&mut encoder, &attr_details);
assert_eq!(
&[
21, 53, 1, 36, 0, 0, 55, 1, 24, 54, 2, 21, 36, 1, 1, 36, 2, 2, 54, 3, 24, 54,
4, 24, 36, 254, 1, 24, 24, 24, 24
],
writebuf.as_borrow_slice()
);
}
writebuf.reset(0);
// Test 3: only a single entry is read in the response, with fabric filtering and fabric idx 2
{
let mut tw = TLVWriter::new(&mut writebuf);
let mut encoder = AttrReadEncoder::new(&mut tw);
let attr_details = AttrDetails {
attr_id: 0,
list_index: None,
fab_idx: 2,
fab_filter: true,
};
acl.read_custom_attribute(&mut encoder, &attr_details);
assert_eq!(
&[
21, 53, 1, 36, 0, 0, 55, 1, 24, 54, 2, 21, 36, 1, 1, 36, 2, 2, 54, 3, 24, 54,
4, 24, 36, 254, 2, 24, 21, 36, 1, 5, 36, 2, 2, 54, 3, 24, 54, 4, 24, 36, 254,
2, 24, 24, 24, 24
],
writebuf.as_borrow_slice()
);
}
}
}
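A sketch of seeding an entry directly through the shared AclMgr, mirroring what write_acl_attr does for ListOperation::AddItem; the imports follow the test module above and the helper name is hypothetical:

fn seed_admin_acl(acl_mgr: &AclMgr, fab_idx: u8, subject: u64) -> Result<(), Error> {
    // The NOC cluster performs the same steps when installing the initial admin ACL
    let mut entry = AclEntry::new(fab_idx, Privilege::ADMIN, AuthMode::Case);
    entry.add_subject(subject)?;
    acl_mgr.add(entry)
}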


@@ -1,168 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use num_derive::FromPrimitive;
use crate::data_model::core::DataModel;
use crate::data_model::objects::*;
use crate::error::*;
use crate::interaction_model::messages::GenericPath;
use crate::tlv::{TLVWriter, TagType, ToTLV};
use log::error;
pub const ID: u32 = 0x001D;
#[derive(FromPrimitive)]
#[allow(clippy::enum_variant_names)]
pub enum Attributes {
DeviceTypeList = 0,
ServerList = 1,
ClientList = 2,
PartsList = 3,
}
pub struct DescriptorCluster {
base: Cluster,
endpoint_id: EndptId,
data_model: DataModel,
}
impl DescriptorCluster {
pub fn new(endpoint_id: EndptId, data_model: DataModel) -> Result<Box<Self>, Error> {
let mut c = Box::new(DescriptorCluster {
endpoint_id,
data_model,
base: Cluster::new(ID)?,
});
let attrs = [
Attribute::new(
Attributes::DeviceTypeList as u16,
AttrValue::Custom,
Access::RV,
Quality::NONE,
),
Attribute::new(
Attributes::ServerList as u16,
AttrValue::Custom,
Access::RV,
Quality::NONE,
),
Attribute::new(
Attributes::PartsList as u16,
AttrValue::Custom,
Access::RV,
Quality::NONE,
),
Attribute::new(
Attributes::ClientList as u16,
AttrValue::Custom,
Access::RV,
Quality::NONE,
),
];
c.base.add_attributes(&attrs[..])?;
Ok(c)
}
fn encode_devtype_list(&self, tag: TagType, tw: &mut TLVWriter) {
let path = GenericPath {
endpoint: Some(self.endpoint_id),
cluster: None,
leaf: None,
};
let _ = tw.start_array(tag);
let dm = self.data_model.node.read().unwrap();
let _ = dm.for_each_endpoint(&path, |_, e| {
let dev_type = e.get_dev_type();
let _ = dev_type.to_tlv(tw, TagType::Anonymous);
Ok(())
});
let _ = tw.end_container();
}
fn encode_server_list(&self, tag: TagType, tw: &mut TLVWriter) {
let path = GenericPath {
endpoint: Some(self.endpoint_id),
cluster: None,
leaf: None,
};
let _ = tw.start_array(tag);
let dm = self.data_model.node.read().unwrap();
let _ = dm.for_each_cluster(&path, |_current_path, c| {
let _ = tw.u32(TagType::Anonymous, c.base().id());
Ok(())
});
let _ = tw.end_container();
}
fn encode_parts_list(&self, tag: TagType, tw: &mut TLVWriter) {
let path = GenericPath {
endpoint: None,
cluster: None,
leaf: None,
};
let _ = tw.start_array(tag);
if self.endpoint_id == 0 {
// TODO: If the endpoint is other than 0, need to figure out what to do
let dm = self.data_model.node.read().unwrap();
let _ = dm.for_each_endpoint(&path, |current_path, _| {
if let Some(endpoint_id) = current_path.endpoint {
if endpoint_id != 0 {
let _ = tw.u16(TagType::Anonymous, endpoint_id);
}
}
Ok(())
});
}
let _ = tw.end_container();
}
fn encode_client_list(&self, tag: TagType, tw: &mut TLVWriter) {
// No Clients supported
let _ = tw.start_array(tag);
let _ = tw.end_container();
}
}
impl ClusterType for DescriptorCluster {
fn base(&self) -> &Cluster {
&self.base
}
fn base_mut(&mut self) -> &mut Cluster {
&mut self.base
}
fn read_custom_attribute(&self, encoder: &mut dyn Encoder, attr: &AttrDetails) {
match num::FromPrimitive::from_u16(attr.attr_id) {
Some(Attributes::DeviceTypeList) => encoder.encode(EncodeValue::Closure(&|tag, tw| {
self.encode_devtype_list(tag, tw)
})),
Some(Attributes::ServerList) => encoder.encode(EncodeValue::Closure(&|tag, tw| {
self.encode_server_list(tag, tw)
})),
Some(Attributes::PartsList) => encoder.encode(EncodeValue::Closure(&|tag, tw| {
self.encode_parts_list(tag, tw)
})),
Some(Attributes::ClientList) => encoder.encode(EncodeValue::Closure(&|tag, tw| {
self.encode_client_list(tag, tw)
})),
_ => {
error!("Attribute not supported: this shouldn't happen");
}
}
}
}
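A registration sketch, assuming the items above are in scope; each endpoint would typically get its own DescriptorCluster built from the shared DataModel handle, and the helper below is purely illustrative:

fn add_descriptor(
    node: &mut crate::data_model::objects::Node,
    endpoint_id: EndptId,
    data_model: DataModel,
) -> Result<(), Error> {
    // The descriptor introspects the node itself to answer DeviceTypeList/ServerList/PartsList reads
    node.add_cluster(endpoint_id, DescriptorCluster::new(endpoint_id, data_model)?)
}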


@@ -1,148 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::{
array::TryFromSliceError, fmt, string::FromUtf8Error, sync::PoisonError, time::SystemTimeError,
};
use async_channel::{SendError, TryRecvError};
use log::error;
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Error {
AttributeNotFound,
AttributeIsCustom,
BufferTooSmall,
ClusterNotFound,
CommandNotFound,
Duplicate,
EndpointNotFound,
Crypto,
TLSStack,
MdnsError,
Network,
NoCommand,
NoEndpoint,
NoExchange,
NoFabricId,
NoHandler,
NoNetworkInterface,
NoNodeId,
NoMemory,
NoSession,
NoSpace,
NoSpaceAckTable,
NoSpaceRetransTable,
NoTagFound,
NotFound,
PacketPoolExhaust,
StdIoError,
SysTimeFail,
Invalid,
InvalidAAD,
InvalidData,
InvalidKeyLength,
InvalidOpcode,
InvalidPeerAddr,
// Invalid Auth Key in the Matter Certificate
InvalidAuthKey,
InvalidSignature,
InvalidState,
InvalidTime,
InvalidArgument,
RwLock,
TLVNotFound,
TLVTypeMismatch,
TruncatedPacket,
Utf8Fail,
}
impl From<std::io::Error> for Error {
fn from(_e: std::io::Error) -> Self {
// Keep things simple for now
Self::StdIoError
}
}
impl<T> From<PoisonError<T>> for Error {
fn from(_e: PoisonError<T>) -> Self {
Self::RwLock
}
}
#[cfg(feature = "crypto_openssl")]
impl From<openssl::error::ErrorStack> for Error {
fn from(e: openssl::error::ErrorStack) -> Self {
error!("Error in TLS: {}", e);
Self::TLSStack
}
}
#[cfg(feature = "crypto_mbedtls")]
impl From<mbedtls::Error> for Error {
fn from(e: mbedtls::Error) -> Self {
error!("Error in TLS: {}", e);
Self::TLSStack
}
}
#[cfg(feature = "crypto_rustcrypto")]
impl From<ccm::aead::Error> for Error {
fn from(_e: ccm::aead::Error) -> Self {
Self::Crypto
}
}
impl From<SystemTimeError> for Error {
fn from(_e: SystemTimeError) -> Self {
Self::SysTimeFail
}
}
impl From<TryFromSliceError> for Error {
fn from(_e: TryFromSliceError) -> Self {
Self::Invalid
}
}
impl<T> From<SendError<T>> for Error {
fn from(e: SendError<T>) -> Self {
error!("Error in channel send {}", e);
Self::Invalid
}
}
impl From<FromUtf8Error> for Error {
fn from(_e: FromUtf8Error) -> Self {
Self::Utf8Fail
}
}
impl From<TryRecvError> for Error {
fn from(e: TryRecvError) -> Self {
error!("Error in channel try_recv {}", e);
Self::Invalid
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl std::error::Error for Error {}
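A small sketch of the From conversions in action: inside a function returning this crate's Error, the ? operator maps a std::io::Error into Error::StdIoError through the impl above; the helper name is illustrative:

fn read_blob(path: &str) -> Result<Vec<u8>, Error> {
    // std::io::Error -> Error::StdIoError via the From impl, courtesy of `?`
    let data = std::fs::read(path)?;
    Ok(data)
}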


@@ -1,439 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::sync::{Arc, Mutex, MutexGuard, RwLock};
use byteorder::{BigEndian, ByteOrder, LittleEndian};
use log::{error, info};
use owning_ref::RwLockReadGuardRef;
use crate::{
cert::Cert,
crypto::{self, crypto_dummy::KeyPairDummy, hkdf_sha256, CryptoKeyPair, HmacSha256, KeyPair},
error::Error,
group_keys::KeySet,
mdns::{self, Mdns},
sys::{Psm, SysMdnsService},
tlv::{OctetStr, TLVWriter, TagType, ToTLV, UtfStr},
};
const MAX_CERT_TLV_LEN: usize = 350;
const COMPRESSED_FABRIC_ID_LEN: usize = 8;
macro_rules! fb_key {
($index:ident, $key:ident) => {
&format!("fb{}{}", $index, $key)
};
}
const ST_VID: &str = "vid";
const ST_RCA: &str = "rca";
const ST_ICA: &str = "ica";
const ST_NOC: &str = "noc";
const ST_IPK: &str = "ipk";
const ST_LBL: &str = "label";
const ST_PBKEY: &str = "pubkey";
const ST_PRKEY: &str = "privkey";
#[allow(dead_code)]
pub struct Fabric {
node_id: u64,
fabric_id: u64,
vendor_id: u16,
key_pair: Box<dyn CryptoKeyPair>,
pub root_ca: Cert,
pub icac: Option<Cert>,
pub noc: Cert,
pub ipk: KeySet,
label: String,
compressed_id: [u8; COMPRESSED_FABRIC_ID_LEN],
mdns_service: Option<SysMdnsService>,
}
#[derive(ToTLV)]
#[tlvargs(lifetime = "'a", start = 1)]
pub struct FabricDescriptor<'a> {
root_public_key: OctetStr<'a>,
vendor_id: u16,
fabric_id: u64,
node_id: u64,
label: UtfStr<'a>,
// TODO: Instead of the direct value, we should consider GlobalElements::FabricIndex
#[tagval(0xFE)]
pub fab_idx: Option<u8>,
}
impl Fabric {
pub fn new(
key_pair: KeyPair,
root_ca: Cert,
icac: Option<Cert>,
noc: Cert,
ipk: &[u8],
vendor_id: u16,
) -> Result<Self, Error> {
let node_id = noc.get_node_id()?;
let fabric_id = noc.get_fabric_id()?;
let mut f = Self {
node_id,
fabric_id,
vendor_id,
key_pair: Box::new(key_pair),
root_ca,
icac,
noc,
ipk: KeySet::default(),
compressed_id: [0; COMPRESSED_FABRIC_ID_LEN],
label: "".into(),
mdns_service: None,
};
Fabric::get_compressed_id(f.root_ca.get_pubkey(), fabric_id, &mut f.compressed_id)?;
f.ipk = KeySet::new(ipk, &f.compressed_id)?;
let mut mdns_service_name = String::with_capacity(33);
for c in f.compressed_id {
mdns_service_name.push_str(&format!("{:02X}", c));
}
mdns_service_name.push('-');
let mut node_id_be: [u8; 8] = [0; 8];
BigEndian::write_u64(&mut node_id_be, node_id);
for c in node_id_be {
mdns_service_name.push_str(&format!("{:02X}", c));
}
info!("MDNS Service Name: {}", mdns_service_name);
f.mdns_service = Some(
Mdns::get()?.publish_service(&mdns_service_name, mdns::ServiceMode::Commissioned)?,
);
Ok(f)
}
pub fn dummy() -> Result<Self, Error> {
Ok(Self {
node_id: 0,
fabric_id: 0,
vendor_id: 0,
key_pair: Box::new(KeyPairDummy::new()?),
root_ca: Cert::default(),
icac: Some(Cert::default()),
noc: Cert::default(),
ipk: KeySet::default(),
label: "".into(),
compressed_id: [0; COMPRESSED_FABRIC_ID_LEN],
mdns_service: None,
})
}
fn get_compressed_id(root_pubkey: &[u8], fabric_id: u64, out: &mut [u8]) -> Result<(), Error> {
let root_pubkey = &root_pubkey[1..];
let mut fabric_id_be: [u8; 8] = [0; 8];
BigEndian::write_u64(&mut fabric_id_be, fabric_id);
const COMPRESSED_FABRIC_ID_INFO: [u8; 16] = [
0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x46, 0x61, 0x62, 0x72,
0x69, 0x63,
];
hkdf_sha256(&fabric_id_be, root_pubkey, &COMPRESSED_FABRIC_ID_INFO, out)
.map_err(|_| Error::NoSpace)
}
pub fn match_dest_id(&self, random: &[u8], target: &[u8]) -> Result<(), Error> {
let mut mac = HmacSha256::new(self.ipk.op_key())?;
mac.update(random)?;
mac.update(self.root_ca.get_pubkey())?;
let mut buf: [u8; 8] = [0; 8];
LittleEndian::write_u64(&mut buf, self.fabric_id);
mac.update(&buf)?;
LittleEndian::write_u64(&mut buf, self.node_id);
mac.update(&buf)?;
let mut id = [0_u8; crypto::SHA256_HASH_LEN_BYTES];
mac.finish(&mut id)?;
if id.as_slice() == target {
Ok(())
} else {
Err(Error::NotFound)
}
}
pub fn sign_msg(&self, msg: &[u8], signature: &mut [u8]) -> Result<usize, Error> {
self.key_pair.sign_msg(msg, signature)
}
pub fn get_node_id(&self) -> u64 {
self.node_id
}
pub fn get_fabric_id(&self) -> u64 {
self.fabric_id
}
pub fn get_fabric_desc(&self, fab_idx: u8) -> FabricDescriptor {
FabricDescriptor {
root_public_key: OctetStr::new(self.root_ca.get_pubkey()),
vendor_id: self.vendor_id,
fabric_id: self.fabric_id,
node_id: self.node_id,
label: UtfStr(self.label.as_bytes()),
fab_idx: Some(fab_idx),
}
}
fn rm_store(&self, index: usize, psm: &MutexGuard<Psm>) {
psm.rm(fb_key!(index, ST_RCA));
psm.rm(fb_key!(index, ST_ICA));
psm.rm(fb_key!(index, ST_NOC));
psm.rm(fb_key!(index, ST_IPK));
psm.rm(fb_key!(index, ST_LBL));
psm.rm(fb_key!(index, ST_PBKEY));
psm.rm(fb_key!(index, ST_PRKEY));
psm.rm(fb_key!(index, ST_VID));
}
fn store(&self, index: usize, psm: &MutexGuard<Psm>) -> Result<(), Error> {
let mut key = [0u8; MAX_CERT_TLV_LEN];
let len = self.root_ca.as_tlv(&mut key)?;
psm.set_kv_slice(fb_key!(index, ST_RCA), &key[..len])?;
let len = if let Some(icac) = &self.icac {
icac.as_tlv(&mut key)?
} else {
0
};
psm.set_kv_slice(fb_key!(index, ST_ICA), &key[..len])?;
let len = self.noc.as_tlv(&mut key)?;
psm.set_kv_slice(fb_key!(index, ST_NOC), &key[..len])?;
psm.set_kv_slice(fb_key!(index, ST_IPK), self.ipk.epoch_key())?;
psm.set_kv_slice(fb_key!(index, ST_LBL), self.label.as_bytes())?;
let mut key = [0_u8; crypto::EC_POINT_LEN_BYTES];
let len = self.key_pair.get_public_key(&mut key)?;
let key = &key[..len];
psm.set_kv_slice(fb_key!(index, ST_PBKEY), key)?;
let mut key = [0_u8; crypto::BIGNUM_LEN_BYTES];
let len = self.key_pair.get_private_key(&mut key)?;
let key = &key[..len];
psm.set_kv_slice(fb_key!(index, ST_PRKEY), key)?;
psm.set_kv_u64(fb_key!(index, ST_VID), self.vendor_id.into())?;
Ok(())
}
fn load(index: usize, psm: &MutexGuard<Psm>) -> Result<Self, Error> {
let mut root_ca = Vec::new();
psm.get_kv_slice(fb_key!(index, ST_RCA), &mut root_ca)?;
let root_ca = Cert::new(root_ca.as_slice())?;
let mut icac = Vec::new();
psm.get_kv_slice(fb_key!(index, ST_ICA), &mut icac)?;
let icac = if !icac.is_empty() {
Some(Cert::new(icac.as_slice())?)
} else {
None
};
let mut noc = Vec::new();
psm.get_kv_slice(fb_key!(index, ST_NOC), &mut noc)?;
let noc = Cert::new(noc.as_slice())?;
let mut ipk = Vec::new();
psm.get_kv_slice(fb_key!(index, ST_IPK), &mut ipk)?;
let mut label = Vec::new();
psm.get_kv_slice(fb_key!(index, ST_LBL), &mut label)?;
let label = String::from_utf8(label).map_err(|_| {
error!("Couldn't read label");
Error::Invalid
})?;
let mut pub_key = Vec::new();
psm.get_kv_slice(fb_key!(index, ST_PBKEY), &mut pub_key)?;
let mut priv_key = Vec::new();
psm.get_kv_slice(fb_key!(index, ST_PRKEY), &mut priv_key)?;
let keypair = KeyPair::new_from_components(pub_key.as_slice(), priv_key.as_slice())?;
let mut vendor_id = 0;
psm.get_kv_u64(fb_key!(index, ST_VID), &mut vendor_id)?;
let f = Fabric::new(
keypair,
root_ca,
icac,
noc,
ipk.as_slice(),
vendor_id as u16,
);
f.map(|mut f| {
f.label = label;
f
})
}
}
pub const MAX_SUPPORTED_FABRICS: usize = 3;
#[derive(Default)]
pub struct FabricMgrInner {
// The outside world expects Fabric Index to be one more than the actual one
// since 0 is not allowed. Need to handle this cleanly somehow
pub fabrics: [Option<Fabric>; MAX_SUPPORTED_FABRICS],
}
pub struct FabricMgr {
inner: RwLock<FabricMgrInner>,
psm: Arc<Mutex<Psm>>,
}
impl FabricMgr {
pub fn new() -> Result<Self, Error> {
let dummy_fabric = Fabric::dummy()?;
let mut mgr = FabricMgrInner::default();
mgr.fabrics[0] = Some(dummy_fabric);
let mut fm = Self {
inner: RwLock::new(mgr),
psm: Psm::get()?,
};
fm.load()?;
Ok(fm)
}
fn store(&self, index: usize, fabric: &Fabric) -> Result<(), Error> {
let psm = self.psm.lock().unwrap();
fabric.store(index, &psm)
}
fn load(&mut self) -> Result<(), Error> {
let mut mgr = self.inner.write()?;
let psm = self.psm.lock().unwrap();
for i in 0..MAX_SUPPORTED_FABRICS {
let result = Fabric::load(i, &psm);
if let Ok(fabric) = result {
info!("Adding new fabric at index {}", i);
mgr.fabrics[i] = Some(fabric);
}
}
Ok(())
}
pub fn add(&self, f: Fabric) -> Result<u8, Error> {
let mut mgr = self.inner.write()?;
let index = mgr
.fabrics
.iter()
.position(|f| f.is_none())
.ok_or(Error::NoSpace)?;
self.store(index, &f)?;
mgr.fabrics[index] = Some(f);
Ok(index as u8)
}
pub fn remove(&self, fab_idx: u8) -> Result<(), Error> {
let fab_idx = fab_idx as usize;
let mut mgr = self.inner.write().unwrap();
let psm = self.psm.lock().unwrap();
if let Some(f) = &mgr.fabrics[fab_idx] {
f.rm_store(fab_idx, &psm);
mgr.fabrics[fab_idx] = None;
Ok(())
} else {
Err(Error::NotFound)
}
}
pub fn match_dest_id(&self, random: &[u8], target: &[u8]) -> Result<usize, Error> {
let mgr = self.inner.read()?;
for i in 0..MAX_SUPPORTED_FABRICS {
if let Some(fabric) = &mgr.fabrics[i] {
if fabric.match_dest_id(random, target).is_ok() {
return Ok(i);
}
}
}
Err(Error::NotFound)
}
pub fn get_fabric<'ret, 'me: 'ret>(
&'me self,
idx: usize,
) -> Result<RwLockReadGuardRef<'ret, FabricMgrInner, Option<Fabric>>, Error> {
Ok(RwLockReadGuardRef::new(self.inner.read()?).map(|fm| &fm.fabrics[idx]))
}
pub fn is_empty(&self) -> bool {
let mgr = self.inner.read().unwrap();
for i in 1..MAX_SUPPORTED_FABRICS {
if mgr.fabrics[i].is_some() {
return false;
}
}
true
}
pub fn used_count(&self) -> usize {
let mgr = self.inner.read().unwrap();
let mut count = 0;
for i in 1..MAX_SUPPORTED_FABRICS {
if mgr.fabrics[i].is_some() {
count += 1;
}
}
count
}
// Parameters to T are the Fabric and its Fabric Index
pub fn for_each<T>(&self, mut f: T) -> Result<(), Error>
where
T: FnMut(&Fabric, u8),
{
let mgr = self.inner.read().unwrap();
for i in 1..MAX_SUPPORTED_FABRICS {
if let Some(fabric) = &mgr.fabrics[i] {
f(fabric, i as u8)
}
}
Ok(())
}
pub fn set_label(&self, index: u8, label: String) -> Result<(), Error> {
let index = index as usize;
let mut mgr = self.inner.write()?;
if !label.is_empty() {
for i in 1..MAX_SUPPORTED_FABRICS {
if let Some(fabric) = &mgr.fabrics[i] {
if fabric.label == label {
return Err(Error::Invalid);
}
}
}
}
if let Some(fabric) = &mut mgr.fabrics[index] {
let old = fabric.label.clone();
fabric.label = label;
let psm = self.psm.lock().unwrap();
if fabric.store(index, &psm).is_err() {
fabric.label = old;
return Err(Error::StdIoError);
}
}
Ok(())
}
}
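As a small illustration of the manager API above, a caller holding a FabricMgr could enumerate the loaded fabrics through for_each; the helper below is hypothetical and assumes the log::info import already used in this file:
// Hypothetical caller: log the node id of every occupied fabric slot.
// `for_each` hands the closure each stored Fabric together with its index.
fn log_fabrics(fabric_mgr: &FabricMgr) -> Result<(), Error> {
    fabric_mgr.for_each(|fabric, idx| {
        info!("fabric {} -> node id 0x{:016X}", idx, fabric.get_node_id());
    })
}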


@@ -1,88 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use super::core::IMStatusCode;
use super::core::OpCode;
use super::messages::ib;
use super::messages::msg;
use super::messages::msg::InvReq;
use super::InteractionModel;
use super::Transaction;
use crate::{
error::*,
tlv::{get_root_node_struct, print_tlv_list, FromTLV, TLVElement, TLVWriter, TagType},
transport::{packet::Packet, proto_demux::ResponseRequired},
};
use log::error;
#[macro_export]
macro_rules! cmd_enter {
($e:expr) => {{
use colored::Colorize;
info! {"{} {}", "Handling Command".cyan(), $e.cyan()}
}};
}
pub struct CommandReq<'a, 'b, 'c, 'd, 'e> {
pub cmd: ib::CmdPath,
pub data: TLVElement<'a>,
pub resp: &'a mut TLVWriter<'b, 'c>,
pub trans: &'a mut Transaction<'d, 'e>,
}
impl InteractionModel {
pub fn handle_invoke_req(
&mut self,
trans: &mut Transaction,
rx_buf: &[u8],
proto_tx: &mut Packet,
) -> Result<ResponseRequired, Error> {
if InteractionModel::req_timeout_handled(trans, proto_tx)? {
return Ok(ResponseRequired::Yes);
}
proto_tx.set_proto_opcode(OpCode::InvokeResponse as u8);
let mut tw = TLVWriter::new(proto_tx.get_writebuf()?);
let root = get_root_node_struct(rx_buf)?;
let inv_req = InvReq::from_tlv(&root)?;
let timed_tx = trans.get_timeout().map(|_| true);
let timed_request = inv_req.timed_request.filter(|a| *a);
// Either both should be None, or both should be Some(true) (see the sketch after this impl)
if timed_tx != timed_request {
InteractionModel::create_status_response(proto_tx, IMStatusCode::TimedRequestMisMatch)?;
return Ok(ResponseRequired::Yes);
}
tw.start_struct(TagType::Anonymous)?;
// Suppress Response -> TODO: Need to revisit this for cases where we send a command back
tw.bool(
TagType::Context(msg::InvRespTag::SupressResponse as u8),
false,
)?;
self.consumer
.consume_invoke_cmd(&inv_req, trans, &mut tw)
.map_err(|e| {
error!("Error in handling command: {:?}", e);
print_tlv_list(rx_buf);
e
})?;
tw.end_container()?;
Ok(ResponseRequired::Yes)
}
}
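The timed-request check above can be restated as a single boolean rule; the helper below is a hypothetical paraphrase of that comparison, not code from this module:
// A timed invoke is only valid if a preceding Timed Request armed a timeout
// on the transaction, and a plain invoke is only valid if none did.
fn timed_flags_consistent(has_timeout: bool, timed_request: Option<bool>) -> bool {
    has_timeout == timed_request.unwrap_or(false)
}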


@@ -1,278 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::time::{Duration, SystemTime};
use crate::{
error::*,
interaction_model::messages::msg::StatusResp,
tlv::{self, get_root_node_struct, FromTLV, TLVElement, TLVWriter, TagType, ToTLV},
transport::{
exchange::Exchange,
packet::Packet,
proto_demux::{self, ProtoCtx, ResponseRequired},
session::SessionHandle,
},
};
use colored::Colorize;
use log::{error, info};
use num;
use num_derive::FromPrimitive;
use super::InteractionModel;
use super::Transaction;
use super::TransactionState;
use super::{messages::msg::TimedReq, InteractionConsumer};
/* Handle messages related to the Interaction Model
*/
/* Interaction Model ID as per the Matter Spec */
const PROTO_ID_INTERACTION_MODEL: usize = 0x01;
#[derive(FromPrimitive, Debug, Copy, Clone, PartialEq)]
pub enum OpCode {
Reserved = 0,
StatusResponse = 1,
ReadRequest = 2,
SubscribeRequest = 3,
SubscriptResponse = 4,
ReportData = 5,
WriteRequest = 6,
WriteResponse = 7,
InvokeRequest = 8,
InvokeResponse = 9,
TimedRequest = 10,
}
impl<'a, 'b> Transaction<'a, 'b> {
pub fn new(session: &'a mut SessionHandle<'b>, exch: &'a mut Exchange) -> Self {
Self {
state: TransactionState::Ongoing,
session,
exch,
}
}
/// Terminates the transaction; no communication (not even ACKs) happens henceforth
pub fn terminate(&mut self) {
self.state = TransactionState::Terminate
}
pub fn is_terminate(&self) -> bool {
self.state == TransactionState::Terminate
}
/// Marks the transaction as completed from the application's perspective
pub fn complete(&mut self) {
self.state = TransactionState::Complete
}
pub fn is_complete(&self) -> bool {
self.state == TransactionState::Complete
}
pub fn set_timeout(&mut self, timeout: u64) {
self.exch
.set_data_time(SystemTime::now().checked_add(Duration::from_millis(timeout)));
}
pub fn get_timeout(&mut self) -> Option<SystemTime> {
self.exch.get_data_time()
}
pub fn has_timed_out(&self) -> bool {
if let Some(timeout) = self.exch.get_data_time() {
if SystemTime::now() > timeout {
return true;
}
}
false
}
}
impl InteractionModel {
pub fn new(consumer: Box<dyn InteractionConsumer>) -> InteractionModel {
InteractionModel { consumer }
}
pub fn handle_subscribe_req(
&mut self,
trans: &mut Transaction,
rx_buf: &[u8],
proto_tx: &mut Packet,
) -> Result<ResponseRequired, Error> {
let mut tw = TLVWriter::new(proto_tx.get_writebuf()?);
let (opcode, resp) = self.consumer.consume_subscribe(rx_buf, trans, &mut tw)?;
proto_tx.set_proto_opcode(opcode as u8);
Ok(resp)
}
pub fn handle_status_resp(
&mut self,
trans: &mut Transaction,
rx_buf: &[u8],
proto_tx: &mut Packet,
) -> Result<ResponseRequired, Error> {
let mut tw = TLVWriter::new(proto_tx.get_writebuf()?);
let root = get_root_node_struct(rx_buf)?;
let req = StatusResp::from_tlv(&root)?;
let (opcode, resp) = self.consumer.consume_status_report(&req, trans, &mut tw)?;
proto_tx.set_proto_opcode(opcode as u8);
Ok(resp)
}
pub fn handle_timed_req(
&mut self,
trans: &mut Transaction,
rx_buf: &[u8],
proto_tx: &mut Packet,
) -> Result<ResponseRequired, Error> {
proto_tx.set_proto_opcode(OpCode::StatusResponse as u8);
let root = get_root_node_struct(rx_buf)?;
let req = TimedReq::from_tlv(&root)?;
trans.set_timeout(req.timeout.into());
let status = StatusResp {
status: IMStatusCode::Success,
};
let mut tw = TLVWriter::new(proto_tx.get_writebuf()?);
let _ = status.to_tlv(&mut tw, TagType::Anonymous);
Ok(ResponseRequired::Yes)
}
/// Handle Request Timeouts
/// This API checks whether the request was a timed request and, if so, whether its timeout
/// has expired; if it has, the appropriate status response is generated
pub(super) fn req_timeout_handled(
trans: &mut Transaction,
proto_tx: &mut Packet,
) -> Result<bool, Error> {
if trans.has_timed_out() {
trans.complete();
InteractionModel::create_status_response(proto_tx, IMStatusCode::Timeout)?;
Ok(true)
} else {
Ok(false)
}
}
pub(super) fn create_status_response(
proto_tx: &mut Packet,
status: IMStatusCode,
) -> Result<(), Error> {
proto_tx.set_proto_opcode(OpCode::StatusResponse as u8);
let mut tw = TLVWriter::new(proto_tx.get_writebuf()?);
let status = StatusResp { status };
status.to_tlv(&mut tw, TagType::Anonymous)
}
}
impl proto_demux::HandleProto for InteractionModel {
fn handle_proto_id(&mut self, ctx: &mut ProtoCtx) -> Result<ResponseRequired, Error> {
let mut trans = Transaction::new(&mut ctx.exch_ctx.sess, ctx.exch_ctx.exch);
let proto_opcode: OpCode =
num::FromPrimitive::from_u8(ctx.rx.get_proto_opcode()).ok_or(Error::Invalid)?;
ctx.tx.set_proto_id(PROTO_ID_INTERACTION_MODEL as u16);
let buf = ctx.rx.as_borrow_slice();
info!("{} {:?}", "Received command".cyan(), proto_opcode);
tlv::print_tlv_list(buf);
let result = match proto_opcode {
OpCode::InvokeRequest => self.handle_invoke_req(&mut trans, buf, &mut ctx.tx)?,
OpCode::ReadRequest => self.handle_read_req(&mut trans, buf, &mut ctx.tx)?,
OpCode::WriteRequest => self.handle_write_req(&mut trans, buf, &mut ctx.tx)?,
OpCode::TimedRequest => self.handle_timed_req(&mut trans, buf, &mut ctx.tx)?,
OpCode::SubscribeRequest => self.handle_subscribe_req(&mut trans, buf, &mut ctx.tx)?,
OpCode::StatusResponse => self.handle_status_resp(&mut trans, buf, &mut ctx.tx)?,
_ => {
error!("Opcode Not Handled: {:?}", proto_opcode);
return Err(Error::InvalidOpcode);
}
};
if result == ResponseRequired::Yes {
info!("Sending response");
tlv::print_tlv_list(ctx.tx.as_borrow_slice());
}
if trans.is_terminate() {
ctx.exch_ctx.exch.terminate();
} else if trans.is_complete() {
ctx.exch_ctx.exch.close();
}
Ok(result)
}
fn get_proto_id(&self) -> usize {
PROTO_ID_INTERACTION_MODEL
}
}
#[derive(FromPrimitive, Debug, Clone, Copy, PartialEq)]
pub enum IMStatusCode {
Success = 0,
Failure = 1,
InvalidSubscription = 0x7D,
UnsupportedAccess = 0x7E,
UnsupportedEndpoint = 0x7F,
InvalidAction = 0x80,
UnsupportedCommand = 0x81,
InvalidCommand = 0x85,
UnsupportedAttribute = 0x86,
ConstraintError = 0x87,
UnsupportedWrite = 0x88,
ResourceExhausted = 0x89,
NotFound = 0x8b,
UnreportableAttribute = 0x8c,
InvalidDataType = 0x8d,
UnsupportedRead = 0x8f,
DataVersionMismatch = 0x92,
Timeout = 0x94,
Busy = 0x9c,
UnsupportedCluster = 0xc3,
NoUpstreamSubscription = 0xc5,
NeedsTimedInteraction = 0xc6,
UnsupportedEvent = 0xc7,
PathsExhausted = 0xc8,
TimedRequestMisMatch = 0xc9,
FailSafeRequired = 0xca,
}
impl From<Error> for IMStatusCode {
fn from(e: Error) -> Self {
match e {
Error::EndpointNotFound => IMStatusCode::UnsupportedEndpoint,
Error::ClusterNotFound => IMStatusCode::UnsupportedCluster,
Error::AttributeNotFound => IMStatusCode::UnsupportedAttribute,
Error::CommandNotFound => IMStatusCode::UnsupportedCommand,
_ => IMStatusCode::Failure,
}
}
}
impl FromTLV<'_> for IMStatusCode {
fn from_tlv(t: &TLVElement) -> Result<Self, Error> {
num::FromPrimitive::from_u16(t.u16()?).ok_or(Error::Invalid)
}
}
impl ToTLV for IMStatusCode {
fn to_tlv(&self, tw: &mut TLVWriter, tag_type: TagType) -> Result<(), Error> {
tw.u16(tag_type, *self as u16)
}
}


@@ -1,87 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::{
error::Error,
tlv::TLVWriter,
transport::{exchange::Exchange, proto_demux::ResponseRequired, session::SessionHandle},
};
use self::{
core::OpCode,
messages::msg::{InvReq, StatusResp, WriteReq},
};
#[derive(PartialEq)]
pub enum TransactionState {
Ongoing,
Complete,
Terminate,
}
pub struct Transaction<'a, 'b> {
pub state: TransactionState,
pub session: &'a mut SessionHandle<'b>,
pub exch: &'a mut Exchange,
}
pub trait InteractionConsumer {
fn consume_invoke_cmd(
&self,
req: &InvReq,
trans: &mut Transaction,
tw: &mut TLVWriter,
) -> Result<(), Error>;
fn consume_read_attr(
&self,
// TODO: This handling is different from the other APIs here, identify
// consistent options for this trait
req: &[u8],
trans: &mut Transaction,
tw: &mut TLVWriter,
) -> Result<(), Error>;
fn consume_write_attr(
&self,
req: &WriteReq,
trans: &mut Transaction,
tw: &mut TLVWriter,
) -> Result<(), Error>;
fn consume_status_report(
&self,
_req: &StatusResp,
_trans: &mut Transaction,
_tw: &mut TLVWriter,
) -> Result<(OpCode, ResponseRequired), Error>;
fn consume_subscribe(
&self,
_req: &[u8],
_trans: &mut Transaction,
_tw: &mut TLVWriter,
) -> Result<(OpCode, ResponseRequired), Error>;
}
pub struct InteractionModel {
consumer: Box<dyn InteractionConsumer>,
}
pub mod command;
pub mod core;
pub mod messages;
pub mod read;
pub mod write;
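For illustration only, a do-nothing implementation of the InteractionConsumer trait above could look as follows; a real consumer (for example, a data model) would encode its responses into the supplied TLVWriter:
// Hypothetical no-op consumer, showing only the shape the trait impl takes.
struct NullConsumer;
impl InteractionConsumer for NullConsumer {
    fn consume_invoke_cmd(&self, _req: &InvReq, _t: &mut Transaction, _tw: &mut TLVWriter) -> Result<(), Error> {
        Ok(())
    }
    fn consume_read_attr(&self, _req: &[u8], _t: &mut Transaction, _tw: &mut TLVWriter) -> Result<(), Error> {
        Ok(())
    }
    fn consume_write_attr(&self, _req: &WriteReq, _t: &mut Transaction, _tw: &mut TLVWriter) -> Result<(), Error> {
        Ok(())
    }
    fn consume_status_report(&self, _req: &StatusResp, _t: &mut Transaction, _tw: &mut TLVWriter) -> Result<(OpCode, ResponseRequired), Error> {
        Ok((OpCode::StatusResponse, ResponseRequired::No))
    }
    fn consume_subscribe(&self, _req: &[u8], _t: &mut Transaction, _tw: &mut TLVWriter) -> Result<(OpCode, ResponseRequired), Error> {
        Ok((OpCode::StatusResponse, ResponseRequired::No))
    }
}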


@@ -1,42 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::{
error::Error,
interaction_model::core::OpCode,
tlv::TLVWriter,
transport::{packet::Packet, proto_demux::ResponseRequired},
};
use super::{InteractionModel, Transaction};
impl InteractionModel {
pub fn handle_read_req(
&mut self,
trans: &mut Transaction,
rx_buf: &[u8],
proto_tx: &mut Packet,
) -> Result<ResponseRequired, Error> {
proto_tx.set_proto_opcode(OpCode::ReportData as u8);
let proto_tx_wb = proto_tx.get_writebuf()?;
let mut tw = TLVWriter::new(proto_tx_wb);
self.consumer.consume_read_attr(rx_buf, trans, &mut tw)?;
Ok(ResponseRequired::Yes)
}
}


@@ -1,58 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use log::error;
use crate::{
error::Error,
tlv::{get_root_node_struct, FromTLV, TLVWriter, TagType},
transport::{packet::Packet, proto_demux::ResponseRequired},
};
use super::{core::OpCode, messages::msg::WriteReq, InteractionModel, Transaction};
impl InteractionModel {
pub fn handle_write_req(
&mut self,
trans: &mut Transaction,
rx_buf: &[u8],
proto_tx: &mut Packet,
) -> Result<ResponseRequired, Error> {
if InteractionModel::req_timeout_handled(trans, proto_tx)? {
return Ok(ResponseRequired::Yes);
}
proto_tx.set_proto_opcode(OpCode::WriteResponse as u8);
let mut tw = TLVWriter::new(proto_tx.get_writebuf()?);
let root = get_root_node_struct(rx_buf)?;
let write_req = WriteReq::from_tlv(&root)?;
let supress_response = write_req.supress_response.unwrap_or_default();
tw.start_struct(TagType::Anonymous)?;
self.consumer
.consume_write_attr(&write_req, trans, &mut tw)?;
tw.end_container()?;
trans.complete();
if supress_response {
error!("Supress response is set, is this the expected handling?");
Ok(ResponseRequired::No)
} else {
Ok(ResponseRequired::Yes)
}
}
}


@@ -1,132 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::sync::{Arc, Mutex, Once};
use crate::{
error::Error,
sys::{sys_publish_service, SysMdnsService},
transport::udp::MATTER_PORT,
};
#[derive(Default)]
/// The mDNS service handler
pub struct MdnsInner {
/// Vendor ID
vid: u16,
/// Product ID
pid: u16,
/// Device name
device_name: String,
}
pub struct Mdns {
inner: Mutex<MdnsInner>,
}
const SHORT_DISCRIMINATOR_MASK: u16 = 0xF00;
const SHORT_DISCRIMINATOR_SHIFT: u16 = 8;
static mut G_MDNS: Option<Arc<Mdns>> = None;
static INIT: Once = Once::new();
pub enum ServiceMode {
/// The commissioned state
Commissioned,
/// The commissionable state with the discriminator that should be used
Commissionable(u16),
}
impl Mdns {
fn new() -> Self {
Self {
inner: Mutex::new(MdnsInner {
..Default::default()
}),
}
}
/// Get a handle to the globally unique mDNS instance
pub fn get() -> Result<Arc<Self>, Error> {
unsafe {
INIT.call_once(|| {
G_MDNS = Some(Arc::new(Mdns::new()));
});
Ok(G_MDNS.as_ref().ok_or(Error::Invalid)?.clone())
}
}
/// Set mDNS service specific values
/// Values like vid, pid, discriminator etc
// TODO: More things like device-type etc can be added here
pub fn set_values(&self, vid: u16, pid: u16, device_name: &str) {
let mut inner = self.inner.lock().unwrap();
inner.vid = vid;
inner.pid = pid;
inner.device_name = device_name.chars().take(32).collect();
}
/// Publish an mDNS service
/// name - is the service name (comma separated subtypes may follow)
/// mode - the current service mode
#[allow(clippy::needless_pass_by_value)]
pub fn publish_service(&self, name: &str, mode: ServiceMode) -> Result<SysMdnsService, Error> {
match mode {
ServiceMode::Commissioned => {
sys_publish_service(name, "_matter._tcp", MATTER_PORT, &[])
}
ServiceMode::Commissionable(discriminator) => {
let inner = self.inner.lock().unwrap();
let short = compute_short_discriminator(discriminator);
let serv_type = format!("_matterc._udp,_S{},_L{}", short, discriminator);
let str_discriminator = format!("{}", discriminator);
let txt_kvs = [
["D", &str_discriminator],
["CM", "1"],
["DN", &inner.device_name],
["VP", &format!("{}+{}", inner.vid, inner.pid)],
["SII", "5000"], /* Sleepy Idle Interval */
["SAI", "300"], /* Sleepy Active Interval */
["PH", "33"], /* Pairing Hint */
["PI", ""], /* Pairing Instruction */
];
sys_publish_service(name, &serv_type, MATTER_PORT, &txt_kvs)
}
}
}
}
fn compute_short_discriminator(discriminator: u16) -> u16 {
(discriminator & SHORT_DISCRIMINATOR_MASK) >> SHORT_DISCRIMINATOR_SHIFT
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn can_compute_short_discriminator() {
let discriminator: u16 = 0b0000_1111_0000_0000;
let short = compute_short_discriminator(discriminator);
assert_eq!(short, 0b1111);
let discriminator: u16 = 840;
let short = compute_short_discriminator(discriminator);
assert_eq!(short, 3);
}
}
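Tying the pieces above together, a hypothetical commissionable advertisement could be set up as below; the vendor id, product id, device name and instance name are placeholders, and the short discriminator for 840 works out to (840 & 0xF00) >> 8 = 3, exactly as the test above checks:
// Hypothetical setup using only the API defined in this file.
fn advertise_commissionable(discriminator: u16) -> Result<SysMdnsService, Error> {
    let mdns = Mdns::get()?;
    mdns.set_values(0xFFF1, 0x8000, "demo-device");
    // Publishes _matterc._udp with the _S<short> and _L<long> subtypes
    // plus the TXT records assembled in publish_service above.
    mdns.publish_service("RANDOM-INSTANCE", ServiceMode::Commissionable(discriminator))
}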


@@ -1,79 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::sync::Arc;
use crate::{
error::*,
fabric::FabricMgr,
secure_channel::common::*,
tlv,
transport::proto_demux::{self, ProtoCtx, ResponseRequired},
};
use log::{error, info};
use num;
use super::{case::Case, pake::PaseMgr};
/* Handle messages related to the Secure Channel
*/
pub struct SecureChannel {
case: Case,
pase: PaseMgr,
}
impl SecureChannel {
pub fn new(pase: PaseMgr, fabric_mgr: Arc<FabricMgr>) -> SecureChannel {
SecureChannel {
pase,
case: Case::new(fabric_mgr),
}
}
}
impl proto_demux::HandleProto for SecureChannel {
fn handle_proto_id(&mut self, ctx: &mut ProtoCtx) -> Result<ResponseRequired, Error> {
let proto_opcode: OpCode =
num::FromPrimitive::from_u8(ctx.rx.get_proto_opcode()).ok_or(Error::Invalid)?;
ctx.tx.set_proto_id(PROTO_ID_SECURE_CHANNEL as u16);
info!("Received Opcode: {:?}", proto_opcode);
info!("Received Data:");
tlv::print_tlv_list(ctx.rx.as_borrow_slice());
let result = match proto_opcode {
OpCode::MRPStandAloneAck => Ok(ResponseRequired::No),
OpCode::PBKDFParamRequest => self.pase.pbkdfparamreq_handler(ctx),
OpCode::PASEPake1 => self.pase.pasepake1_handler(ctx),
OpCode::PASEPake3 => self.pase.pasepake3_handler(ctx),
OpCode::CASESigma1 => self.case.casesigma1_handler(ctx),
OpCode::CASESigma3 => self.case.casesigma3_handler(ctx),
_ => {
error!("OpCode Not Handled: {:?}", proto_opcode);
Err(Error::InvalidOpcode)
}
};
if result == Ok(ResponseRequired::Yes) {
info!("Sending response");
tlv::print_tlv_list(ctx.tx.as_borrow_slice());
}
result
}
fn get_proto_id(&self) -> usize {
PROTO_ID_SECURE_CHANNEL
}
}


@@ -1,54 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::error::Error;
// This trait allows us to switch between crypto providers like OpenSSL and mbedTLS for Spake2
// Currently this is only validated for a verifier (responder); a usage sketch follows the trait below
// A verifier will typically do:
// Step 1: w0 and L
// set_w0_from_w0s
// set_L
// Step 2: get_pB
// Step 3: get_TT_as_verifier(pA)
// Step 4: Computation of cA and cB happens outside since it doesn't use either BigNum or EcPoint
pub trait CryptoSpake2 {
fn new() -> Result<Self, Error>
where
Self: Sized;
fn set_w0_from_w0s(&mut self, w0s: &[u8]) -> Result<(), Error>;
fn set_w1_from_w1s(&mut self, w1s: &[u8]) -> Result<(), Error>;
fn set_w0(&mut self, w0: &[u8]) -> Result<(), Error>;
fn set_w1(&mut self, w1: &[u8]) -> Result<(), Error>;
#[allow(non_snake_case)]
fn set_L(&mut self, l: &[u8]) -> Result<(), Error>;
#[allow(non_snake_case)]
fn set_L_from_w1s(&mut self, w1s: &[u8]) -> Result<(), Error>;
#[allow(non_snake_case)]
fn get_pB(&mut self, pB: &mut [u8]) -> Result<(), Error>;
#[allow(non_snake_case)]
fn get_TT_as_verifier(
&mut self,
context: &[u8],
pA: &[u8],
pB: &[u8],
out: &mut [u8],
) -> Result<(), Error>;
}
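Following the step comment above, one hypothetical verifier round over any CryptoSpake2 implementation could be sketched as below; the function name, parameter set and the 65-byte pB buffer (an uncompressed P-256 point, as used by the PASE handler) are assumptions, and cA/cB remain the caller's job:
fn verifier_round<S: CryptoSpake2>(
    w0s: &[u8],
    l: &[u8],
    context: &[u8],
    p_a: &[u8],
    tt_out: &mut [u8],
) -> Result<[u8; 65], Error> {
    let mut s = S::new()?;
    // Step 1: load w0 (derived from w0s) and L
    s.set_w0_from_w0s(w0s)?;
    s.set_L(l)?;
    // Step 2: produce our share pB
    let mut p_b = [0u8; 65];
    s.get_pB(&mut p_b)?;
    // Step 3: build the transcript TT over the peer's pA and our pB
    s.get_TT_as_verifier(context, p_a, &p_b, tt_out)?;
    // Step 4: computation of cA and cB happens outside this trait.
    Ok(p_b)
}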


@@ -1,365 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::{
sync::{Arc, Mutex},
time::{Duration, SystemTime},
};
use super::{
common::{create_sc_status_report, SCStatusCodes},
spake2p::{Spake2P, VerifierData},
};
use crate::{
crypto,
error::Error,
mdns::{self, Mdns},
secure_channel::common::OpCode,
sys::SysMdnsService,
tlv::{self, get_root_node_struct, FromTLV, OctetStr, TLVElement, TLVWriter, TagType, ToTLV},
transport::{
exchange::ExchangeCtx,
network::Address,
proto_demux::{ProtoCtx, ResponseRequired},
queue::{Msg, WorkQ},
session::{CloneData, SessionMode},
},
};
use log::{error, info};
use rand::prelude::*;
enum PaseMgrState {
Enabled(PAKE, SysMdnsService),
Disabled,
}
pub struct PaseMgrInternal {
state: PaseMgrState,
}
#[derive(Clone)]
// Could this lock be avoided?
pub struct PaseMgr(Arc<Mutex<PaseMgrInternal>>);
impl PaseMgr {
pub fn new() -> Self {
Self(Arc::new(Mutex::new(PaseMgrInternal {
state: PaseMgrState::Disabled,
})))
}
pub fn enable_pase_session(
&mut self,
verifier: VerifierData,
discriminator: u16,
) -> Result<(), Error> {
let mut s = self.0.lock().unwrap();
let name: u64 = rand::thread_rng().gen_range(0..0xFFFFFFFFFFFFFFFF);
let name = format!("{:016X}", name);
let mdns = Mdns::get()?
.publish_service(&name, mdns::ServiceMode::Commissionable(discriminator))?;
s.state = PaseMgrState::Enabled(PAKE::new(verifier), mdns);
Ok(())
}
pub fn disable_pase_session(&mut self) {
let mut s = self.0.lock().unwrap();
s.state = PaseMgrState::Disabled;
}
/// If the PASE Session is enabled, execute the closure,
/// if not enabled, generate SC Status Report
fn if_enabled<F>(&mut self, ctx: &mut ProtoCtx, f: F) -> Result<(), Error>
where
F: FnOnce(&mut PAKE, &mut ProtoCtx) -> Result<(), Error>,
{
let mut s = self.0.lock().unwrap();
if let PaseMgrState::Enabled(pake, _) = &mut s.state {
f(pake, ctx)
} else {
error!("PASE Not enabled");
create_sc_status_report(&mut ctx.tx, SCStatusCodes::InvalidParameter, None)
}
}
pub fn pbkdfparamreq_handler(&mut self, ctx: &mut ProtoCtx) -> Result<ResponseRequired, Error> {
ctx.tx.set_proto_opcode(OpCode::PBKDFParamResponse as u8);
self.if_enabled(ctx, |pake, ctx| pake.handle_pbkdfparamrequest(ctx))?;
Ok(ResponseRequired::Yes)
}
pub fn pasepake1_handler(&mut self, ctx: &mut ProtoCtx) -> Result<ResponseRequired, Error> {
ctx.tx.set_proto_opcode(OpCode::PASEPake2 as u8);
self.if_enabled(ctx, |pake, ctx| pake.handle_pasepake1(ctx))?;
Ok(ResponseRequired::Yes)
}
pub fn pasepake3_handler(&mut self, ctx: &mut ProtoCtx) -> Result<ResponseRequired, Error> {
self.if_enabled(ctx, |pake, ctx| pake.handle_pasepake3(ctx))?;
self.disable_pase_session();
Ok(ResponseRequired::Yes)
}
}
impl Default for PaseMgr {
fn default() -> Self {
Self::new()
}
}
// This file basically deals with the handlers for the PASE secure channel protocol
// TLV extraction and encoding is done in this file.
// We create a Spake2p object and set it up in the exchange-data. This object then
// handles Spake2+ specific stuff.
const PASE_DISCARD_TIMEOUT_SECS: Duration = Duration::from_secs(60);
const SPAKE2_SESSION_KEYS_INFO: [u8; 11] = *b"SessionKeys";
struct SessionData {
start_time: SystemTime,
exch_id: u16,
peer_addr: Address,
spake2p: Box<Spake2P>,
}
impl SessionData {
fn is_sess_expired(&self) -> Result<bool, Error> {
if SystemTime::now().duration_since(self.start_time)? > PASE_DISCARD_TIMEOUT_SECS {
Ok(true)
} else {
Ok(false)
}
}
}
enum PakeState {
Idle,
InProgress(SessionData),
}
impl PakeState {
fn take(&mut self) -> Result<SessionData, Error> {
let new = std::mem::replace(self, PakeState::Idle);
if let PakeState::InProgress(s) = new {
Ok(s)
} else {
Err(Error::InvalidSignature)
}
}
fn is_idle(&self) -> bool {
std::mem::discriminant(self) == std::mem::discriminant(&PakeState::Idle)
}
fn take_sess_data(&mut self, exch_ctx: &ExchangeCtx) -> Result<SessionData, Error> {
let sd = self.take()?;
if sd.exch_id != exch_ctx.exch.get_id() || sd.peer_addr != exch_ctx.sess.get_peer_addr() {
Err(Error::InvalidState)
} else {
Ok(sd)
}
}
fn make_in_progress(&mut self, spake2p: Box<Spake2P>, exch_ctx: &ExchangeCtx) {
*self = PakeState::InProgress(SessionData {
start_time: SystemTime::now(),
spake2p,
exch_id: exch_ctx.exch.get_id(),
peer_addr: exch_ctx.sess.get_peer_addr(),
});
}
fn set_sess_data(&mut self, sd: SessionData) {
*self = PakeState::InProgress(sd);
}
}
impl Default for PakeState {
fn default() -> Self {
Self::Idle
}
}
pub struct PAKE {
pub verifier: VerifierData,
state: PakeState,
}
impl PAKE {
pub fn new(verifier: VerifierData) -> Self {
// TODO: Can any PBKDF2 calculation be pre-computed here
PAKE {
verifier,
state: Default::default(),
}
}
#[allow(non_snake_case)]
pub fn handle_pasepake3(&mut self, ctx: &mut ProtoCtx) -> Result<(), Error> {
let mut sd = self.state.take_sess_data(&ctx.exch_ctx)?;
let cA = extract_pasepake_1_or_3_params(ctx.rx.as_borrow_slice())?;
let (status_code, Ke) = sd.spake2p.handle_cA(cA);
if status_code == SCStatusCodes::SessionEstablishmentSuccess {
// Get the keys
let Ke = Ke.ok_or(Error::Invalid)?;
let mut session_keys: [u8; 48] = [0; 48];
crypto::hkdf_sha256(&[], Ke, &SPAKE2_SESSION_KEYS_INFO, &mut session_keys)
.map_err(|_x| Error::NoSpace)?;
// Create a session
let data = sd.spake2p.get_app_data();
let peer_sessid: u16 = (data & 0xffff) as u16;
let local_sessid: u16 = ((data >> 16) & 0xffff) as u16;
let mut clone_data = CloneData::new(
0,
0,
peer_sessid,
local_sessid,
ctx.exch_ctx.sess.get_peer_addr(),
SessionMode::Pase,
);
clone_data.dec_key.copy_from_slice(&session_keys[0..16]);
clone_data.enc_key.copy_from_slice(&session_keys[16..32]);
clone_data
.att_challenge
.copy_from_slice(&session_keys[32..48]);
// Queue a transport mgr request to add a new session
WorkQ::get()?.sync_send(Msg::NewSession(clone_data))?;
}
create_sc_status_report(&mut ctx.tx, status_code, None)?;
ctx.exch_ctx.exch.close();
Ok(())
}
#[allow(non_snake_case)]
pub fn handle_pasepake1(&mut self, ctx: &mut ProtoCtx) -> Result<(), Error> {
let mut sd = self.state.take_sess_data(&ctx.exch_ctx)?;
let pA = extract_pasepake_1_or_3_params(ctx.rx.as_borrow_slice())?;
let mut pB: [u8; 65] = [0; 65];
let mut cB: [u8; 32] = [0; 32];
sd.spake2p.start_verifier(&self.verifier)?;
sd.spake2p.handle_pA(pA, &mut pB, &mut cB)?;
let mut tw = TLVWriter::new(ctx.tx.get_writebuf()?);
let resp = Pake1Resp {
pb: OctetStr(&pB),
cb: OctetStr(&cB),
};
resp.to_tlv(&mut tw, TagType::Anonymous)?;
self.state.set_sess_data(sd);
Ok(())
}
pub fn handle_pbkdfparamrequest(&mut self, ctx: &mut ProtoCtx) -> Result<(), Error> {
if !self.state.is_idle() {
let sd = self.state.take()?;
if sd.is_sess_expired()? {
info!("Previous session expired, clearing it");
self.state = PakeState::Idle;
} else {
info!("Previous session in-progress, denying new request");
// little-endian timeout (here we've hardcoded 500ms)
create_sc_status_report(&mut ctx.tx, SCStatusCodes::Busy, Some(&[0xf4, 0x01]))?;
return Ok(());
}
}
let root = tlv::get_root_node(ctx.rx.as_borrow_slice())?;
let a = PBKDFParamReq::from_tlv(&root)?;
if a.passcode_id != 0 {
error!("Can't yet handle passcode_id != 0");
return Err(Error::Invalid);
}
let mut our_random: [u8; 32] = [0; 32];
rand::thread_rng().fill_bytes(&mut our_random);
let local_sessid = ctx.exch_ctx.sess.reserve_new_sess_id();
let spake2p_data: u32 = ((local_sessid as u32) << 16) | a.initiator_ssid as u32;
let mut spake2p = Box::new(Spake2P::new());
spake2p.set_app_data(spake2p_data);
// Generate response
let mut tw = TLVWriter::new(ctx.tx.get_writebuf()?);
let mut resp = PBKDFParamResp {
init_random: a.initiator_random,
our_random: OctetStr(&our_random),
local_sessid,
params: None,
};
if !a.has_params {
let params_resp = PBKDFParamRespParams {
count: self.verifier.count,
salt: OctetStr(&self.verifier.salt),
};
resp.params = Some(params_resp);
}
resp.to_tlv(&mut tw, TagType::Anonymous)?;
spake2p.set_context(ctx.rx.as_borrow_slice(), ctx.tx.as_borrow_slice())?;
self.state.make_in_progress(spake2p, &ctx.exch_ctx);
Ok(())
}
}
#[derive(ToTLV)]
#[tlvargs(start = 1)]
struct Pake1Resp<'a> {
pb: OctetStr<'a>,
cb: OctetStr<'a>,
}
#[derive(ToTLV)]
#[tlvargs(start = 1)]
struct PBKDFParamRespParams<'a> {
count: u32,
salt: OctetStr<'a>,
}
#[derive(ToTLV)]
#[tlvargs(start = 1)]
struct PBKDFParamResp<'a> {
init_random: OctetStr<'a>,
our_random: OctetStr<'a>,
local_sessid: u16,
params: Option<PBKDFParamRespParams<'a>>,
}
#[allow(non_snake_case)]
fn extract_pasepake_1_or_3_params(buf: &[u8]) -> Result<&[u8], Error> {
let root = get_root_node_struct(buf)?;
let pA = root.find_tag(1)?.slice()?;
Ok(pA)
}
#[derive(FromTLV)]
#[tlvargs(lifetime = "'a", start = 1)]
struct PBKDFParamReq<'a> {
initiator_random: OctetStr<'a>,
initiator_ssid: u16,
passcode_id: u16,
has_params: bool,
}


@@ -1,31 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#[cfg(target_os = "macos")]
mod sys_macos;
#[cfg(target_os = "macos")]
pub use self::sys_macos::*;
#[cfg(target_os = "linux")]
mod sys_linux;
#[cfg(target_os = "linux")]
pub use self::sys_linux::*;
#[cfg(any(target_os = "macos", target_os = "linux"))]
mod posix;
#[cfg(any(target_os = "macos", target_os = "linux"))]
pub use self::posix::*;


@@ -1,96 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::{
convert::TryInto,
fs::{remove_file, DirBuilder, File},
io::{Read, Write},
sync::{Arc, Mutex, Once},
};
use crate::error::Error;
pub const SPAKE2_ITERATION_COUNT: u32 = 2000;
// The packet pool that packets are allocated from. POSIX systems can use
// higher values than embedded systems can
pub const MAX_PACKET_POOL_SIZE: usize = 25;
pub struct Psm {}
static mut G_PSM: Option<Arc<Mutex<Psm>>> = None;
static INIT: Once = Once::new();
const PSM_DIR: &str = "/tmp/matter_psm";
macro_rules! psm_path {
($key:ident) => {
format!("{}/{}", PSM_DIR, $key)
};
}
impl Psm {
fn new() -> Result<Self, Error> {
let result = DirBuilder::new().create(PSM_DIR);
if let Err(e) = result {
if e.kind() != std::io::ErrorKind::AlreadyExists {
return Err(e.into());
}
}
Ok(Self {})
}
pub fn get() -> Result<Arc<Mutex<Self>>, Error> {
unsafe {
INIT.call_once(|| {
G_PSM = Some(Arc::new(Mutex::new(Psm::new().unwrap())));
});
Ok(G_PSM.as_ref().ok_or(Error::Invalid)?.clone())
}
}
pub fn set_kv_slice(&self, key: &str, val: &[u8]) -> Result<(), Error> {
let mut f = File::create(psm_path!(key))?;
f.write_all(val)?;
Ok(())
}
pub fn get_kv_slice(&self, key: &str, val: &mut Vec<u8>) -> Result<usize, Error> {
let mut f = File::open(psm_path!(key))?;
let len = f.read_to_end(val)?;
Ok(len)
}
pub fn set_kv_u64(&self, key: &str, val: u64) -> Result<(), Error> {
let mut f = File::create(psm_path!(key))?;
f.write_all(&val.to_be_bytes())?;
Ok(())
}
pub fn get_kv_u64(&self, key: &str, val: &mut u64) -> Result<(), Error> {
let mut f = File::open(psm_path!(key))?;
let mut vec = Vec::new();
let _ = f.read_to_end(&mut vec)?;
*val = u64::from_be_bytes(vec.as_slice().try_into()?);
Ok(())
}
pub fn rm(&self, key: &str) {
let _ = remove_file(psm_path!(key));
}
}
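A hypothetical round-trip against the key-value API above (the key name is illustrative); each key is persisted as a file under /tmp/matter_psm:
fn psm_roundtrip() -> Result<(), Error> {
    let psm = Psm::get()?;
    let psm = psm.lock().unwrap();
    psm.set_kv_slice("fb0label", b"kitchen")?;
    let mut val = Vec::new();
    let len = psm.get_kv_slice("fb0label", &mut val)?;
    assert_eq!(&val[..len], &b"kitchen"[..]);
    psm.rm("fb0label");
    Ok(())
}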


@@ -1,58 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::error::Error;
use lazy_static::lazy_static;
use libmdns::{Responder, Service};
use log::info;
use std::sync::{Arc, Mutex};
use std::vec::Vec;
#[allow(dead_code)]
pub struct SysMdnsService {
service: Service,
}
lazy_static! {
static ref RESPONDER: Arc<Mutex<Responder>> = Arc::new(Mutex::new(Responder::new().unwrap()));
}
/// Publish an mDNS service
/// name - can be a service name (comma-separated subtypes may follow)
/// regtype - registration type (e.g. _matter._tcp etc.)
/// port - the port
pub fn sys_publish_service(
name: &str,
regtype: &str,
port: u16,
txt_kvs: &[[&str; 2]],
) -> Result<SysMdnsService, Error> {
info!("mDNS Registration Type {}", regtype);
info!("mDNS properties {:?}", txt_kvs);
let mut properties = Vec::new();
for kvs in txt_kvs {
info!("mDNS TXT key {} val {}", kvs[0], kvs[1]);
properties.push(format!("{}={}", kvs[0], kvs[1]));
}
let properties: Vec<&str> = properties.iter().map(|entry| entry.as_str()).collect();
let responder = RESPONDER.lock().map_err(|_| Error::MdnsError)?;
let service = responder.register(regtype.to_owned(), name.to_owned(), port, &properties);
Ok(SysMdnsService { service })
}


@@ -1,46 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::error::Error;
use astro_dnssd::{DNSServiceBuilder, RegisteredDnsService};
use log::info;
#[allow(dead_code)]
pub struct SysMdnsService {
s: RegisteredDnsService,
}
/// Publish an mDNS service
/// name - can be a service name (comma-separated subtypes may follow)
/// regtype - registration type (e.g. _matter._tcp etc.)
/// port - the port
pub fn sys_publish_service(
name: &str,
regtype: &str,
port: u16,
txt_kvs: &[[&str; 2]],
) -> Result<SysMdnsService, Error> {
let mut builder = DNSServiceBuilder::new(regtype, port).with_name(name);
info!("mDNS Registration Type {}", regtype);
for kvs in txt_kvs {
info!("mDNS TXT key {} val {}", kvs[0], kvs[1]);
builder = builder.with_key_value(kvs[0].to_string(), kvs[1].to_string());
}
let s = builder.register().map_err(|_| Error::MdnsError)?;
Ok(SysMdnsService { s })
}


@@ -1,604 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use boxslab::{BoxSlab, Slab};
use colored::*;
use log::{error, info, trace};
use std::any::Any;
use std::fmt;
use std::time::SystemTime;
use crate::error::Error;
use crate::secure_channel;
use heapless::LinearMap;
use super::packet::PacketPool;
use super::session::CloneData;
use super::{mrp::ReliableMessage, packet::Packet, session::SessionHandle, session::SessionMgr};
pub struct ExchangeCtx<'a> {
pub exch: &'a mut Exchange,
pub sess: SessionHandle<'a>,
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum Role {
Initiator = 0,
Responder = 1,
}
impl Default for Role {
fn default() -> Self {
Role::Initiator
}
}
/// State of the exchange
#[derive(Debug, PartialEq)]
enum State {
/// The exchange is open and active
Open,
/// The exchange is closed, but keys are active since retransmissions/acks may be pending
Close,
/// The exchange is terminated, keys are destroyed, no communication can happen
Terminate,
}
impl Default for State {
fn default() -> Self {
State::Open
}
}
// Instead of just doing an Option<>, we create some special handling
// where the commonly used higher-layer data store doesn't have to do a Box
#[derive(Debug)]
pub enum DataOption {
Boxed(Box<dyn Any>),
Time(SystemTime),
None,
}
impl Default for DataOption {
fn default() -> Self {
DataOption::None
}
}
#[derive(Debug, Default)]
pub struct Exchange {
id: u16,
sess_idx: usize,
role: Role,
state: State,
mrp: ReliableMessage,
// Currently I see this primarily used in PASE and CASE. If that is the limited use
// of this, we might move this into a separate data structure, so as not to burden
// all 'exchanges'.
data: DataOption,
}
impl Exchange {
pub fn new(id: u16, sess_idx: usize, role: Role) -> Exchange {
Exchange {
id,
sess_idx,
role,
state: State::Open,
mrp: ReliableMessage::new(),
..Default::default()
}
}
pub fn terminate(&mut self) {
self.data = DataOption::None;
self.state = State::Terminate;
}
pub fn close(&mut self) {
self.data = DataOption::None;
self.state = State::Close;
}
pub fn is_state_open(&self) -> bool {
self.state == State::Open
}
pub fn is_purgeable(&self) -> bool {
// No Users, No pending ACKs/Retrans
self.state == State::Terminate || (self.state == State::Close && self.mrp.is_empty())
}
pub fn get_id(&self) -> u16 {
self.id
}
pub fn get_role(&self) -> Role {
self.role
}
pub fn is_data_none(&self) -> bool {
matches!(self.data, DataOption::None)
}
pub fn set_data_boxed(&mut self, data: Box<dyn Any>) {
self.data = DataOption::Boxed(data);
}
pub fn clear_data_boxed(&mut self) {
self.data = DataOption::None;
}
pub fn get_data_boxed<T: Any>(&mut self) -> Option<&mut T> {
if let DataOption::Boxed(a) = &mut self.data {
a.downcast_mut::<T>()
} else {
None
}
}
pub fn take_data_boxed<T: Any>(&mut self) -> Option<Box<T>> {
let old = std::mem::replace(&mut self.data, DataOption::None);
if let DataOption::Boxed(d) = old {
d.downcast::<T>().ok()
} else {
self.data = old;
None
}
}
pub fn set_data_time(&mut self, expiry_ts: Option<SystemTime>) {
if let Some(t) = expiry_ts {
self.data = DataOption::Time(t);
}
}
pub fn get_data_time(&self) -> Option<SystemTime> {
match self.data {
DataOption::Time(t) => Some(t),
_ => None,
}
}
pub fn send(
&mut self,
mut proto_tx: BoxSlab<PacketPool>,
session: &mut SessionHandle,
) -> Result<(), Error> {
if self.state == State::Terminate {
info!("Skipping tx for terminated exchange {}", self.id);
return Ok(());
}
trace!("payload: {:x?}", proto_tx.as_borrow_slice());
info!(
"{} with proto id: {} opcode: {}",
"Sending".blue(),
proto_tx.get_proto_id(),
proto_tx.get_proto_opcode(),
);
proto_tx.proto.exch_id = self.id;
if self.role == Role::Initiator {
proto_tx.proto.set_initiator();
}
session.pre_send(&mut proto_tx)?;
self.mrp.pre_send(&mut proto_tx)?;
session.send(proto_tx)
}
}
impl fmt::Display for Exchange {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"exch_id: {:?}, sess_index: {}, role: {:?}, data: {:?}, mrp: {:?}, state: {:?}",
self.id, self.sess_idx, self.role, self.data, self.mrp, self.state
)
}
}
pub fn get_role(is_initiator: bool) -> Role {
if is_initiator {
Role::Initiator
} else {
Role::Responder
}
}
pub fn get_complementary_role(is_initiator: bool) -> Role {
if is_initiator {
Role::Responder
} else {
Role::Initiator
}
}
const MAX_EXCHANGES: usize = 8;
#[derive(Default)]
pub struct ExchangeMgr {
// keys: exch-id
exchanges: LinearMap<u16, Exchange, MAX_EXCHANGES>,
sess_mgr: SessionMgr,
}
pub const MAX_MRP_ENTRIES: usize = 4;
impl ExchangeMgr {
pub fn new(sess_mgr: SessionMgr) -> Self {
Self {
sess_mgr,
exchanges: Default::default(),
}
}
pub fn get_sess_mgr(&mut self) -> &mut SessionMgr {
&mut self.sess_mgr
}
pub fn _get_with_id(
exchanges: &mut LinearMap<u16, Exchange, MAX_EXCHANGES>,
exch_id: u16,
) -> Option<&mut Exchange> {
exchanges.get_mut(&exch_id)
}
pub fn get_with_id(&mut self, exch_id: u16) -> Option<&mut Exchange> {
ExchangeMgr::_get_with_id(&mut self.exchanges, exch_id)
}
fn _get(
exchanges: &mut LinearMap<u16, Exchange, MAX_EXCHANGES>,
sess_idx: usize,
id: u16,
role: Role,
create_new: bool,
) -> Result<&mut Exchange, Error> {
// Not ideal: we scan the list twice here (once for contains_key, once for get_mut)
if !exchanges.contains_key(&(id)) {
if create_new {
// If an exchange doesn't exist, create a new one
info!("Creating new exchange");
let e = Exchange::new(id, sess_idx, role);
if exchanges.insert(id, e).is_err() {
return Err(Error::NoSpace);
}
} else {
return Err(Error::NoSpace);
}
}
// At this point, we would either have inserted the record if 'create_new' was set
// or it existed already
if let Some(result) = exchanges.get_mut(&id) {
if result.get_role() == role && sess_idx == result.sess_idx {
Ok(result)
} else {
Err(Error::NoExchange)
}
} else {
error!("This should never happen");
Err(Error::NoSpace)
}
}
/// The Exchange Mgr receive is like a big processing function
pub fn recv(&mut self) -> Result<Option<(BoxSlab<PacketPool>, ExchangeCtx)>, Error> {
// Get the session
let (mut proto_rx, index) = self.sess_mgr.recv()?;
let index = if let Some(s) = index {
s
} else {
// The sessions were full, evict one session, and re-perform post-recv
let evict_index = self.sess_mgr.get_lru();
self.evict_session(evict_index)?;
info!("Reattempting session creation");
self.sess_mgr.post_recv(&proto_rx)?.ok_or(Error::Invalid)?
};
let mut session = self.sess_mgr.get_session_handle(index);
// Decrypt the message
session.recv(&mut proto_rx)?;
// Get the exchange
let exch = ExchangeMgr::_get(
&mut self.exchanges,
index,
proto_rx.proto.exch_id,
get_complementary_role(proto_rx.proto.is_initiator()),
// We create a new exchange, only if the peer is the initiator
proto_rx.proto.is_initiator(),
)?;
// Message Reliability Protocol
exch.mrp.recv(&proto_rx)?;
if exch.is_state_open() {
Ok(Some((
proto_rx,
ExchangeCtx {
exch,
sess: session,
},
)))
} else {
// Instead of an error, we send None here, because it is likely that
// we just processed an acknowledgement that cleared the exchange
Ok(None)
}
}
pub fn send(&mut self, exch_id: u16, proto_tx: BoxSlab<PacketPool>) -> Result<(), Error> {
let exchange =
ExchangeMgr::_get_with_id(&mut self.exchanges, exch_id).ok_or(Error::NoExchange)?;
let mut session = self.sess_mgr.get_session_handle(exchange.sess_idx);
exchange.send(proto_tx, &mut session)
}
pub fn purge(&mut self) {
let mut to_purge: LinearMap<u16, (), MAX_EXCHANGES> = LinearMap::new();
for (exch_id, exchange) in self.exchanges.iter() {
if exchange.is_purgeable() {
let _ = to_purge.insert(*exch_id, ());
}
}
for (exch_id, _) in to_purge.iter() {
self.exchanges.remove(exch_id);
}
}
pub fn pending_acks(&mut self, expired_entries: &mut LinearMap<u16, (), MAX_MRP_ENTRIES>) {
for (exch_id, exchange) in self.exchanges.iter() {
if exchange.mrp.is_ack_ready() {
expired_entries.insert(*exch_id, ()).unwrap();
}
}
}
pub fn evict_session(&mut self, index: usize) -> Result<(), Error> {
info!("Sessions full, vacating session with index: {}", index);
// If we enter here, we have an LRU session that needs to be reclaimed
// As per the spec, we need to send a CLOSE here
let mut session = self.sess_mgr.get_session_handle(index);
let mut tx = Slab::<PacketPool>::try_new(Packet::new_tx()?).ok_or(Error::NoSpace)?;
secure_channel::common::create_sc_status_report(
&mut tx,
secure_channel::common::SCStatusCodes::CloseSession,
None,
)?;
if let Some((_, exchange)) = self.exchanges.iter_mut().find(|(_, e)| e.sess_idx == index) {
// Send Close_session on this exchange, and then close the session
// Should this be done for all exchanges?
error!("Sending Close Session");
exchange.send(tx, &mut session)?;
// TODO: This wouldn't actually send it out, because 'transport' isn't owned yet.
}
let remove_exchanges: Vec<u16> = self
.exchanges
.iter()
.filter_map(|(eid, e)| {
if e.sess_idx == index {
Some(*eid)
} else {
None
}
})
.collect();
info!(
"Terminating the following exchanges: {:?}",
remove_exchanges
);
for exch_id in remove_exchanges {
// Remove from exchange list
self.exchanges.remove(&exch_id);
}
self.sess_mgr.remove(index);
Ok(())
}
pub fn add_session(&mut self, clone_data: &CloneData) -> Result<SessionHandle, Error> {
let sess_idx = match self.sess_mgr.clone_session(clone_data) {
Ok(idx) => idx,
Err(Error::NoSpace) => {
let evict_index = self.sess_mgr.get_lru();
self.evict_session(evict_index)?;
self.sess_mgr.clone_session(clone_data)?
}
Err(e) => {
return Err(e);
}
};
Ok(self.sess_mgr.get_session_handle(sess_idx))
}
}
impl fmt::Display for ExchangeMgr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, "{{ Session Mgr: {},", self.sess_mgr)?;
writeln!(f, " Exchanges: [")?;
for s in &self.exchanges {
writeln!(f, "{{ {}, }},", s.1)?;
}
writeln!(f, " ]")?;
write!(f, "}}")
}
}
#[cfg(test)]
#[allow(clippy::bool_assert_comparison)]
mod tests {
use crate::{
error::Error,
transport::{
network::{Address, NetworkInterface},
session::{CloneData, SessionMgr, SessionMode, MAX_SESSIONS},
},
};
use super::{ExchangeMgr, Role};
#[test]
fn test_purge() {
let sess_mgr = SessionMgr::new();
let mut mgr = ExchangeMgr::new(sess_mgr);
let _ = ExchangeMgr::_get(&mut mgr.exchanges, 1, 2, Role::Responder, true).unwrap();
let _ = ExchangeMgr::_get(&mut mgr.exchanges, 1, 3, Role::Responder, true).unwrap();
mgr.purge();
assert_eq!(
ExchangeMgr::_get(&mut mgr.exchanges, 1, 2, Role::Responder, false).is_ok(),
true
);
assert_eq!(
ExchangeMgr::_get(&mut mgr.exchanges, 1, 3, Role::Responder, false).is_ok(),
true
);
// Close e1
let e1 = ExchangeMgr::_get(&mut mgr.exchanges, 1, 2, Role::Responder, false).unwrap();
e1.close();
mgr.purge();
assert_eq!(
ExchangeMgr::_get(&mut mgr.exchanges, 1, 2, Role::Responder, false).is_ok(),
false
);
assert_eq!(
ExchangeMgr::_get(&mut mgr.exchanges, 1, 3, Role::Responder, false).is_ok(),
true
);
}
fn get_clone_data(peer_sess_id: u16, local_sess_id: u16) -> CloneData {
CloneData::new(
12341234,
43211234,
peer_sess_id,
local_sess_id,
Address::default(),
SessionMode::Pase,
)
}
fn fill_sessions(mgr: &mut ExchangeMgr, count: usize) {
let mut local_sess_id = 1;
let mut peer_sess_id = 100;
for _ in 1..count {
let clone_data = get_clone_data(peer_sess_id, local_sess_id);
match mgr.add_session(&clone_data) {
Ok(s) => assert_eq!(peer_sess_id, s.get_peer_sess_id()),
Err(Error::NoSpace) => break,
_ => {
panic!("Couldn't create session");
}
}
local_sess_id += 1;
peer_sess_id += 1;
}
}
pub struct DummyNetwork;
impl DummyNetwork {
pub fn new() -> Self {
Self {}
}
}
impl NetworkInterface for DummyNetwork {
fn recv(&self, _in_buf: &mut [u8]) -> Result<(usize, Address), Error> {
Ok((0, Address::default()))
}
fn send(&self, _out_buf: &[u8], _addr: Address) -> Result<usize, Error> {
Ok(0)
}
}
#[test]
/// We purposefully overflow the sessions
/// and when the overflow happens, we confirm that
/// - The sessions are evicted in LRU
/// - The exchanges associated with those sessions are evicted too
fn test_sess_evict() {
let mut sess_mgr = SessionMgr::new();
let transport = Box::new(DummyNetwork::new());
sess_mgr.add_network_interface(transport).unwrap();
let mut mgr = ExchangeMgr::new(sess_mgr);
fill_sessions(&mut mgr, MAX_SESSIONS + 1);
// Sessions are now full from local session id 1 to 16
// Create exchanges for sessions 2 (i.e. session index 1) and 3 (session index 2)
// Exchange IDs are 20 and 30 respectively
let _ = ExchangeMgr::_get(&mut mgr.exchanges, 1, 20, Role::Responder, true).unwrap();
let _ = ExchangeMgr::_get(&mut mgr.exchanges, 2, 30, Role::Responder, true).unwrap();
// Confirm that session ids 1 to MAX_SESSIONS exist
for i in 1..(MAX_SESSIONS + 1) {
assert_eq!(mgr.sess_mgr.get_with_id(i as u16).is_none(), false);
}
// Confirm that the exchanges are around
assert_eq!(mgr.get_with_id(20).is_none(), false);
assert_eq!(mgr.get_with_id(30).is_none(), false);
let mut old_local_sess_id = 1;
let mut new_local_sess_id = 100;
let mut new_peer_sess_id = 200;
for i in 1..(MAX_SESSIONS + 1) {
// Now purposefully overflow the sessions by adding another session
let session = mgr
.add_session(&get_clone_data(new_peer_sess_id, new_local_sess_id))
.unwrap();
assert_eq!(session.get_peer_sess_id(), new_peer_sess_id);
// This should have evicted the session with the old local sess_id
assert_eq!(mgr.sess_mgr.get_with_id(old_local_sess_id).is_none(), true);
new_local_sess_id += 1;
new_peer_sess_id += 1;
old_local_sess_id += 1;
match i {
1 => {
// Both exchanges should exist
assert_eq!(mgr.get_with_id(20).is_none(), false);
assert_eq!(mgr.get_with_id(30).is_none(), false);
}
2 => {
// Exchange 20 would have been evicted
assert_eq!(mgr.get_with_id(20).is_none(), true);
assert_eq!(mgr.get_with_id(30).is_none(), false);
}
3 => {
// Exchange 20 and 30 would have been evicted
assert_eq!(mgr.get_with_id(20).is_none(), true);
assert_eq!(mgr.get_with_id(30).is_none(), true);
}
_ => {}
}
}
// println!("Session mgr {}", mgr.sess_mgr);
}
}
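
The eviction behaviour exercised by test_sess_evict above (fill the fixed-size session table, then evict the least-recently-used session along with any exchanges that belong to it) can be sketched in a few self-contained lines; the names and types below are illustrative and not part of this crate's API.

use std::collections::HashMap;

// Hypothetical capacity, standing in for MAX_SESSIONS.
const CAPACITY: usize = 4;

struct Store {
    // session id -> last-used counter (higher = more recently used)
    sessions: HashMap<u16, u64>,
    // exchange id -> owning session id
    exchanges: HashMap<u16, u16>,
    clock: u64,
}

impl Store {
    fn new() -> Self {
        Self { sessions: HashMap::new(), exchanges: HashMap::new(), clock: 0 }
    }

    fn touch(&mut self, sess_id: u16) {
        self.clock += 1;
        self.sessions.insert(sess_id, self.clock);
    }

    // Insert a new session, evicting the least-recently-used one (and any
    // exchanges that belong to it) when the table is full.
    fn add_session(&mut self, sess_id: u16) {
        if self.sessions.len() == CAPACITY {
            let lru = self.sessions.iter().min_by_key(|&(_, &t)| t).map(|(&id, _)| id);
            if let Some(lru) = lru {
                self.sessions.remove(&lru);
                self.exchanges.retain(|_, owner| *owner != lru);
            }
        }
        self.touch(sess_id);
    }

    fn add_exchange(&mut self, exch_id: u16, sess_id: u16) {
        self.exchanges.insert(exch_id, sess_id);
    }
}

fn main() {
    let mut store = Store::new();
    for sess in 1..=CAPACITY as u16 {
        store.add_session(sess);
    }
    store.add_exchange(20, 1);
    store.add_exchange(30, 2);
    // Overflow: session 1 is the LRU entry, so it and exchange 20 are evicted.
    store.add_session(100);
    assert!(!store.sessions.contains_key(&1));
    assert!(!store.exchanges.contains_key(&20));
    assert!(store.exchanges.contains_key(&30));
    println!("remaining sessions: {:?}", store.sessions.keys().collect::<Vec<_>>());
}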


@ -1,175 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use async_channel::Receiver;
use boxslab::{BoxSlab, Slab};
use heapless::LinearMap;
use log::{debug, error, info};
use crate::error::*;
use crate::transport::mrp::ReliableMessage;
use crate::transport::packet::PacketPool;
use crate::transport::{exchange, packet::Packet, proto_demux, queue, session, udp};
use super::proto_demux::ProtoCtx;
use super::queue::Msg;
pub struct Mgr {
exch_mgr: exchange::ExchangeMgr,
proto_demux: proto_demux::ProtoDemux,
rx_q: Receiver<Msg>,
}
impl Mgr {
pub fn new() -> Result<Mgr, Error> {
let mut sess_mgr = session::SessionMgr::new();
let udp_transport = Box::new(udp::UdpListener::new()?);
sess_mgr.add_network_interface(udp_transport)?;
Ok(Mgr {
proto_demux: proto_demux::ProtoDemux::new(),
exch_mgr: exchange::ExchangeMgr::new(sess_mgr),
rx_q: queue::WorkQ::init()?,
})
}
// Allows registration of different protocols with the Transport/Protocol Demux
pub fn register_protocol(
&mut self,
proto_id_handle: Box<dyn proto_demux::HandleProto>,
) -> Result<(), Error> {
self.proto_demux.register(proto_id_handle)
}
fn send_to_exchange(
&mut self,
exch_id: u16,
proto_tx: BoxSlab<PacketPool>,
) -> Result<(), Error> {
self.exch_mgr.send(exch_id, proto_tx)
}
fn handle_rxtx(&mut self) -> Result<(), Error> {
let result = self.exch_mgr.recv().map_err(|e| {
error!("Error in recv: {:?}", e);
e
})?;
if result.is_none() {
// Nothing to process, return quietly
return Ok(());
}
// result contains something worth processing; we can safely unwrap
// as we already checked for None above
let (rx, exch_ctx) = result.unwrap();
debug!("Exchange is {:?}", exch_ctx.exch);
let tx = Self::new_tx()?;
let mut proto_ctx = ProtoCtx::new(exch_ctx, rx, tx);
// Proto Dispatch
match self.proto_demux.handle(&mut proto_ctx) {
Ok(r) => {
if let proto_demux::ResponseRequired::No = r {
// In this case we only need to send the Ack (if reliability is enabled), which is handled separately
return Ok(());
}
}
Err(e) => {
error!("Error in proto_demux {:?}", e);
return Err(e);
}
}
let ProtoCtx {
exch_ctx,
rx: _,
tx,
} = proto_ctx;
// tx now contains the response payload; send the packet
let exch_id = exch_ctx.exch.get_id();
self.send_to_exchange(exch_id, tx).map_err(|e| {
error!("Error in sending msg {:?}", e);
e
})?;
Ok(())
}
fn handle_queue_msgs(&mut self) -> Result<(), Error> {
if let Ok(msg) = self.rx_q.try_recv() {
match msg {
Msg::NewSession(clone_data) => {
// If a new session was created, add it
let _ = self
.exch_mgr
.add_session(&clone_data)
.map_err(|e| error!("Error adding new session {:?}", e));
}
_ => {
error!("Queue Message Type not yet handled {:?}", msg);
}
}
}
Ok(())
}
pub fn start(&mut self) -> Result<(), Error> {
loop {
// Handle network operations
if self.handle_rxtx().is_err() {
error!("Error in handle_rxtx");
continue;
}
if self.handle_queue_msgs().is_err() {
error!("Error in handle_queue_msg");
continue;
}
// Handle any pending acknowledgement send
let mut acks_to_send: LinearMap<u16, (), { exchange::MAX_MRP_ENTRIES }> =
LinearMap::new();
self.exch_mgr.pending_acks(&mut acks_to_send);
for exch_id in acks_to_send.keys() {
info!("Sending MRP Standalone ACK for exch {}", exch_id);
let mut proto_tx = match Self::new_tx() {
Ok(p) => p,
Err(e) => {
error!("Error creating proto_tx {:?}", e);
break;
}
};
ReliableMessage::prepare_ack(*exch_id, &mut proto_tx);
if let Err(e) = self.send_to_exchange(*exch_id, proto_tx) {
error!("Error in sending Ack {:?}", e);
}
}
// Handle exchange purging
// This need not be done on every turn of the loop; maybe once every 5 iterations or so?
self.exch_mgr.purge();
info!("Exchange Mgr: {}", self.exch_mgr);
}
}
fn new_tx() -> Result<BoxSlab<PacketPool>, Error> {
Slab::<PacketPool>::try_new(Packet::new_tx()?).ok_or(Error::PacketPoolExhaust)
}
}
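
The comment in start() above suggests purging exchanges only every few turns of the loop rather than on each one. A minimal, self-contained sketch of such a counter-based throttle (illustrative names, not this crate's API):

/// Runs the supplied closure only once every `every` calls; a trivial
/// throttle for housekeeping work inside a busy receive loop.
struct Throttled {
    every: u32,
    counter: u32,
}

impl Throttled {
    fn new(every: u32) -> Self {
        Self { every, counter: 0 }
    }

    fn tick(&mut self, mut purge: impl FnMut()) {
        self.counter += 1;
        if self.counter >= self.every {
            self.counter = 0;
            purge();
        }
    }
}

fn main() {
    let mut purges = 0;
    let mut throttle = Throttled::new(5);
    for _ in 0..20 {
        throttle.tick(|| purges += 1);
    }
    assert_eq!(purges, 4); // one purge per 5 iterations
    println!("purged {purges} times");
}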


@ -1,55 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::{
fmt::{Debug, Display},
net::{IpAddr, Ipv4Addr, SocketAddr},
};
use crate::error::Error;
#[derive(PartialEq, Copy, Clone)]
pub enum Address {
Udp(SocketAddr),
}
impl Default for Address {
fn default() -> Self {
Address::Udp(SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 8080))
}
}
impl Display for Address {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Address::Udp(addr) => writeln!(f, "{}", addr),
}
}
}
impl Debug for Address {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Address::Udp(addr) => writeln!(f, "{}", addr),
}
}
}
pub trait NetworkInterface {
fn recv(&self, in_buf: &mut [u8]) -> Result<(usize, Address), Error>;
fn send(&self, out_buf: &[u8], addr: Address) -> Result<usize, Error>;
}


@ -1,239 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use log::{error, trace};
use std::sync::{Mutex, Once};
use boxslab::box_slab;
use crate::{
error::Error,
sys::MAX_PACKET_POOL_SIZE,
utils::{parsebuf::ParseBuf, writebuf::WriteBuf},
};
use super::{
network::Address,
plain_hdr::{self, PlainHdr},
proto_hdr::{self, ProtoHdr},
};
pub const MAX_RX_BUF_SIZE: usize = 1583;
type Buffer = [u8; MAX_RX_BUF_SIZE];
// TODO: I am not very happy with this construction, need to find another way to do this
pub struct BufferPool {
buffers: [Option<Buffer>; MAX_PACKET_POOL_SIZE],
}
impl BufferPool {
const INIT: Option<Buffer> = None;
fn get() -> &'static Mutex<BufferPool> {
static mut BUFFER_HOLDER: Option<Mutex<BufferPool>> = None;
static ONCE: Once = Once::new();
unsafe {
ONCE.call_once(|| {
BUFFER_HOLDER = Some(Mutex::new(BufferPool {
buffers: [BufferPool::INIT; MAX_PACKET_POOL_SIZE],
}));
});
BUFFER_HOLDER.as_ref().unwrap()
}
}
pub fn alloc() -> Option<(usize, &'static mut Buffer)> {
trace!("Buffer Alloc called\n");
let mut pool = BufferPool::get().lock().unwrap();
for i in 0..MAX_PACKET_POOL_SIZE {
if pool.buffers[i].is_none() {
pool.buffers[i] = Some([0; MAX_RX_BUF_SIZE]);
// Sigh! This bypasses the borrow-checker, which would otherwise complain that we are stealing
// a mutable reference from under the lock.
// In this case the lock only protects the setting of Some/None;
// each buffer is then accessed independently by a single owner
let buffer = unsafe { &mut *(pool.buffers[i].as_mut().unwrap() as *mut Buffer) };
return Some((i, buffer));
}
}
None
}
pub fn free(index: usize) {
trace!("Buffer Free called\n");
let mut pool = BufferPool::get().lock().unwrap();
if pool.buffers[index].is_some() {
pool.buffers[index] = None;
}
}
}
#[derive(PartialEq)]
enum RxState {
Uninit,
PlainDecode,
ProtoDecode,
}
enum Direction<'a> {
Tx(WriteBuf<'a>),
Rx(ParseBuf<'a>, RxState),
}
pub struct Packet<'a> {
pub plain: PlainHdr,
pub proto: ProtoHdr,
pub peer: Address,
data: Direction<'a>,
buffer_index: usize,
}
impl<'a> Packet<'a> {
const HDR_RESERVE: usize = plain_hdr::max_plain_hdr_len() + proto_hdr::max_proto_hdr_len();
pub fn new_rx() -> Result<Self, Error> {
let (buffer_index, buffer) = BufferPool::alloc().ok_or(Error::NoSpace)?;
let buf_len = buffer.len();
Ok(Self {
plain: Default::default(),
proto: Default::default(),
buffer_index,
peer: Address::default(),
data: Direction::Rx(ParseBuf::new(buffer, buf_len), RxState::Uninit),
})
}
pub fn new_tx() -> Result<Self, Error> {
let (buffer_index, buffer) = BufferPool::alloc().ok_or(Error::NoSpace)?;
let buf_len = buffer.len();
let mut wb = WriteBuf::new(buffer, buf_len);
wb.reserve(Packet::HDR_RESERVE)?;
let mut p = Self {
plain: Default::default(),
proto: Default::default(),
buffer_index,
peer: Address::default(),
data: Direction::Tx(wb),
};
// Reliability on by default
p.proto.set_reliable();
Ok(p)
}
pub fn as_borrow_slice(&mut self) -> &mut [u8] {
match &mut self.data {
Direction::Rx(pb, _) => pb.as_borrow_slice(),
Direction::Tx(wb) => wb.as_mut_slice(),
}
}
pub fn get_parsebuf(&mut self) -> Result<&mut ParseBuf<'a>, Error> {
if let Direction::Rx(pbuf, _) = &mut self.data {
Ok(pbuf)
} else {
Err(Error::Invalid)
}
}
pub fn get_writebuf(&mut self) -> Result<&mut WriteBuf<'a>, Error> {
if let Direction::Tx(wbuf) = &mut self.data {
Ok(wbuf)
} else {
Err(Error::Invalid)
}
}
pub fn get_proto_id(&self) -> u16 {
self.proto.proto_id
}
pub fn set_proto_id(&mut self, proto_id: u16) {
self.proto.proto_id = proto_id;
}
pub fn get_proto_opcode(&self) -> u8 {
self.proto.proto_opcode
}
pub fn set_proto_opcode(&mut self, proto_opcode: u8) {
self.proto.proto_opcode = proto_opcode;
}
pub fn set_reliable(&mut self) {
self.proto.set_reliable()
}
pub fn unset_reliable(&mut self) {
self.proto.unset_reliable()
}
pub fn is_reliable(&mut self) -> bool {
self.proto.is_reliable()
}
pub fn proto_decode(&mut self, peer_nodeid: u64, dec_key: Option<&[u8]>) -> Result<(), Error> {
match &mut self.data {
Direction::Rx(pb, state) => {
if *state == RxState::PlainDecode {
*state = RxState::ProtoDecode;
self.proto
.decrypt_and_decode(&self.plain, pb, peer_nodeid, dec_key)
} else {
error!("Invalid state for proto_decode");
Err(Error::InvalidState)
}
}
_ => Err(Error::InvalidState),
}
}
pub fn is_plain_hdr_decoded(&self) -> Result<bool, Error> {
match &self.data {
Direction::Rx(_, state) => match state {
RxState::Uninit => Ok(false),
_ => Ok(true),
},
_ => Err(Error::InvalidState),
}
}
pub fn plain_hdr_decode(&mut self) -> Result<(), Error> {
match &mut self.data {
Direction::Rx(pb, state) => {
if *state == RxState::Uninit {
*state = RxState::PlainDecode;
self.plain.decode(pb)
} else {
error!("Invalid state for plain_decode");
Err(Error::InvalidState)
}
}
_ => Err(Error::InvalidState),
}
}
}
impl<'a> Drop for Packet<'a> {
fn drop(&mut self) {
BufferPool::free(self.buffer_index);
trace!("Dropping Packet......");
}
}
box_slab!(PacketPool, Packet<'static>, MAX_PACKET_POOL_SIZE);
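
The comment inside BufferPool::alloc above notes that the mutex only guards the occupied/free state of each slot, while the buffer contents are handed out as raw mutable references. One safer way to express the same fixed-size pool is to hand out owned boxes and return them on free; the sketch below is a self-contained illustration of that alternative, with sizes borrowed from the constants above and names that are not this crate's API.

use std::sync::Mutex;

const POOL_SIZE: usize = 4;   // stand-in for MAX_PACKET_POOL_SIZE
const BUF_SIZE: usize = 1583; // same MAX_RX_BUF_SIZE as above

type Buffer = Box<[u8; BUF_SIZE]>;

/// Fixed-size pool: the mutex protects the free-list only; callers own the
/// buffer while it is checked out and must hand it back via `free`.
struct BufferPool {
    slots: Mutex<Vec<Option<Buffer>>>,
}

impl BufferPool {
    fn new() -> Self {
        let slots = (0..POOL_SIZE).map(|_| Some(Box::new([0u8; BUF_SIZE]))).collect();
        Self { slots: Mutex::new(slots) }
    }

    /// Check out a buffer together with its slot index, or None if exhausted.
    fn alloc(&self) -> Option<(usize, Buffer)> {
        let mut slots = self.slots.lock().unwrap();
        slots.iter_mut().enumerate().find_map(|(i, s)| s.take().map(|b| (i, b)))
    }

    /// Return a buffer to its slot.
    fn free(&self, index: usize, buf: Buffer) {
        self.slots.lock().unwrap()[index] = Some(buf);
    }
}

fn main() {
    let pool = BufferPool::new();
    let (idx, mut buf) = pool.alloc().expect("pool has free buffers");
    buf[0] = 0x42;
    pool.free(idx, buf);
    assert!(pool.alloc().is_some()); // the slot is reusable after free
}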


@ -1,95 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use boxslab::BoxSlab;
use crate::error::*;
use super::exchange::ExchangeCtx;
use super::packet::PacketPool;
const MAX_PROTOCOLS: usize = 4;
#[derive(PartialEq, Debug)]
pub enum ResponseRequired {
Yes,
No,
}
pub struct ProtoDemux {
proto_id_handlers: [Option<Box<dyn HandleProto>>; MAX_PROTOCOLS],
}
/// This is the context in which a receive packet is being processed
pub struct ProtoCtx<'a> {
/// This is the exchange context, that includes the exchange and the session
pub exch_ctx: ExchangeCtx<'a>,
/// This is the received buffer for this transaction
pub rx: BoxSlab<PacketPool>,
/// This is the transmit buffer for this transaction
pub tx: BoxSlab<PacketPool>,
}
impl<'a> ProtoCtx<'a> {
pub fn new(
exch_ctx: ExchangeCtx<'a>,
rx: BoxSlab<PacketPool>,
tx: BoxSlab<PacketPool>,
) -> Self {
Self { exch_ctx, rx, tx }
}
}
pub trait HandleProto {
fn handle_proto_id(&mut self, proto_ctx: &mut ProtoCtx) -> Result<ResponseRequired, Error>;
fn get_proto_id(&self) -> usize;
fn handle_session_event(&self) -> Result<(), Error> {
Ok(())
}
}
impl Default for ProtoDemux {
fn default() -> Self {
Self::new()
}
}
impl ProtoDemux {
pub fn new() -> ProtoDemux {
ProtoDemux {
proto_id_handlers: [None, None, None, None],
}
}
pub fn register(&mut self, proto_id_handle: Box<dyn HandleProto>) -> Result<(), Error> {
let proto_id = proto_id_handle.get_proto_id();
self.proto_id_handlers[proto_id] = Some(proto_id_handle);
Ok(())
}
pub fn handle(&mut self, proto_ctx: &mut ProtoCtx) -> Result<ResponseRequired, Error> {
let proto_id = proto_ctx.rx.get_proto_id() as usize;
if proto_id >= MAX_PROTOCOLS {
return Err(Error::Invalid);
}
return self.proto_id_handlers[proto_id]
.as_mut()
.ok_or(Error::NoHandler)?
.handle_proto_id(proto_ctx);
}
}
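
ProtoDemux above dispatches on the packet's protocol id into a small fixed table of boxed handlers. A minimal, self-contained analogue of that registration/dispatch pattern (hypothetical trait and types, not this crate's API):

const MAX_PROTOCOLS: usize = 4;

trait Handler {
    fn proto_id(&self) -> usize;
    fn handle(&mut self, payload: &[u8]) -> String;
}

struct Echo;
impl Handler for Echo {
    fn proto_id(&self) -> usize { 1 }
    fn handle(&mut self, payload: &[u8]) -> String {
        format!("echo {} bytes", payload.len())
    }
}

struct Demux {
    handlers: [Option<Box<dyn Handler>>; MAX_PROTOCOLS],
}

impl Demux {
    fn new() -> Self {
        Self { handlers: [None, None, None, None] }
    }

    /// Register a handler in the slot given by its protocol id.
    fn register(&mut self, h: Box<dyn Handler>) -> Result<(), &'static str> {
        let id = h.proto_id();
        if id >= MAX_PROTOCOLS {
            return Err("protocol id out of range");
        }
        self.handlers[id] = Some(h);
        Ok(())
    }

    /// Route a payload to the handler registered for `proto_id`.
    fn dispatch(&mut self, proto_id: usize, payload: &[u8]) -> Result<String, &'static str> {
        if proto_id >= MAX_PROTOCOLS {
            return Err("invalid protocol id");
        }
        self.handlers[proto_id]
            .as_mut()
            .ok_or("no handler registered")
            .map(|h| h.handle(payload))
    }
}

fn main() {
    let mut demux = Demux::new();
    demux.register(Box::new(Echo)).unwrap();
    println!("{}", demux.dispatch(1, b"hello").unwrap());
    assert!(demux.dispatch(2, b"hello").is_err());
}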


@ -1,67 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::sync::Once;
use async_channel::{bounded, Receiver, Sender};
use crate::error::Error;
use super::session::CloneData;
#[derive(Debug)]
pub enum Msg {
Tx(),
Rx(),
NewSession(CloneData),
}
#[derive(Clone)]
pub struct WorkQ {
tx: Sender<Msg>,
}
static mut G_WQ: Option<WorkQ> = None;
static INIT: Once = Once::new();
impl WorkQ {
pub fn init() -> Result<Receiver<Msg>, Error> {
let (tx, rx) = bounded::<Msg>(3);
WorkQ::configure(tx);
Ok(rx)
}
fn configure(tx: Sender<Msg>) {
unsafe {
INIT.call_once(|| {
G_WQ = Some(WorkQ { tx });
});
}
}
pub fn get() -> Result<WorkQ, Error> {
unsafe { G_WQ.as_ref().cloned().ok_or(Error::Invalid) }
}
pub fn sync_send(&self, msg: Msg) -> Result<(), Error> {
smol::block_on(self.send(msg))
}
pub async fn send(&self, msg: Msg) -> Result<(), Error> {
self.tx.send(msg).await.map_err(|e| e.into())
}
}
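
WorkQ above builds a process-wide queue handle with `static mut` plus `Once`. On current Rust the same idea can be expressed without unsafe via `std::sync::OnceLock`; the sketch below is a self-contained illustration using a bounded std channel in place of the async channel (illustrative names, not this crate's API).

use std::sync::mpsc::{sync_channel, Receiver, SyncSender};
use std::sync::OnceLock;

#[derive(Debug)]
enum Msg {
    NewSession(u16),
}

// Global sender; initialized exactly once by init().
static WORK_Q: OnceLock<SyncSender<Msg>> = OnceLock::new();

/// Create the bounded queue and publish the sending half globally; the caller
/// keeps the receiving half, mirroring the shape of WorkQ::init() above.
fn init() -> Receiver<Msg> {
    let (tx, rx) = sync_channel::<Msg>(3);
    if WORK_Q.set(tx).is_err() {
        panic!("work queue already initialized");
    }
    rx
}

fn send(msg: Msg) -> Result<(), &'static str> {
    WORK_Q
        .get()
        .ok_or("work queue not initialized")?
        .send(msg)
        .map_err(|_| "receiver dropped")
}

fn main() {
    let rx = init();
    send(Msg::NewSession(42)).unwrap();
    println!("got {:?}", rx.recv().unwrap());
}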


@ -1,57 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::error::*;
use smol::net::{Ipv6Addr, UdpSocket};
use super::network::{Address, NetworkInterface};
// We could get rid of the smol here, but keeping it around in case we have to process
// any other events in this thread's context
pub struct UdpListener {
socket: UdpSocket,
}
// Currently matches the value used in the connectedhomeip repo
pub const MAX_RX_BUF_SIZE: usize = 1583;
/* The Matter Port */
pub const MATTER_PORT: u16 = 5540;
impl UdpListener {
pub fn new() -> Result<UdpListener, Error> {
Ok(UdpListener {
socket: smol::block_on(UdpSocket::bind((Ipv6Addr::UNSPECIFIED, MATTER_PORT)))?,
})
}
}
impl NetworkInterface for UdpListener {
fn recv(&self, in_buf: &mut [u8]) -> Result<(usize, Address), Error> {
let (size, addr) = smol::block_on(self.socket.recv_from(in_buf)).map_err(|e| {
println!("Error on the network: {:?}", e);
Error::Network
})?;
Ok((size, Address::Udp(addr)))
}
fn send(&self, out_buf: &[u8], addr: Address) -> Result<usize, Error> {
match addr {
Address::Udp(addr) => Ok(smol::block_on(self.socket.send_to(out_buf, addr))?),
}
}
}
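
UdpListener above binds a socket (in the real stack on the Matter port, 5540) and blocks on recv/send. The same blocking flow with plain std networking, as a self-contained sketch that uses ephemeral localhost ports so it does not clash with a running Matter stack:

use std::net::UdpSocket;

fn main() -> std::io::Result<()> {
    // Bind on an ephemeral localhost port; a real Matter transport would bind
    // (Ipv6Addr::UNSPECIFIED, 5540) as above.
    let server = UdpSocket::bind("127.0.0.1:0")?;
    let server_addr = server.local_addr()?;

    let client = UdpSocket::bind("127.0.0.1:0")?;
    client.send_to(b"ping", server_addr)?;

    let mut buf = [0u8; 1583]; // same MAX_RX_BUF_SIZE as above
    let (size, peer) = server.recv_from(&mut buf)?;
    println!("received {} bytes from {}", size, peer);
    server.send_to(&buf[..size], peer)?;

    let (size, _) = client.recv_from(&mut buf)?;
    assert_eq!(&buf[..size], b"ping");
    Ok(())
}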


@ -1,250 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::sync::{Arc, Mutex, Once};
use matter::{
data_model::objects::{
Access, AttrDetails, AttrValue, Attribute, Cluster, ClusterType, EncodeValue, Encoder,
Quality,
},
error::Error,
interaction_model::{
command::CommandReq,
core::IMStatusCode,
messages::ib::{self, attr_list_write, ListOperation},
},
tlv::{TLVElement, TLVWriter, TagType, ToTLV},
};
use num_derive::FromPrimitive;
pub const ID: u32 = 0xABCD;
#[derive(FromPrimitive)]
pub enum Commands {
EchoReq = 0x00,
EchoResp = 0x01,
}
/// Used in the tests to validate any writes that may have been made
/// to the custom data parts of the cluster
pub struct TestChecker {
pub write_list: [Option<u16>; WRITE_LIST_MAX],
}
static mut G_TEST_CHECKER: Option<Arc<Mutex<TestChecker>>> = None;
static INIT: Once = Once::new();
impl TestChecker {
fn new() -> Self {
Self {
write_list: [None; WRITE_LIST_MAX],
}
}
/// Get a handle to the globally unique TestChecker instance
pub fn get() -> Result<Arc<Mutex<Self>>, Error> {
unsafe {
INIT.call_once(|| {
G_TEST_CHECKER = Some(Arc::new(Mutex::new(Self::new())));
});
Ok(G_TEST_CHECKER.as_ref().ok_or(Error::Invalid)?.clone())
}
}
}
pub const WRITE_LIST_MAX: usize = 5;
pub struct EchoCluster {
pub base: Cluster,
pub multiplier: u8,
}
#[derive(FromPrimitive)]
pub enum Attributes {
Att1 = 0,
Att2 = 1,
AttWrite = 2,
AttCustom = 3,
AttWriteList = 4,
}
pub const ATTR_CUSTOM_VALUE: u32 = 0xcafebeef;
pub const ATTR_WRITE_DEFAULT_VALUE: u16 = 0xcafe;
impl ClusterType for EchoCluster {
fn base(&self) -> &Cluster {
&self.base
}
fn base_mut(&mut self) -> &mut Cluster {
&mut self.base
}
fn read_custom_attribute(&self, encoder: &mut dyn Encoder, attr: &AttrDetails) {
match num::FromPrimitive::from_u16(attr.attr_id) {
Some(Attributes::AttCustom) => encoder.encode(EncodeValue::Closure(&|tag, tw| {
let _ = tw.u32(tag, ATTR_CUSTOM_VALUE);
})),
Some(Attributes::AttWriteList) => {
let tc_handle = TestChecker::get().unwrap();
let tc = tc_handle.lock().unwrap();
encoder.encode(EncodeValue::Closure(&|tag, tw| {
let _ = tw.start_array(tag);
for i in tc.write_list.iter().flatten() {
let _ = tw.u16(TagType::Anonymous, *i);
}
let _ = tw.end_container();
}))
}
_ => (),
}
}
fn write_attribute(
&mut self,
attr: &AttrDetails,
data: &TLVElement,
) -> Result<(), IMStatusCode> {
match num::FromPrimitive::from_u16(attr.attr_id) {
Some(Attributes::AttWriteList) => {
attr_list_write(attr, data, |op, data| self.write_attr_list(&op, data))
}
_ => self.base.write_attribute_from_tlv(attr.attr_id, data),
}
}
fn handle_command(&mut self, cmd_req: &mut CommandReq) -> Result<(), IMStatusCode> {
let cmd = cmd_req
.cmd
.path
.leaf
.map(num::FromPrimitive::from_u32)
.ok_or(IMStatusCode::UnsupportedCommand)?
.ok_or(IMStatusCode::UnsupportedCommand)?;
match cmd {
// This will generate an echo response on the same endpoint
// with data multiplied by the multiplier
Commands::EchoReq => {
let a = cmd_req.data.u8().unwrap();
let mut echo_response = cmd_req.cmd;
echo_response.path.leaf = Some(Commands::EchoResp as u32);
let cmd_data = |tag: TagType, t: &mut TLVWriter| {
let _ = t.start_struct(tag);
// Echo = input * self.multiplier
let _ = t.u8(TagType::Context(0), a * self.multiplier);
let _ = t.end_container();
};
let invoke_resp = ib::InvResp::Cmd(ib::CmdData::new(
echo_response,
EncodeValue::Closure(&cmd_data),
));
let _ = invoke_resp.to_tlv(cmd_req.resp, TagType::Anonymous);
cmd_req.trans.complete();
}
_ => {
return Err(IMStatusCode::UnsupportedCommand);
}
}
Ok(())
}
}
impl EchoCluster {
pub fn new(multiplier: u8) -> Result<Box<Self>, Error> {
let mut c = Box::new(Self {
base: Cluster::new(ID)?,
multiplier,
});
c.base.add_attribute(Attribute::new(
Attributes::Att1 as u16,
AttrValue::Uint16(0x1234),
Access::RV,
Quality::NONE,
))?;
c.base.add_attribute(Attribute::new(
Attributes::Att2 as u16,
AttrValue::Uint16(0x5678),
Access::RV,
Quality::NONE,
))?;
c.base.add_attribute(Attribute::new(
Attributes::AttWrite as u16,
AttrValue::Uint16(ATTR_WRITE_DEFAULT_VALUE),
Access::WRITE | Access::NEED_ADMIN,
Quality::NONE,
))?;
c.base.add_attribute(Attribute::new(
Attributes::AttCustom as u16,
AttrValue::Custom,
Access::READ | Access::NEED_VIEW,
Quality::NONE,
))?;
c.base.add_attribute(Attribute::new(
Attributes::AttWriteList as u16,
AttrValue::Custom,
Access::WRITE | Access::NEED_ADMIN,
Quality::NONE,
))?;
Ok(c)
}
fn write_attr_list(
&mut self,
op: &ListOperation,
data: &TLVElement,
) -> Result<(), IMStatusCode> {
let tc_handle = TestChecker::get().unwrap();
let mut tc = tc_handle.lock().unwrap();
match op {
ListOperation::AddItem => {
let data = data.u16().map_err(|_| IMStatusCode::Failure)?;
for i in 0..WRITE_LIST_MAX {
if tc.write_list[i].is_none() {
tc.write_list[i] = Some(data);
return Ok(());
}
}
Err(IMStatusCode::ResourceExhausted)
}
ListOperation::EditItem(index) => {
let data = data.u16().map_err(|_| IMStatusCode::Failure)?;
if tc.write_list[*index as usize].is_some() {
tc.write_list[*index as usize] = Some(data);
Ok(())
} else {
Err(IMStatusCode::InvalidAction)
}
}
ListOperation::DeleteItem(index) => {
if tc.write_list[*index as usize].is_some() {
tc.write_list[*index as usize] = None;
Ok(())
} else {
Err(IMStatusCode::InvalidAction)
}
}
ListOperation::DeleteList => {
for i in 0..WRITE_LIST_MAX {
tc.write_list[i] = None;
}
Ok(())
}
}
}
}
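
write_attr_list above applies the interaction-model list operations (add item, edit item, delete item, delete list) to a fixed-size slot array. The same state machine in a self-contained form (hypothetical types and error names, not this crate's API):

const WRITE_LIST_MAX: usize = 5;

enum ListOperation {
    AddItem(u16),
    EditItem(usize, u16),
    DeleteItem(usize),
    DeleteList,
}

#[derive(Debug, PartialEq)]
enum ListError {
    ResourceExhausted,
    InvalidAction,
}

fn apply(list: &mut [Option<u16>; WRITE_LIST_MAX], op: ListOperation) -> Result<(), ListError> {
    match op {
        // Take the first free slot, or fail when the list is full.
        ListOperation::AddItem(v) => list
            .iter_mut()
            .find(|s| s.is_none())
            .map(|s| *s = Some(v))
            .ok_or(ListError::ResourceExhausted),
        // Editing or deleting requires the slot to be occupied.
        ListOperation::EditItem(i, v) if list[i].is_some() => {
            list[i] = Some(v);
            Ok(())
        }
        ListOperation::DeleteItem(i) if list[i].is_some() => {
            list[i] = None;
            Ok(())
        }
        ListOperation::DeleteList => {
            list.iter_mut().for_each(|s| *s = None);
            Ok(())
        }
        _ => Err(ListError::InvalidAction),
    }
}

fn main() {
    let mut list = [None; WRITE_LIST_MAX];
    apply(&mut list, ListOperation::AddItem(7)).unwrap();
    apply(&mut list, ListOperation::EditItem(0, 9)).unwrap();
    assert_eq!(list[0], Some(9));
    assert_eq!(
        apply(&mut list, ListOperation::DeleteItem(1)),
        Err(ListError::InvalidAction)
    );
    apply(&mut list, ListOperation::DeleteList).unwrap();
    assert!(list.iter().all(|s| s.is_none()));
}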


@ -1,201 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::common::echo_cluster;
use boxslab::Slab;
use matter::{
acl::{AclEntry, AclMgr, AuthMode},
data_model::{
cluster_basic_information::BasicInfoConfig,
core::DataModel,
device_types::device_type_add_on_off_light,
objects::Privilege,
sdm::dev_att::{DataType, DevAttDataFetcher},
},
error::Error,
fabric::FabricMgr,
interaction_model::{core::OpCode, InteractionModel},
secure_channel::pake::PaseMgr,
tlv::{TLVWriter, TagType, ToTLV},
transport::packet::Packet,
transport::{
exchange::{self, Exchange, ExchangeCtx},
network::Address,
packet::PacketPool,
proto_demux::ProtoCtx,
session::{CloneData, NocCatIds, SessionMgr, SessionMode},
},
transport::{proto_demux::HandleProto, session::CaseDetails},
utils::writebuf::WriteBuf,
};
use std::{
net::{Ipv4Addr, SocketAddr},
sync::Arc,
};
pub struct DummyDevAtt {}
impl DevAttDataFetcher for DummyDevAtt {
fn get_devatt_data(&self, _data_type: DataType, _data: &mut [u8]) -> Result<usize, Error> {
Ok(2)
}
}
/// An Interaction Model Engine to facilitate easy testing
pub struct ImEngine {
pub dm: DataModel,
pub acl_mgr: Arc<AclMgr>,
pub im: Box<InteractionModel>,
// By default, a new exchange is created for every run; if you wish to instead use a specific
// exchange, set this variable. This is helpful in situations where you have to run multiple
// actions in the same transaction (exchange)
pub exch: Option<Exchange>,
}
pub struct ImInput<'a> {
action: OpCode,
data: &'a dyn ToTLV,
peer_id: u64,
cat_ids: NocCatIds,
}
pub const IM_ENGINE_PEER_ID: u64 = 445566;
impl<'a> ImInput<'a> {
pub fn new(action: OpCode, data: &'a dyn ToTLV) -> Self {
Self {
action,
data,
peer_id: IM_ENGINE_PEER_ID,
cat_ids: Default::default(),
}
}
pub fn set_peer_node_id(&mut self, peer: u64) {
self.peer_id = peer;
}
pub fn set_cat_ids(&mut self, cat_ids: &NocCatIds) {
self.cat_ids = *cat_ids;
}
}
impl ImEngine {
/// Create the interaction model engine
pub fn new() -> Self {
let dev_det = BasicInfoConfig {
vid: 10,
pid: 11,
hw_ver: 12,
sw_ver: 13,
sw_ver_str: "13".to_string(),
serial_no: "aabbccdd".to_string(),
device_name: "Test Device".to_string(),
};
let dev_att = Box::new(DummyDevAtt {});
let fabric_mgr = Arc::new(FabricMgr::new().unwrap());
let acl_mgr = Arc::new(AclMgr::new_with(false).unwrap());
let pase_mgr = PaseMgr::new();
acl_mgr.erase_all();
let mut default_acl = AclEntry::new(1, Privilege::ADMIN, AuthMode::Case);
// Only allow the standard peer node id of the IM Engine
default_acl.add_subject(IM_ENGINE_PEER_ID).unwrap();
acl_mgr.add(default_acl).unwrap();
let dm = DataModel::new(dev_det, dev_att, fabric_mgr, acl_mgr.clone(), pase_mgr).unwrap();
{
let mut d = dm.node.write().unwrap();
let light_endpoint = device_type_add_on_off_light(&mut d).unwrap();
d.add_cluster(0, echo_cluster::EchoCluster::new(2).unwrap())
.unwrap();
d.add_cluster(light_endpoint, echo_cluster::EchoCluster::new(3).unwrap())
.unwrap();
}
let im = Box::new(InteractionModel::new(Box::new(dm.clone())));
Self {
dm,
acl_mgr,
im,
exch: None,
}
}
/// Run a transaction through the interaction model engine
pub fn process<'a>(&mut self, input: &ImInput, data_out: &'a mut [u8]) -> (u8, &'a mut [u8]) {
let mut new_exch = Exchange::new(1, 0, exchange::Role::Responder);
// Choose whether to use a new exchange, or use the one from the ImEngine configuration
let exch = self.exch.as_mut().unwrap_or(&mut new_exch);
let mut sess_mgr: SessionMgr = Default::default();
let clone_data = CloneData::new(
123456,
input.peer_id,
10,
30,
Address::Udp(SocketAddr::new(
std::net::IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
5542,
)),
SessionMode::Case(CaseDetails::new(1, &input.cat_ids)),
);
let sess_idx = sess_mgr.clone_session(&clone_data).unwrap();
let sess = sess_mgr.get_session_handle(sess_idx);
let exch_ctx = ExchangeCtx { exch, sess };
let mut rx = Slab::<PacketPool>::try_new(Packet::new_rx().unwrap()).unwrap();
let tx = Slab::<PacketPool>::try_new(Packet::new_tx().unwrap()).unwrap();
// Create fake rx packet
rx.set_proto_id(0x01);
rx.set_proto_opcode(input.action as u8);
rx.peer = Address::default();
{
let mut buf = [0u8; 400];
let buf_len = buf.len();
let mut wb = WriteBuf::new(&mut buf, buf_len);
let mut tw = TLVWriter::new(&mut wb);
input.data.to_tlv(&mut tw, TagType::Anonymous).unwrap();
let input_data = wb.as_borrow_slice();
let in_data_len = input_data.len();
let rx_buf = rx.as_borrow_slice();
rx_buf[..in_data_len].copy_from_slice(input_data);
rx.get_parsebuf().unwrap().set_len(in_data_len);
}
let mut ctx = ProtoCtx::new(exch_ctx, rx, tx);
self.im.handle_proto_id(&mut ctx).unwrap();
let out_data_len = ctx.tx.as_borrow_slice().len();
data_out[..out_data_len].copy_from_slice(ctx.tx.as_borrow_slice());
let response = ctx.tx.get_proto_opcode();
(response, &mut data_out[..out_data_len])
}
}
// Create an Interaction Model, Data Model and run a rx/tx transaction through it
pub fn im_engine<'a>(
action: OpCode,
data: &dyn ToTLV,
data_out: &'a mut [u8],
) -> (DataModel, u8, &'a mut [u8]) {
let mut engine = ImEngine::new();
let input = ImInput::new(action, data);
let (response, output) = engine.process(&input, data_out);
(engine.dm, response, output)
}


@ -1,224 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use matter::{
data_model::{
cluster_basic_information as basic_info, cluster_on_off as onoff,
objects::{EncodeValue, GlobalElements},
sdm::{admin_commissioning as adm_comm, general_commissioning as gen_comm, noc},
system_model::{access_control as acl, descriptor},
},
interaction_model::{
core::{IMStatusCode, OpCode},
messages::{
ib::{AttrData, AttrPath, AttrResp},
msg::{ReadReq, ReportDataMsg, StatusResp, SubscribeResp},
},
messages::{msg::SubscribeReq, GenericPath},
},
tlv::{self, ElementType, FromTLV, TLVElement, TagType, ToTLV},
transport::{
exchange::{self, Exchange},
udp::MAX_RX_BUF_SIZE,
},
};
use crate::{
attr_data,
common::{
attributes::*,
echo_cluster as echo,
im_engine::{ImEngine, ImInput},
},
};
pub struct LongRead {
im_engine: ImEngine,
}
impl LongRead {
pub fn new() -> Self {
let mut im_engine = ImEngine::new();
// Use the same exchange for all parts of the transaction
im_engine.exch = Some(Exchange::new(1, 0, exchange::Role::Responder));
Self { im_engine }
}
pub fn process<'a>(
&mut self,
action: OpCode,
data: &dyn ToTLV,
data_out: &'a mut [u8],
) -> (u8, &'a mut [u8]) {
let input = ImInput::new(action, data);
let (response, output) = self.im_engine.process(&input, data_out);
(response, output)
}
}
fn wildcard_read_resp(part: u8) -> Vec<AttrResp<'static>> {
// For brevity, we only check the AttrPath, not the actual 'data'
let dont_care = ElementType::U8(0);
let part1 = vec![
attr_data!(0, 29, GlobalElements::FeatureMap, dont_care),
attr_data!(0, 29, GlobalElements::AttributeList, dont_care),
attr_data!(0, 29, descriptor::Attributes::DeviceTypeList, dont_care),
attr_data!(0, 29, descriptor::Attributes::ServerList, dont_care),
attr_data!(0, 29, descriptor::Attributes::PartsList, dont_care),
attr_data!(0, 29, descriptor::Attributes::ClientList, dont_care),
attr_data!(0, 40, GlobalElements::FeatureMap, dont_care),
attr_data!(0, 40, GlobalElements::AttributeList, dont_care),
attr_data!(0, 40, basic_info::Attributes::DMRevision, dont_care),
attr_data!(0, 40, basic_info::Attributes::VendorId, dont_care),
attr_data!(0, 40, basic_info::Attributes::ProductId, dont_care),
attr_data!(0, 40, basic_info::Attributes::HwVer, dont_care),
attr_data!(0, 40, basic_info::Attributes::SwVer, dont_care),
attr_data!(0, 40, basic_info::Attributes::SwVerString, dont_care),
attr_data!(0, 40, basic_info::Attributes::SerialNo, dont_care),
attr_data!(0, 48, GlobalElements::FeatureMap, dont_care),
attr_data!(0, 48, GlobalElements::AttributeList, dont_care),
attr_data!(0, 48, gen_comm::Attributes::BreadCrumb, dont_care),
attr_data!(0, 48, gen_comm::Attributes::RegConfig, dont_care),
attr_data!(0, 48, gen_comm::Attributes::LocationCapability, dont_care),
attr_data!(
0,
48,
gen_comm::Attributes::BasicCommissioningInfo,
dont_care
),
attr_data!(0, 49, GlobalElements::FeatureMap, dont_care),
attr_data!(0, 49, GlobalElements::AttributeList, dont_care),
attr_data!(0, 60, GlobalElements::FeatureMap, dont_care),
attr_data!(0, 60, GlobalElements::AttributeList, dont_care),
attr_data!(0, 60, adm_comm::Attributes::WindowStatus, dont_care),
attr_data!(0, 60, adm_comm::Attributes::AdminFabricIndex, dont_care),
attr_data!(0, 60, adm_comm::Attributes::AdminVendorId, dont_care),
attr_data!(0, 62, GlobalElements::FeatureMap, dont_care),
attr_data!(0, 62, GlobalElements::AttributeList, dont_care),
attr_data!(0, 62, noc::Attributes::CurrentFabricIndex, dont_care),
attr_data!(0, 62, noc::Attributes::Fabrics, dont_care),
attr_data!(0, 62, noc::Attributes::SupportedFabrics, dont_care),
attr_data!(0, 62, noc::Attributes::CommissionedFabrics, dont_care),
attr_data!(0, 31, GlobalElements::FeatureMap, dont_care),
attr_data!(0, 31, GlobalElements::AttributeList, dont_care),
attr_data!(0, 31, acl::Attributes::Acl, dont_care),
attr_data!(0, 31, acl::Attributes::Extension, dont_care),
attr_data!(0, 31, acl::Attributes::SubjectsPerEntry, dont_care),
attr_data!(0, 31, acl::Attributes::TargetsPerEntry, dont_care),
attr_data!(0, 31, acl::Attributes::EntriesPerFabric, dont_care),
attr_data!(0, echo::ID, GlobalElements::FeatureMap, dont_care),
attr_data!(0, echo::ID, GlobalElements::AttributeList, dont_care),
attr_data!(0, echo::ID, echo::Attributes::Att1, dont_care),
attr_data!(0, echo::ID, echo::Attributes::Att2, dont_care),
attr_data!(0, echo::ID, echo::Attributes::AttCustom, dont_care),
attr_data!(1, 29, GlobalElements::FeatureMap, dont_care),
attr_data!(1, 29, GlobalElements::AttributeList, dont_care),
attr_data!(1, 29, descriptor::Attributes::DeviceTypeList, dont_care),
];
let part2 = vec![
attr_data!(1, 29, descriptor::Attributes::ServerList, dont_care),
attr_data!(1, 29, descriptor::Attributes::PartsList, dont_care),
attr_data!(1, 29, descriptor::Attributes::ClientList, dont_care),
attr_data!(1, 6, GlobalElements::FeatureMap, dont_care),
attr_data!(1, 6, GlobalElements::AttributeList, dont_care),
attr_data!(1, 6, onoff::Attributes::OnOff, dont_care),
attr_data!(1, echo::ID, GlobalElements::FeatureMap, dont_care),
attr_data!(1, echo::ID, GlobalElements::AttributeList, dont_care),
attr_data!(1, echo::ID, echo::Attributes::Att1, dont_care),
attr_data!(1, echo::ID, echo::Attributes::Att2, dont_care),
attr_data!(1, echo::ID, echo::Attributes::AttCustom, dont_care),
];
if part == 1 {
part1
} else {
part2
}
}
#[test]
fn test_long_read_success() {
// Read the entire attribute database, which requires 2 reads to complete
let _ = env_logger::try_init();
let mut lr = LongRead::new();
let mut output = [0_u8; MAX_RX_BUF_SIZE + 100];
let wc_path = GenericPath::new(None, None, None);
let read_all = [AttrPath::new(&wc_path)];
let read_req = ReadReq::new(true).set_attr_requests(&read_all);
let expected_part1 = wildcard_read_resp(1);
let (out_code, out_data) = lr.process(OpCode::ReadRequest, &read_req, &mut output);
let root = tlv::get_root_node_struct(out_data).unwrap();
let report_data = ReportDataMsg::from_tlv(&root).unwrap();
assert_attr_report_skip_data(&report_data, &expected_part1);
assert_eq!(report_data.more_chunks, Some(true));
assert_eq!(out_code, OpCode::ReportData as u8);
// Ask for the next read by sending a status report
let status_report = StatusResp {
status: IMStatusCode::Success,
};
let expected_part2 = wildcard_read_resp(2);
let (out_code, out_data) = lr.process(OpCode::StatusResponse, &status_report, &mut output);
let root = tlv::get_root_node_struct(out_data).unwrap();
let report_data = ReportDataMsg::from_tlv(&root).unwrap();
assert_attr_report_skip_data(&report_data, &expected_part2);
assert_eq!(report_data.more_chunks, None);
assert_eq!(out_code, OpCode::ReportData as u8);
}
#[test]
fn test_long_read_subscription_success() {
// Subscribe to the entire attribute database, which requires 2 reads to complete
let _ = env_logger::try_init();
let mut lr = LongRead::new();
let mut output = [0_u8; MAX_RX_BUF_SIZE + 100];
let wc_path = GenericPath::new(None, None, None);
let read_all = [AttrPath::new(&wc_path)];
let subs_req = SubscribeReq::new(true, 1, 20).set_attr_requests(&read_all);
let expected_part1 = wildcard_read_resp(1);
let (out_code, out_data) = lr.process(OpCode::SubscribeRequest, &subs_req, &mut output);
let root = tlv::get_root_node_struct(out_data).unwrap();
let report_data = ReportDataMsg::from_tlv(&root).unwrap();
assert_attr_report_skip_data(&report_data, &expected_part1);
assert_eq!(report_data.more_chunks, Some(true));
assert_eq!(out_code, OpCode::ReportData as u8);
// Ask for the next read by sending a status report
let status_report = StatusResp {
status: IMStatusCode::Success,
};
let expected_part2 = wildcard_read_resp(2);
let (out_code, out_data) = lr.process(OpCode::StatusResponse, &status_report, &mut output);
let root = tlv::get_root_node_struct(out_data).unwrap();
let report_data = ReportDataMsg::from_tlv(&root).unwrap();
assert_attr_report_skip_data(&report_data, &expected_part2);
assert_eq!(report_data.more_chunks, None);
assert_eq!(out_code, OpCode::ReportData as u8);
// Finally confirm subscription
let (out_code, out_data) = lr.process(OpCode::StatusResponse, &status_report, &mut output);
tlv::print_tlv_list(out_data);
let root = tlv::get_root_node_struct(out_data).unwrap();
let subs_resp = SubscribeResp::from_tlv(&root).unwrap();
assert_eq!(out_code, OpCode::SubscriptResponse as u8);
assert_eq!(subs_resp.subs_id, 1);
}
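
The long-read tests above rely on the report being split into chunks, with more_chunks signalling that another ReportData follows (Some(true) while data is pending, None on the final report). A self-contained sketch of that chunking contract (illustrative types, not this crate's API):

/// One reported attribute; stands in for AttrResp in the tests above.
#[derive(Debug, Clone)]
struct AttrReport(u32);

/// A single ReportData-like message: a slice of the full result plus a flag
/// telling the client whether to ask for more.
#[derive(Debug)]
struct Report {
    attrs: Vec<AttrReport>,
    more_chunks: Option<bool>,
}

/// Split `all` into messages of at most `max_per_msg` attributes.
fn chunk_reports(all: &[AttrReport], max_per_msg: usize) -> Vec<Report> {
    let mut out = Vec::new();
    let mut chunks = all.chunks(max_per_msg).peekable();
    while let Some(chunk) = chunks.next() {
        out.push(Report {
            attrs: chunk.to_vec(),
            // Matches the tests: Some(true) while more data is pending,
            // None on the final report.
            more_chunks: if chunks.peek().is_some() { Some(true) } else { None },
        });
    }
    out
}

fn main() {
    let all: Vec<AttrReport> = (0..60).map(AttrReport).collect();
    let reports = chunk_reports(&all, 50);
    assert_eq!(reports.len(), 2);
    assert_eq!(reports[0].more_chunks, Some(true));
    assert_eq!(reports[1].more_chunks, None);
}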


@ -1,284 +0,0 @@
/*
*
* Copyright (c) 2023 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use core::time;
use std::thread;
use matter::{
data_model::{
core::DataModel,
objects::{AttrValue, EncodeValue},
},
interaction_model::{
core::{IMStatusCode, OpCode},
messages::{ib::CmdData, ib::CmdPath, msg::InvReq, GenericPath},
messages::{
ib::{AttrData, AttrPath, AttrStatus},
msg::{self, StatusResp, TimedReq, WriteReq, WriteResp},
},
},
tlv::{self, FromTLV, TLVArray, TLVWriter, ToTLV},
transport::exchange::{self, Exchange},
};
use crate::{
common::{
commands::*,
echo_cluster,
im_engine::{ImEngine, ImInput},
},
echo_req, echo_resp,
};
fn handle_timed_reqs<'a>(
opcode: OpCode,
request: &dyn ToTLV,
timeout: u16,
delay: u16,
output: &'a mut [u8],
) -> (u8, DataModel, &'a [u8]) {
let mut im_engine = ImEngine::new();
// Use the same exchange for all parts of the transaction
im_engine.exch = Some(Exchange::new(1, 0, exchange::Role::Responder));
if timeout != 0 {
// Send Timed Req
let mut tmp_buf = [0u8; 400];
let timed_req = TimedReq { timeout };
let im_input = ImInput::new(OpCode::TimedRequest, &timed_req);
let (_, out_buf) = im_engine.process(&im_input, &mut tmp_buf);
tlv::print_tlv_list(out_buf);
} else {
println!("Skipping timed request");
}
// Process any delays
let delay = time::Duration::from_millis(delay.into());
thread::sleep(delay);
// Send Write Req
let input = ImInput::new(opcode, request);
let (resp_opcode, output) = im_engine.process(&input, output);
(resp_opcode, im_engine.dm, output)
}
enum WriteResponse<'a> {
TransactionError,
TransactionSuccess(&'a [AttrStatus]),
}
// Helper for handling Write Attribute sequences
fn handle_timed_write_reqs(
input: &[AttrData],
expected: &WriteResponse,
timeout: u16,
delay: u16,
) -> DataModel {
let mut out_buf = [0u8; 400];
let write_req = WriteReq::new(false, input);
let (resp_opcode, dm, out_buf) = handle_timed_reqs(
OpCode::WriteRequest,
&write_req,
timeout,
delay,
&mut out_buf,
);
tlv::print_tlv_list(out_buf);
let root = tlv::get_root_node_struct(out_buf).unwrap();
match expected {
WriteResponse::TransactionSuccess(t) => {
assert_eq!(
num::FromPrimitive::from_u8(resp_opcode),
Some(OpCode::WriteResponse)
);
let resp = WriteResp::from_tlv(&root).unwrap();
assert_eq!(resp.write_responses, t);
}
WriteResponse::TransactionError => {
assert_eq!(
num::FromPrimitive::from_u8(resp_opcode),
Some(OpCode::StatusResponse)
);
let status_resp = StatusResp::from_tlv(&root).unwrap();
assert_eq!(status_resp.status, IMStatusCode::Timeout);
}
}
dm
}
#[test]
fn test_timed_write_fail_and_success() {
// - 1 Timed Attr Write Transaction should fail due to timeout
// - 1 Timed Attr Write Transaction should succeed
let val0 = 10;
let _ = env_logger::try_init();
let attr_data0 = |tag, t: &mut TLVWriter| {
let _ = t.u16(tag, val0);
};
let ep_att = GenericPath::new(
None,
Some(echo_cluster::ID),
Some(echo_cluster::Attributes::AttWrite as u32),
);
let input = &[AttrData::new(
None,
AttrPath::new(&ep_att),
EncodeValue::Closure(&attr_data0),
)];
let ep0_att = GenericPath::new(
Some(0),
Some(echo_cluster::ID),
Some(echo_cluster::Attributes::AttWrite as u32),
);
let ep1_att = GenericPath::new(
Some(1),
Some(echo_cluster::ID),
Some(echo_cluster::Attributes::AttWrite as u32),
);
let expected = &[
AttrStatus::new(&ep0_att, IMStatusCode::Success, 0),
AttrStatus::new(&ep1_att, IMStatusCode::Success, 0),
];
// Test the failure path: the write arrives after the timeout expires
handle_timed_write_reqs(input, &WriteResponse::TransactionError, 400, 500);
// Test the success path: the write arrives within the timeout
let dm = handle_timed_write_reqs(input, &WriteResponse::TransactionSuccess(expected), 400, 0);
assert_eq!(
AttrValue::Uint16(val0),
dm.read_attribute_raw(
0,
echo_cluster::ID,
echo_cluster::Attributes::AttWrite as u16
)
.unwrap()
);
assert_eq!(
AttrValue::Uint16(val0),
dm.read_attribute_raw(
0,
echo_cluster::ID,
echo_cluster::Attributes::AttWrite as u16
)
.unwrap()
);
}
enum TimedInvResponse<'a> {
TransactionError(IMStatusCode),
TransactionSuccess(&'a [ExpectedInvResp]),
}
// Helper for handling Invoke Command sequences
fn handle_timed_commands(
input: &[CmdData],
expected: &TimedInvResponse,
timeout: u16,
delay: u16,
set_timed_request: bool,
) -> DataModel {
let mut out_buf = [0u8; 400];
let req = InvReq {
suppress_response: Some(false),
timed_request: Some(set_timed_request),
inv_requests: Some(TLVArray::Slice(input)),
};
let (resp_opcode, dm, out_buf) =
handle_timed_reqs(OpCode::InvokeRequest, &req, timeout, delay, &mut out_buf);
tlv::print_tlv_list(out_buf);
let root = tlv::get_root_node_struct(out_buf).unwrap();
match expected {
TimedInvResponse::TransactionSuccess(t) => {
assert_eq!(
num::FromPrimitive::from_u8(resp_opcode),
Some(OpCode::InvokeResponse)
);
let resp = msg::InvResp::from_tlv(&root).unwrap();
assert_inv_response(&resp, t)
}
TimedInvResponse::TransactionError(e) => {
assert_eq!(
num::FromPrimitive::from_u8(resp_opcode),
Some(OpCode::StatusResponse)
);
let status_resp = StatusResp::from_tlv(&root).unwrap();
assert_eq!(status_resp.status, *e);
}
}
dm
}
#[test]
fn test_timed_cmd_success() {
// A timed request that works
let _ = env_logger::try_init();
let input = &[echo_req!(0, 5), echo_req!(1, 10)];
let expected = &[echo_resp!(0, 10), echo_resp!(1, 30)];
handle_timed_commands(
input,
&TimedInvResponse::TransactionSuccess(expected),
400,
0,
true,
);
}
#[test]
fn test_timed_cmd_timeout() {
// A timed request that is executed after timeout
let _ = env_logger::try_init();
let input = &[echo_req!(0, 5), echo_req!(1, 10)];
handle_timed_commands(
input,
&TimedInvResponse::TransactionError(IMStatusCode::Timeout),
400,
500,
true,
);
}
#[test]
fn test_timed_cmd_timedout_mismatch() {
// A timed request with a timed-request flag mismatch
let _ = env_logger::try_init();
let input = &[echo_req!(0, 5), echo_req!(1, 10)];
handle_timed_commands(
input,
&TimedInvResponse::TransactionError(IMStatusCode::TimedRequestMisMatch),
400,
0,
false,
);
let input = &[echo_req!(0, 5), echo_req!(1, 10)];
handle_timed_commands(
input,
&TimedInvResponse::TransactionError(IMStatusCode::TimedRequestMisMatch),
0,
0,
true,
);
}
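
The timed-invoke tests above cover three cases: the follow-up arrives within the timeout, it arrives too late (Timeout), or the timed flag and the preceding TimedRequest disagree (TimedRequestMisMatch). The window bookkeeping can be sketched in a few lines with std::time; names are illustrative and not this crate's API.

use std::time::{Duration, Instant};

#[derive(Debug, PartialEq)]
enum Status {
    Success,
    Timeout,
    TimedRequestMismatch,
}

#[derive(Default)]
struct TimedWindow {
    deadline: Option<Instant>,
}

impl TimedWindow {
    /// Handle a TimedRequest: open a window of `timeout_ms`.
    fn on_timed_request(&mut self, timeout_ms: u64) {
        self.deadline = Some(Instant::now() + Duration::from_millis(timeout_ms));
    }

    /// Handle the follow-up invoke/write carrying a `timed_request` flag.
    fn on_follow_up(&mut self, timed_request: bool, now: Instant) -> Status {
        match (timed_request, self.deadline.take()) {
            // Flag and window must agree in both directions.
            (true, None) | (false, Some(_)) => Status::TimedRequestMismatch,
            (true, Some(deadline)) if now > deadline => Status::Timeout,
            _ => Status::Success,
        }
    }
}

fn main() {
    let mut w = TimedWindow::default();
    w.on_timed_request(400);
    assert_eq!(w.on_follow_up(true, Instant::now()), Status::Success);

    w.on_timed_request(400);
    // Simulate the follow-up arriving after the window instead of sleeping.
    let late = Instant::now() + Duration::from_millis(500);
    assert_eq!(w.on_follow_up(true, late), Status::Timeout);

    // No TimedRequest sent, but the flag is set: mismatch.
    assert_eq!(w.on_follow_up(true, Instant::now()), Status::TimedRequestMismatch);
}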


@ -1,195 +0,0 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use boxslab::Slab;
use matter::error::Error;
use matter::interaction_model::core::OpCode;
use matter::interaction_model::messages::msg::InvReq;
use matter::interaction_model::messages::msg::WriteReq;
use matter::interaction_model::InteractionConsumer;
use matter::interaction_model::InteractionModel;
use matter::interaction_model::Transaction;
use matter::tlv::TLVWriter;
use matter::transport::exchange::Exchange;
use matter::transport::exchange::ExchangeCtx;
use matter::transport::network::Address;
use matter::transport::packet::Packet;
use matter::transport::packet::PacketPool;
use matter::transport::proto_demux::HandleProto;
use matter::transport::proto_demux::ProtoCtx;
use matter::transport::proto_demux::ResponseRequired;
use matter::transport::session::SessionMgr;
use std::net::Ipv4Addr;
use std::net::SocketAddr;
use std::sync::{Arc, Mutex};
struct Node {
pub endpoint: u16,
pub cluster: u32,
pub command: u32,
pub variable: u8,
}
struct DataModel {
node: Arc<Mutex<Node>>,
}
impl DataModel {
pub fn new(node: Node) -> Self {
DataModel {
node: Arc::new(Mutex::new(node)),
}
}
}
impl Clone for DataModel {
fn clone(&self) -> Self {
Self {
node: self.node.clone(),
}
}
}
impl InteractionConsumer for DataModel {
fn consume_invoke_cmd(
&self,
inv_req_msg: &InvReq,
_trans: &mut Transaction,
_tlvwriter: &mut TLVWriter,
) -> Result<(), Error> {
if let Some(inv_requests) = &inv_req_msg.inv_requests {
for i in inv_requests.iter() {
let data = if let Some(data) = i.data.unwrap_tlv() {
data
} else {
continue;
};
let cmd_path_ib = i.path;
let mut common_data = self.node.lock().unwrap();
common_data.endpoint = cmd_path_ib.path.endpoint.unwrap_or(1);
common_data.cluster = cmd_path_ib.path.cluster.unwrap_or(0);
common_data.command = cmd_path_ib.path.leaf.unwrap_or(0);
data.confirm_struct().unwrap();
common_data.variable = data.find_tag(0).unwrap().u8().unwrap();
}
}
Ok(())
}
fn consume_read_attr(
&self,
_req: &[u8],
_trans: &mut Transaction,
_tlvwriter: &mut TLVWriter,
) -> Result<(), Error> {
Ok(())
}
fn consume_write_attr(
&self,
_req: &WriteReq,
_trans: &mut Transaction,
_tlvwriter: &mut TLVWriter,
) -> Result<(), Error> {
Ok(())
}
fn consume_status_report(
&self,
_req: &matter::interaction_model::messages::msg::StatusResp,
_trans: &mut Transaction,
_tw: &mut TLVWriter,
) -> Result<(OpCode, ResponseRequired), Error> {
Ok((OpCode::StatusResponse, ResponseRequired::No))
}
fn consume_subscribe(
&self,
_req: &[u8],
_trans: &mut Transaction,
_tw: &mut TLVWriter,
) -> Result<(OpCode, matter::transport::proto_demux::ResponseRequired), Error> {
Ok((OpCode::StatusResponse, ResponseRequired::No))
}
}
fn handle_data(action: OpCode, data_in: &[u8], data_out: &mut [u8]) -> (DataModel, usize) {
let data_model = DataModel::new(Node {
endpoint: 0,
cluster: 0,
command: 0,
variable: 0,
});
let mut interaction_model = InteractionModel::new(Box::new(data_model.clone()));
let mut exch: Exchange = Default::default();
let mut sess_mgr: SessionMgr = Default::default();
let sess_idx = sess_mgr
.get_or_add(
0,
Address::Udp(SocketAddr::new(
std::net::IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
5542,
)),
None,
false,
)
.unwrap();
let sess = sess_mgr.get_session_handle(sess_idx);
let exch_ctx = ExchangeCtx {
exch: &mut exch,
sess,
};
let mut rx = Slab::<PacketPool>::try_new(Packet::new_rx().unwrap()).unwrap();
let tx = Slab::<PacketPool>::try_new(Packet::new_tx().unwrap()).unwrap();
// Create fake rx packet
rx.set_proto_id(0x01);
rx.set_proto_opcode(action as u8);
rx.peer = Address::default();
let in_data_len = data_in.len();
let rx_buf = rx.as_borrow_slice();
rx_buf[..in_data_len].copy_from_slice(data_in);
let mut ctx = ProtoCtx::new(exch_ctx, rx, tx);
interaction_model.handle_proto_id(&mut ctx).unwrap();
let out_len = ctx.tx.as_borrow_slice().len();
data_out[..out_len].copy_from_slice(ctx.tx.as_borrow_slice());
(data_model, out_len)
}
#[test]
fn test_valid_invoke_cmd() -> Result<(), Error> {
// An invoke command for endpoint 0, cluster 49, command 12 and a u8 variable value of 0x05
let b = [
0x15, 0x28, 0x00, 0x28, 0x01, 0x36, 0x02, 0x15, 0x37, 0x00, 0x25, 0x00, 0x00, 0x00, 0x26,
0x01, 0x31, 0x00, 0x00, 0x00, 0x26, 0x02, 0x0c, 0x00, 0x00, 0x00, 0x18, 0x35, 0x01, 0x24,
0x00, 0x05, 0x18, 0x18, 0x18, 0x18,
];
let mut out_buf: [u8; 20] = [0; 20];
let (data_model, _) = handle_data(OpCode::InvokeRequest, &b, &mut out_buf);
let data = data_model.node.lock().unwrap();
assert_eq!(data.endpoint, 0);
assert_eq!(data.cluster, 49);
assert_eq!(data.command, 12);
assert_eq!(data.variable, 5);
Ok(())
}


@ -1,13 +0,0 @@
[package]
name = "matter_macro_derive"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
proc-macro = true
[dependencies]
syn = { version = "1", features = ["extra-traits"]}
quote = "1"
proc-macro2 = "1"


@ -0,0 +1,21 @@
[package]
name = "rs-matter-macros"
version = "0.1.0"
edition = "2021"
authors = ["Kedar Sovani <kedars@gmail.com>", "Ivan Markov", "Project CHIP Authors"]
description = "Native Rust implementation of the Matter (Smart-Home) ecosystem - Proc-macros"
repository = "https://github.com/project-chip/matter-rs"
readme = "README.md"
keywords = ["matter", "smart", "smart-home", "IoT", "ESP32"]
categories = ["embedded", "network-programming"]
license = "Apache-2.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
proc-macro = true
[dependencies]
syn = { version = "1", features = ["extra-traits"]}
quote = "1"
proc-macro2 = "1"
proc-macro-crate = "1.3"


@ -0,0 +1,5 @@
# rs-matter-macros: The Rust Implementation of Matter Library - Proc-macros
Proc-macros for implementing the `ToTLV` and `FromTLV` traits.
NOTE: The macros are re-exported by the `rs-matter` crate which should be used instead of adding a direct dependency on the `rs-matter-macros` crate.
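A hypothetical usage sketch (the derive macros are assumed here to be re-exported alongside the traits under `rs_matter::tlv`; exact paths and any attribute syntax may differ):

use rs_matter::tlv::{FromTLV, ToTLV};

// Deriving both traits generates context-tagged TLV encode/decode for the
// struct's fields, in declaration order.
#[derive(Debug, ToTLV, FromTLV)]
struct EchoReq {
    endpoint: u16,
    value: u8,
}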


@ -16,7 +16,7 @@
*/
use proc_macro::TokenStream;
use proc_macro2::Span;
use proc_macro2::{Ident, Span};
use quote::{format_ident, quote};
use syn::Lit::{Int, Str};
use syn::NestedMeta::{Lit, Meta};
@ -106,6 +106,18 @@ fn parse_tag_val(field: &syn::Field) -> Option<u8> {
None
}
fn get_crate_name() -> String {
let found_crate = proc_macro_crate::crate_name("rs-matter").unwrap_or_else(|err| {
eprintln!("Warning: defaulting to `crate` {err}");
proc_macro_crate::FoundCrate::Itself
});
match found_crate {
proc_macro_crate::FoundCrate::Itself => String::from("crate"),
proc_macro_crate::FoundCrate::Name(name) => name,
}
}
/// Generate a ToTlv implementation for a structure
fn gen_totlv_for_struct(
fields: &syn::FieldsNamed,
@ -138,11 +150,20 @@ fn gen_totlv_for_struct(
let expanded = quote! {
impl #generics ToTLV for #struct_name #generics {
fn to_tlv(&self, tw: &mut TLVWriter, tag_type: TagType) -> Result<(), Error> {
tw. #datatype (tag_type)?;
#(
self.#idents.to_tlv(tw, TagType::Context(#tags))?;
)*
tw.end_container()
let anchor = tw.get_tail();
if let Err(err) = (|| {
tw. #datatype (tag_type)?;
#(
self.#idents.to_tlv(tw, TagType::Context(#tags))?;
)*
tw.end_container()
})() {
tw.rewind_to(anchor);
Err(err)
} else {
Ok(())
}
}
}
};
@ -178,18 +199,29 @@ fn gen_totlv_for_enum(
tag_start += 1;
}
let krate = Ident::new(&get_crate_name(), Span::call_site());
let expanded = quote! {
impl #generics ToTLV for #enum_name #generics {
fn to_tlv(&self, tw: &mut TLVWriter, tag_type: TagType) -> Result<(), Error> {
tw.start_struct(tag_type)?;
match self {
#(
Self::#variant_names(c) => { c.to_tlv(tw, TagType::Context(#tags))?; },
)*
}
tw.end_container()
}
}
impl #generics #krate::tlv::ToTLV for #enum_name #generics {
fn to_tlv(&self, tw: &mut #krate::tlv::TLVWriter, tag_type: #krate::tlv::TagType) -> Result<(), #krate::error::Error> {
let anchor = tw.get_tail();
if let Err(err) = (|| {
tw.start_struct(tag_type)?;
match self {
#(
Self::#variant_names(c) => { c.to_tlv(tw, #krate::tlv::TagType::Context(#tags))?; },
)*
}
tw.end_container()
})() {
tw.rewind_to(anchor);
Err(err)
} else {
Ok(())
}
}
}
};
// panic!("Expanded to {}", expanded);
@ -279,17 +311,19 @@ fn gen_fromtlv_for_struct(
}
}
let krate = Ident::new(&get_crate_name(), Span::call_site());
// Currently we don't use find_tag() because the tags come in sequential
// order. If ever the tags start coming out of order, we can use find_tag()
// instead
let expanded = if !tlvargs.unordered {
quote! {
impl #generics FromTLV <#lifetime> for #struct_name #generics {
fn from_tlv(t: &TLVElement<#lifetime>) -> Result<Self, Error> {
let mut t_iter = t.#datatype ()?.enter().ok_or(Error::Invalid)?;
impl #generics #krate::tlv::FromTLV <#lifetime> for #struct_name #generics {
fn from_tlv(t: &#krate::tlv::TLVElement<#lifetime>) -> Result<Self, #krate::error::Error> {
let mut t_iter = t.#datatype ()?.enter().ok_or_else(|| #krate::error::Error::new(#krate::error::ErrorCode::Invalid))?;
let mut item = t_iter.next();
#(
let #idents = if Some(true) == item.map(|x| x.check_ctx_tag(#tags)) {
let #idents = if Some(true) == item.as_ref().map(|x| x.check_ctx_tag(#tags)) {
let backup = item;
item = t_iter.next();
#types::from_tlv(&backup.unwrap())
@ -306,8 +340,8 @@ fn gen_fromtlv_for_struct(
}
} else {
quote! {
impl #generics FromTLV <#lifetime> for #struct_name #generics {
fn from_tlv(t: &TLVElement<#lifetime>) -> Result<Self, Error> {
impl #generics #krate::tlv::FromTLV <#lifetime> for #struct_name #generics {
fn from_tlv(t: &#krate::tlv::TLVElement<#lifetime>) -> Result<Self, #krate::error::Error> {
#(
let #idents = if let Ok(s) = t.find_tag(#tags as u32) {
#types::from_tlv(&s)
@ -357,20 +391,22 @@ fn gen_fromtlv_for_enum(
tag_start += 1;
}
let krate = Ident::new(&get_crate_name(), Span::call_site());
let expanded = quote! {
impl #generics FromTLV <#lifetime> for #enum_name #generics {
fn from_tlv(t: &TLVElement<#lifetime>) -> Result<Self, Error> {
let mut t_iter = t.confirm_struct()?.enter().ok_or(Error::Invalid)?;
let mut item = t_iter.next().ok_or(Error::Invalid)?;
impl #generics #krate::tlv::FromTLV <#lifetime> for #enum_name #generics {
fn from_tlv(t: &#krate::tlv::TLVElement<#lifetime>) -> Result<Self, #krate::error::Error> {
let mut t_iter = t.confirm_struct()?.enter().ok_or_else(|| #krate::error::Error::new(#krate::error::ErrorCode::Invalid))?;
let mut item = t_iter.next().ok_or_else(|| Error::new(#krate::error::ErrorCode::Invalid))?;
if let TagType::Context(tag) = item.get_tag() {
match tag {
#(
#tags => Ok(Self::#variant_names(#types::from_tlv(&item)?)),
)*
_ => Err(Error::Invalid),
_ => Err(#krate::error::Error::new(#krate::error::ErrorCode::Invalid)),
}
} else {
Err(Error::TLVTypeMismatch)
Err(#krate::error::Error::new(#krate::error::ErrorCode::TLVTypeMismatch))
}
}
}

rs-matter/Cargo.toml Normal file

@ -0,0 +1,106 @@
[package]
name = "rs-matter"
version = "0.1.1"
edition = "2021"
authors = ["Kedar Sovani <kedars@gmail.com>", "Ivan Markov", "Project CHIP Authors"]
description = "Native Rust implementation of the Matter (Smart-Home) ecosystem"
repository = "https://github.com/project-chip/matter-rs"
readme = "README.md"
keywords = ["matter", "smart", "smart-home", "IoT", "ESP32"]
categories = ["embedded", "network-programming"]
license = "Apache-2.0"
[features]
default = ["os", "mbedtls"]
os = ["std", "backtrace", "env_logger", "nix", "critical-section/std", "embassy-sync/std", "embassy-time/std"]
esp-idf = ["std", "rustcrypto", "esp-idf-sys"]
std = ["alloc", "rand", "async-io", "esp-idf-sys?/std", "embassy-time/generic-queue-16"]
backtrace = []
alloc = []
nightly = []
openssl = ["alloc", "dep:openssl", "foreign-types", "hmac", "sha2"]
mbedtls = ["alloc", "dep:mbedtls"]
rustcrypto = ["alloc", "sha2", "hmac", "pbkdf2", "hkdf", "aes", "ccm", "p256", "elliptic-curve", "crypto-bigint", "x509-cert", "rand_core"]
embassy-net = ["dep:embassy-net", "dep:embassy-net-driver", "smoltcp"]
zeroconf = ["dep:zeroconf"]
[dependencies]
rs-matter-macros = { version = "0.1", path = "../rs-matter-macros" }
bitflags = { version = "1.3", default-features = false }
byteorder = { version = "1.4.3", default-features = false }
heapless = "0.7.16"
num = { version = "0.4", default-features = false }
num-derive = "0.3.3"
num-traits = { version = "0.2.15", default-features = false }
strum = { version = "0.24", features = ["derive"], default-features = false }
log = { version = "0.4.17", features = ["max_level_debug", "release_max_level_debug"] }
no-std-net = "0.6"
subtle = { version = "2.4.1", default-features = false }
safemem = { version = "0.3.3", default-features = false }
owo-colors = "3"
time = { version = "0.3", default-features = false }
verhoeff = { version = "1", default-features = false }
embassy-futures = "0.1"
embassy-time = "0.1.1"
embassy-sync = "0.2"
critical-section = "1.1.1"
domain = { version = "0.7.2", default_features = false, features = ["heapless"] }
portable-atomic = "1"
qrcodegen-no-heap = "1.8"
# embassy-net dependencies
embassy-net = { version = "0.1", features = ["igmp", "proto-ipv6", "udp"], optional = true }
embassy-net-driver = { version = "0.1", optional = true }
smoltcp = { version = "0.10", default-features = false, optional = true }
# STD-only dependencies
rand = { version = "0.8.5", optional = true }
async-io = { version = "=1.12", optional = true } # =1.12 for compatibility with ESP IDF
# crypto
openssl = { version = "0.10.55", optional = true }
foreign-types = { version = "0.3.2", optional = true }
# rust-crypto
sha2 = { version = "0.10", default-features = false, optional = true }
hmac = { version = "0.12", optional = true }
pbkdf2 = { version = "0.12", optional = true }
hkdf = { version = "0.12", optional = true }
aes = { version = "0.8", optional = true }
ccm = { version = "0.5", default-features = false, features = ["alloc"], optional = true }
p256 = { version = "0.13.0", default-features = false, features = ["arithmetic", "ecdh", "ecdsa"], optional = true }
elliptic-curve = { version = "0.13.2", optional = true }
crypto-bigint = { version = "0.4", default-features = false, optional = true }
rand_core = { version = "0.6", default-features = false, optional = true }
x509-cert = { version = "0.2.0", default-features = false, features = ["pem"], optional = true } # TODO: requires `alloc`
[target.'cfg(target_os = "macos")'.dependencies]
astro-dnssd = { version = "0.3" }
[target.'cfg(target_os = "linux")'.dependencies]
zeroconf = { version = "0.12", optional = true }
[target.'cfg(not(target_os = "espidf"))'.dependencies]
mbedtls = { version = "0.9", optional = true }
env_logger = { version = "0.10.0", optional = true }
nix = { version = "0.26", features = ["net"], optional = true }
[target.'cfg(target_os = "espidf")'.dependencies]
esp-idf-sys = { version = "0.33", optional = true, default-features = false, features = ["native"] }
[build-dependencies]
embuild = "0.31.2"
[target.'cfg(target_os = "espidf")'.dev-dependencies]
esp-idf-sys = { version = "0.33", default-features = false, features = ["binstart"] }
esp-idf-hal = { version = "0.41", features = ["embassy-sync", "critical-section"] }
esp-idf-svc = { version = "0.46", features = ["embassy-time-driver"] }
embedded-svc = { version = "0.25" }
[[example]]
name = "onoff_light"
path = "../examples/onoff_light/src/main.rs"
# [[example]]
# name = "speaker"
# path = "../examples/speaker/src/main.rs"

rs-matter/README.md Normal file

@ -0,0 +1,3 @@
# rs-matter: The Rust Implementation of Matter Library
This is the actual `rs-matter` library crate. See [the main README file](../README.md) for more information.

rs-matter/build.rs Normal file

@ -0,0 +1,11 @@
use std::env::var;
// Necessary because of this issue: https://github.com/rust-lang/cargo/issues/9641
fn main() -> Result<(), Box<dyn std::error::Error>> {
if var("TARGET").unwrap().ends_with("-espidf") {
embuild::build::CfgArgs::output_propagated("ESP_IDF")?;
embuild::build::LinkArgs::output_propagated("ESP_IDF")?;
}
Ok(())
}


@ -15,19 +15,15 @@
* limitations under the License.
*/
use std::{
fmt::Display,
sync::{Arc, Mutex, MutexGuard, RwLock},
};
use core::{cell::RefCell, fmt::Display};
use crate::{
data_model::objects::{Access, ClusterId, EndptId, Privilege},
error::Error,
error::{Error, ErrorCode},
fabric,
interaction_model::messages::GenericPath,
sys::Psm,
tlv::{FromTLV, TLVElement, TLVList, TLVWriter, TagType, ToTLV},
transport::session::MAX_CAT_IDS_PER_NOC,
tlv::{self, FromTLV, Nullable, TLVElement, TLVList, TLVWriter, TagType, ToTLV},
transport::session::{Session, SessionMode, MAX_CAT_IDS_PER_NOC},
utils::writebuf::WriteBuf,
};
use log::error;
@ -54,7 +50,7 @@ impl FromTLV<'_> for AuthMode {
{
num::FromPrimitive::from_u32(t.u32()?)
.filter(|a| *a != AuthMode::Invalid)
.ok_or(Error::Invalid)
.ok_or_else(|| ErrorCode::Invalid.into())
}
}
@ -116,7 +112,7 @@ impl AccessorSubjects {
return Ok(());
}
}
Err(Error::NoSpace)
Err(ErrorCode::NoSpace.into())
}
/// Match the match_subject with any of the current subjects
@ -146,7 +142,7 @@ impl AccessorSubjects {
}
impl Display for AccessorSubjects {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::result::Result<(), std::fmt::Error> {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::result::Result<(), core::fmt::Error> {
write!(f, "[")?;
for i in self.0 {
if is_noc_cat(i) {
@ -160,7 +156,7 @@ impl Display for AccessorSubjects {
}
/// The Accessor Object
pub struct Accessor {
pub struct Accessor<'a> {
/// The fabric index of the accessor
pub fab_idx: u8,
/// Accessor's subject: could be node-id, NoC CAT, group id
@ -168,15 +164,37 @@ pub struct Accessor {
/// The Authmode of this session
auth_mode: AuthMode,
// TODO: Is this the right place for this though, or should we just use a global-acl-handle-get
acl_mgr: Arc<AclMgr>,
acl_mgr: &'a RefCell<AclMgr>,
}
impl Accessor {
pub fn new(
impl<'a> Accessor<'a> {
pub fn for_session(session: &Session, acl_mgr: &'a RefCell<AclMgr>) -> Self {
match session.get_session_mode() {
SessionMode::Case(c) => {
let mut subject =
AccessorSubjects::new(session.get_peer_node_id().unwrap_or_default());
for i in c.cat_ids {
if i != 0 {
let _ = subject.add_catid(i);
}
}
Accessor::new(c.fab_idx, subject, AuthMode::Case, acl_mgr)
}
SessionMode::Pase => {
Accessor::new(0, AccessorSubjects::new(1), AuthMode::Pase, acl_mgr)
}
SessionMode::PlainText => {
Accessor::new(0, AccessorSubjects::new(1), AuthMode::Invalid, acl_mgr)
}
}
}
pub const fn new(
fab_idx: u8,
subjects: AccessorSubjects,
auth_mode: AuthMode,
acl_mgr: Arc<AclMgr>,
acl_mgr: &'a RefCell<AclMgr>,
) -> Self {
Self {
fab_idx,
@ -188,9 +206,9 @@ impl Accessor {
}
#[derive(Debug)]
pub struct AccessDesc<'a> {
pub struct AccessDesc {
/// The object to be acted upon
path: &'a GenericPath,
path: GenericPath,
/// The target permissions
target_perms: Option<Access>,
// The operation being done
@ -200,8 +218,8 @@ pub struct AccessDesc<'a> {
/// Access Request Object
pub struct AccessReq<'a> {
accessor: &'a Accessor,
object: AccessDesc<'a>,
accessor: &'a Accessor<'a>,
object: AccessDesc,
}
impl<'a> AccessReq<'a> {
@ -209,7 +227,7 @@ impl<'a> AccessReq<'a> {
///
/// An access request specifies the _accessor_ attempting to access _path_
/// with _operation_
pub fn new(accessor: &'a Accessor, path: &'a GenericPath, operation: Access) -> Self {
pub fn new(accessor: &'a Accessor, path: GenericPath, operation: Access) -> Self {
AccessReq {
accessor,
object: AccessDesc {
@ -220,6 +238,10 @@ impl<'a> AccessReq<'a> {
}
}
pub fn operation(&self) -> Access {
self.object.operation
}
/// Add target's permissions to the request
///
/// The permissions that are associated with the target (identified by the
@ -234,11 +256,11 @@ impl<'a> AccessReq<'a> {
/// _accessor_ the necessary privileges to access the target as per its
/// permissions
pub fn allow(&self) -> bool {
self.accessor.acl_mgr.allow(self)
self.accessor.acl_mgr.borrow().allow(self)
}
}
#[derive(FromTLV, ToTLV, Copy, Clone, Debug, PartialEq)]
#[derive(FromTLV, ToTLV, Clone, Debug, PartialEq)]
pub struct Target {
cluster: Option<ClusterId>,
endpoint: Option<EndptId>,
@ -260,8 +282,16 @@ impl Target {
}
type Subjects = [Option<u64>; SUBJECTS_PER_ENTRY];
type Targets = [Option<Target>; TARGETS_PER_ENTRY];
#[derive(ToTLV, FromTLV, Copy, Clone, Debug, PartialEq)]
type Targets = Nullable<[Option<Target>; TARGETS_PER_ENTRY]>;
impl Targets {
fn init_notnull() -> Self {
const INIT_TARGETS: Option<Target> = None;
Nullable::NotNull([INIT_TARGETS; TARGETS_PER_ENTRY])
}
}
#[derive(ToTLV, FromTLV, Clone, Debug, PartialEq)]
#[tlvargs(start = 1)]
pub struct AclEntry {
privilege: Privilege,
@ -276,14 +306,12 @@ pub struct AclEntry {
impl AclEntry {
pub fn new(fab_idx: u8, privilege: Privilege, auth_mode: AuthMode) -> Self {
const INIT_SUBJECTS: Option<u64> = None;
const INIT_TARGETS: Option<Target> = None;
let privilege = privilege;
Self {
fab_idx: Some(fab_idx),
privilege,
auth_mode,
subjects: [INIT_SUBJECTS; SUBJECTS_PER_ENTRY],
targets: [INIT_TARGETS; TARGETS_PER_ENTRY],
targets: Targets::init_notnull(),
}
}
@ -292,7 +320,7 @@ impl AclEntry {
.subjects
.iter()
.position(|s| s.is_none())
.ok_or(Error::NoSpace)?;
.ok_or(ErrorCode::NoSpace)?;
self.subjects[index] = Some(subject);
Ok(())
}
@ -302,12 +330,20 @@ impl AclEntry {
}
pub fn add_target(&mut self, target: Target) -> Result<(), Error> {
if self.targets.is_null() {
self.targets = Targets::init_notnull();
}
let index = self
.targets
.as_ref()
.notnull()
.unwrap()
.iter()
.position(|s| s.is_none())
.ok_or(Error::NoSpace)?;
self.targets[index] = Some(target);
.ok_or(ErrorCode::NoSpace)?;
self.targets.as_mut().notnull().unwrap()[index] = Some(target);
Ok(())
}
@ -336,12 +372,17 @@ impl AclEntry {
fn match_access_desc(&self, object: &AccessDesc) -> bool {
let mut allow = false;
let mut entries_exist = false;
for t in self.targets.iter().flatten() {
entries_exist = true;
if (t.endpoint.is_none() || t.endpoint == object.path.endpoint)
&& (t.cluster.is_none() || t.cluster == object.path.cluster)
{
allow = true
match self.targets.as_ref().notnull() {
None => allow = true, // Allow if targets are NULL
Some(targets) => {
for t in targets.iter().flatten() {
entries_exist = true;
if (t.endpoint.is_none() || t.endpoint == object.path.endpoint)
&& (t.cluster.is_none() || t.cluster == object.path.cluster)
{
allow = true
}
}
}
}
if !entries_exist {
@ -367,35 +408,151 @@ impl AclEntry {
}
const MAX_ACL_ENTRIES: usize = ENTRIES_PER_FABRIC * fabric::MAX_SUPPORTED_FABRICS;
type AclEntries = [Option<AclEntry>; MAX_ACL_ENTRIES];
#[derive(ToTLV, FromTLV, Debug)]
struct AclMgrInner {
type AclEntries = heapless::Vec<Option<AclEntry>, MAX_ACL_ENTRIES>;
pub struct AclMgr {
entries: AclEntries,
changed: bool,
}
const ACL_KV_ENTRY: &str = "acl";
const ACL_KV_MAX_SIZE: usize = 300;
impl AclMgrInner {
pub fn store(&self, psm: &MutexGuard<Psm>) -> Result<(), Error> {
let mut acl_tlvs = [0u8; ACL_KV_MAX_SIZE];
let mut wb = WriteBuf::new(&mut acl_tlvs, ACL_KV_MAX_SIZE);
let mut tw = TLVWriter::new(&mut wb);
self.entries.to_tlv(&mut tw, TagType::Anonymous)?;
psm.set_kv_slice(ACL_KV_ENTRY, wb.as_slice())
impl AclMgr {
#[inline(always)]
pub const fn new() -> Self {
Self {
entries: AclEntries::new(),
changed: false,
}
}
pub fn load(psm: &MutexGuard<Psm>) -> Result<Self, Error> {
let mut acl_tlvs = Vec::new();
psm.get_kv_slice(ACL_KV_ENTRY, &mut acl_tlvs)?;
let root = TLVList::new(&acl_tlvs)
.iter()
.next()
.ok_or(Error::Invalid)?;
pub fn erase_all(&mut self) -> Result<(), Error> {
self.entries.clear();
self.changed = true;
Ok(Self {
entries: AclEntries::from_tlv(&root)?,
})
Ok(())
}
pub fn add(&mut self, entry: AclEntry) -> Result<(), Error> {
let cnt = self
.entries
.iter()
.flatten()
.filter(|a| a.fab_idx == entry.fab_idx)
.count();
if cnt >= ENTRIES_PER_FABRIC {
Err(ErrorCode::NoSpace)?;
}
let slot = self.entries.iter().position(|a| a.is_none());
if slot.is_some() || self.entries.len() < MAX_ACL_ENTRIES {
if let Some(index) = slot {
self.entries[index] = Some(entry);
} else {
self.entries
.push(Some(entry))
.map_err(|_| ErrorCode::NoSpace)
.unwrap();
}
self.changed = true;
}
Ok(())
}
// Since the entries are fabric-scoped, the index is only for entries with the matching fabric index
pub fn edit(&mut self, index: u8, fab_idx: u8, new: AclEntry) -> Result<(), Error> {
let old = self.for_index_in_fabric(index, fab_idx)?;
*old = Some(new);
self.changed = true;
Ok(())
}
pub fn delete(&mut self, index: u8, fab_idx: u8) -> Result<(), Error> {
let old = self.for_index_in_fabric(index, fab_idx)?;
*old = None;
self.changed = true;
Ok(())
}
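// Illustrative sketch (not part of this diff) of the fabric-scoped indexing
// described above, assuming the `AclMgr`/`AclEntry` API shown in this file:
//
//     let mut mgr = AclMgr::new();
//     mgr.add(AclEntry::new(1, Privilege::VIEW, AuthMode::Case)).unwrap();
//     mgr.add(AclEntry::new(2, Privilege::VIEW, AuthMode::Case)).unwrap();
//     // Index 0 is resolved within fabric 2, so this removes the entry
//     // added for fabric 2, not the first entry overall.
//     mgr.delete(0, 2).unwrap();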
pub fn delete_for_fabric(&mut self, fab_idx: u8) -> Result<(), Error> {
for entry in &mut self.entries {
if entry
.as_ref()
.map(|e| e.fab_idx == Some(fab_idx))
.unwrap_or(false)
{
*entry = None;
self.changed = true;
}
}
Ok(())
}
pub fn for_each_acl<T>(&self, mut f: T) -> Result<(), Error>
where
T: FnMut(&AclEntry) -> Result<(), Error>,
{
for entry in self.entries.iter().flatten() {
f(entry)?;
}
Ok(())
}
pub fn allow(&self, req: &AccessReq) -> bool {
// PASE Sessions have implicit access grant
if req.accessor.auth_mode == AuthMode::Pase {
return true;
}
for e in self.entries.iter().flatten() {
if e.allow(req) {
return true;
}
}
error!(
"ACL Disallow for subjects {} fab idx {}",
req.accessor.subjects, req.accessor.fab_idx
);
error!("{}", self);
false
}
pub fn load(&mut self, data: &[u8]) -> Result<(), Error> {
let root = TLVList::new(data).iter().next().ok_or(ErrorCode::Invalid)?;
tlv::from_tlv(&mut self.entries, &root)?;
self.changed = false;
Ok(())
}
pub fn store<'a>(&mut self, buf: &'a mut [u8]) -> Result<Option<&'a [u8]>, Error> {
if self.changed {
let mut wb = WriteBuf::new(buf);
let mut tw = TLVWriter::new(&mut wb);
self.entries
.as_slice()
.to_tlv(&mut tw, TagType::Anonymous)?;
self.changed = false;
let len = tw.get_tail();
Ok(Some(&buf[..len]))
} else {
Ok(None)
}
}
pub fn is_changed(&self) -> bool {
self.changed
}
/// Traverse fabric specific entries to find the index
@ -411,180 +568,25 @@ impl AclMgrInner {
for (curr_index, entry) in self
.entries
.iter_mut()
.filter(|e| e.filter(|e1| e1.fab_idx == Some(fab_idx)).is_some())
.filter(|e| {
e.as_ref()
.filter(|e1| e1.fab_idx == Some(fab_idx))
.is_some()
})
.enumerate()
{
if curr_index == index as usize {
return Ok(entry);
}
}
Err(Error::NotFound)
Err(ErrorCode::NotFound.into())
}
}
pub struct AclMgr {
inner: RwLock<AclMgrInner>,
// The Option<> is solely because test execution is faster
// Doing this here adds the least overhead during ACL verification
psm: Option<Arc<Mutex<Psm>>>,
}
impl AclMgr {
pub fn new() -> Result<Self, Error> {
AclMgr::new_with(true)
}
pub fn new_with(psm_support: bool) -> Result<Self, Error> {
const INIT: Option<AclEntry> = None;
let mut psm = None;
let inner = if !psm_support {
AclMgrInner {
entries: [INIT; MAX_ACL_ENTRIES],
}
} else {
let psm_handle = Psm::get()?;
let inner = {
let psm_lock = psm_handle.lock().unwrap();
AclMgrInner::load(&psm_lock)
};
psm = Some(psm_handle);
inner.unwrap_or({
// Error loading from PSM
AclMgrInner {
entries: [INIT; MAX_ACL_ENTRIES],
}
})
};
Ok(Self {
inner: RwLock::new(inner),
psm,
})
}
pub fn erase_all(&self) {
let mut inner = self.inner.write().unwrap();
for i in 0..MAX_ACL_ENTRIES {
inner.entries[i] = None;
}
if let Some(psm) = self.psm.as_ref() {
let psm = psm.lock().unwrap();
let _ = inner.store(&psm).map_err(|e| {
error!("Error in storing ACLs {}", e);
});
}
}
pub fn add(&self, entry: AclEntry) -> Result<(), Error> {
let mut inner = self.inner.write().unwrap();
let cnt = inner
.entries
.iter()
.flatten()
.filter(|a| a.fab_idx == entry.fab_idx)
.count();
if cnt >= ENTRIES_PER_FABRIC {
return Err(Error::NoSpace);
}
let index = inner
.entries
.iter()
.position(|a| a.is_none())
.ok_or(Error::NoSpace)?;
inner.entries[index] = Some(entry);
if let Some(psm) = self.psm.as_ref() {
let psm = psm.lock().unwrap();
inner.store(&psm)
} else {
Ok(())
}
}
// Since the entries are fabric-scoped, the index is only for entries with the matching fabric index
pub fn edit(&self, index: u8, fab_idx: u8, new: AclEntry) -> Result<(), Error> {
let mut inner = self.inner.write().unwrap();
let old = inner.for_index_in_fabric(index, fab_idx)?;
*old = Some(new);
if let Some(psm) = self.psm.as_ref() {
let psm = psm.lock().unwrap();
inner.store(&psm)
} else {
Ok(())
}
}
pub fn delete(&self, index: u8, fab_idx: u8) -> Result<(), Error> {
let mut inner = self.inner.write().unwrap();
let old = inner.for_index_in_fabric(index, fab_idx)?;
*old = None;
if let Some(psm) = self.psm.as_ref() {
let psm = psm.lock().unwrap();
inner.store(&psm)
} else {
Ok(())
}
}
pub fn delete_for_fabric(&self, fab_idx: u8) -> Result<(), Error> {
let mut inner = self.inner.write().unwrap();
for i in 0..MAX_ACL_ENTRIES {
if inner.entries[i]
.filter(|e| e.fab_idx == Some(fab_idx))
.is_some()
{
inner.entries[i] = None;
}
}
if let Some(psm) = self.psm.as_ref() {
let psm = psm.lock().unwrap();
inner.store(&psm)
} else {
Ok(())
}
}
pub fn for_each_acl<T>(&self, mut f: T) -> Result<(), Error>
where
T: FnMut(&AclEntry),
{
let inner = self.inner.read().unwrap();
for entry in inner.entries.iter().flatten() {
f(entry)
}
Ok(())
}
pub fn allow(&self, req: &AccessReq) -> bool {
// PASE Sessions have implicit access grant
if req.accessor.auth_mode == AuthMode::Pase {
return true;
}
let inner = self.inner.read().unwrap();
for e in inner.entries.iter().flatten() {
if e.allow(req) {
return true;
}
}
error!(
"ACL Disallow for subjects {} fab idx {}",
req.accessor.subjects, req.accessor.fab_idx
);
error!("{}", self);
false
}
}
impl std::fmt::Display for AclMgr {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let inner = self.inner.read().unwrap();
impl core::fmt::Display for AclMgr {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "ACLS: [")?;
for i in inner.entries.iter().flatten() {
for i in self.entries.iter().flatten() {
write!(f, " {{ {:?} }}, ", i)?;
}
write!(f, "]")
@ -594,22 +596,23 @@ impl std::fmt::Display for AclMgr {
#[cfg(test)]
#[allow(clippy::bool_assert_comparison)]
mod tests {
use core::cell::RefCell;
use crate::{
acl::{gen_noc_cat, AccessorSubjects},
data_model::objects::{Access, Privilege},
interaction_model::messages::GenericPath,
};
use std::sync::Arc;
use super::{AccessReq, Accessor, AclEntry, AclMgr, AuthMode, Target};
#[test]
fn test_basic_empty_subject_target() {
let am = Arc::new(AclMgr::new_with(false).unwrap());
am.erase_all();
let accessor = Accessor::new(2, AccessorSubjects::new(112233), AuthMode::Case, am.clone());
let am = RefCell::new(AclMgr::new());
am.borrow_mut().erase_all().unwrap();
let accessor = Accessor::new(2, AccessorSubjects::new(112233), AuthMode::Case, &am);
let path = GenericPath::new(Some(1), Some(1234), None);
let mut req = AccessReq::new(&accessor, &path, Access::READ);
let mut req = AccessReq::new(&accessor, path, Access::READ);
req.set_target_perms(Access::RWVA);
// Default deny
@ -617,46 +620,46 @@ mod tests {
// Deny for session mode mismatch
let new = AclEntry::new(1, Privilege::VIEW, AuthMode::Pase);
am.add(new).unwrap();
am.borrow_mut().add(new).unwrap();
assert_eq!(req.allow(), false);
// Deny for fab idx mismatch
let new = AclEntry::new(1, Privilege::VIEW, AuthMode::Case);
am.add(new).unwrap();
am.borrow_mut().add(new).unwrap();
assert_eq!(req.allow(), false);
// Allow
let new = AclEntry::new(2, Privilege::VIEW, AuthMode::Case);
am.add(new).unwrap();
am.borrow_mut().add(new).unwrap();
assert_eq!(req.allow(), true);
}
#[test]
fn test_subject() {
let am = Arc::new(AclMgr::new_with(false).unwrap());
am.erase_all();
let accessor = Accessor::new(2, AccessorSubjects::new(112233), AuthMode::Case, am.clone());
let am = RefCell::new(AclMgr::new());
am.borrow_mut().erase_all().unwrap();
let accessor = Accessor::new(2, AccessorSubjects::new(112233), AuthMode::Case, &am);
let path = GenericPath::new(Some(1), Some(1234), None);
let mut req = AccessReq::new(&accessor, &path, Access::READ);
let mut req = AccessReq::new(&accessor, path, Access::READ);
req.set_target_perms(Access::RWVA);
// Deny for subject mismatch
let mut new = AclEntry::new(2, Privilege::VIEW, AuthMode::Case);
new.add_subject(112232).unwrap();
am.add(new).unwrap();
am.borrow_mut().add(new).unwrap();
assert_eq!(req.allow(), false);
// Allow for subject match - target is wildcard
let mut new = AclEntry::new(2, Privilege::VIEW, AuthMode::Case);
new.add_subject(112233).unwrap();
am.add(new).unwrap();
am.borrow_mut().add(new).unwrap();
assert_eq!(req.allow(), true);
}
#[test]
fn test_cat() {
let am = Arc::new(AclMgr::new_with(false).unwrap());
am.erase_all();
let am = RefCell::new(AclMgr::new());
am.borrow_mut().erase_all().unwrap();
let allow_cat = 0xABCD;
let disallow_cat = 0xCAFE;
@ -666,35 +669,35 @@ mod tests {
let mut subjects = AccessorSubjects::new(112233);
subjects.add_catid(gen_noc_cat(allow_cat, v2)).unwrap();
let accessor = Accessor::new(2, subjects, AuthMode::Case, am.clone());
let accessor = Accessor::new(2, subjects, AuthMode::Case, &am);
let path = GenericPath::new(Some(1), Some(1234), None);
let mut req = AccessReq::new(&accessor, &path, Access::READ);
let mut req = AccessReq::new(&accessor, path, Access::READ);
req.set_target_perms(Access::RWVA);
// Deny for CAT id mismatch
let mut new = AclEntry::new(2, Privilege::VIEW, AuthMode::Case);
new.add_subject_catid(gen_noc_cat(disallow_cat, v2))
.unwrap();
am.add(new).unwrap();
am.borrow_mut().add(new).unwrap();
assert_eq!(req.allow(), false);
// Deny of CAT version mismatch
let mut new = AclEntry::new(2, Privilege::VIEW, AuthMode::Case);
new.add_subject_catid(gen_noc_cat(allow_cat, v3)).unwrap();
am.add(new).unwrap();
am.borrow_mut().add(new).unwrap();
assert_eq!(req.allow(), false);
// Allow for CAT match
let mut new = AclEntry::new(2, Privilege::VIEW, AuthMode::Case);
new.add_subject_catid(gen_noc_cat(allow_cat, v2)).unwrap();
am.add(new).unwrap();
am.borrow_mut().add(new).unwrap();
assert_eq!(req.allow(), true);
}
#[test]
fn test_cat_version() {
let am = Arc::new(AclMgr::new_with(false).unwrap());
am.erase_all();
let am = RefCell::new(AclMgr::new());
am.borrow_mut().erase_all().unwrap();
let allow_cat = 0xABCD;
let disallow_cat = 0xCAFE;
@ -704,32 +707,32 @@ mod tests {
let mut subjects = AccessorSubjects::new(112233);
subjects.add_catid(gen_noc_cat(allow_cat, v3)).unwrap();
let accessor = Accessor::new(2, subjects, AuthMode::Case, am.clone());
let accessor = Accessor::new(2, subjects, AuthMode::Case, &am);
let path = GenericPath::new(Some(1), Some(1234), None);
let mut req = AccessReq::new(&accessor, &path, Access::READ);
let mut req = AccessReq::new(&accessor, path, Access::READ);
req.set_target_perms(Access::RWVA);
// Deny for CAT id mismatch
let mut new = AclEntry::new(2, Privilege::VIEW, AuthMode::Case);
new.add_subject_catid(gen_noc_cat(disallow_cat, v2))
.unwrap();
am.add(new).unwrap();
am.borrow_mut().add(new).unwrap();
assert_eq!(req.allow(), false);
// Allow for CAT match and version more than ACL version
let mut new = AclEntry::new(2, Privilege::VIEW, AuthMode::Case);
new.add_subject_catid(gen_noc_cat(allow_cat, v2)).unwrap();
am.add(new).unwrap();
am.borrow_mut().add(new).unwrap();
assert_eq!(req.allow(), true);
}
#[test]
fn test_target() {
let am = Arc::new(AclMgr::new_with(false).unwrap());
am.erase_all();
let accessor = Accessor::new(2, AccessorSubjects::new(112233), AuthMode::Case, am.clone());
let am = RefCell::new(AclMgr::new());
am.borrow_mut().erase_all().unwrap();
let accessor = Accessor::new(2, AccessorSubjects::new(112233), AuthMode::Case, &am);
let path = GenericPath::new(Some(1), Some(1234), None);
let mut req = AccessReq::new(&accessor, &path, Access::READ);
let mut req = AccessReq::new(&accessor, path, Access::READ);
req.set_target_perms(Access::RWVA);
// Deny for target mismatch
@ -740,7 +743,7 @@ mod tests {
device_type: None,
})
.unwrap();
am.add(new).unwrap();
am.borrow_mut().add(new).unwrap();
assert_eq!(req.allow(), false);
// Allow for cluster match - subject wildcard
@ -751,11 +754,11 @@ mod tests {
device_type: None,
})
.unwrap();
am.add(new).unwrap();
am.borrow_mut().add(new).unwrap();
assert_eq!(req.allow(), true);
// Clean Slate
am.erase_all();
am.borrow_mut().erase_all().unwrap();
// Allow for endpoint match - subject wildcard
let mut new = AclEntry::new(2, Privilege::VIEW, AuthMode::Case);
@ -765,11 +768,11 @@ mod tests {
device_type: None,
})
.unwrap();
am.add(new).unwrap();
am.borrow_mut().add(new).unwrap();
assert_eq!(req.allow(), true);
// Clean Slate
am.erase_all();
am.borrow_mut().erase_all().unwrap();
// Allow for exact match
let mut new = AclEntry::new(2, Privilege::VIEW, AuthMode::Case);
@ -780,16 +783,15 @@ mod tests {
})
.unwrap();
new.add_subject(112233).unwrap();
am.add(new).unwrap();
am.borrow_mut().add(new).unwrap();
assert_eq!(req.allow(), true);
}
#[test]
fn test_privilege() {
let am = Arc::new(AclMgr::new_with(false).unwrap());
am.erase_all();
let accessor = Accessor::new(2, AccessorSubjects::new(112233), AuthMode::Case, am.clone());
let am = RefCell::new(AclMgr::new());
am.borrow_mut().erase_all().unwrap();
let accessor = Accessor::new(2, AccessorSubjects::new(112233), AuthMode::Case, &am);
let path = GenericPath::new(Some(1), Some(1234), None);
// Create an Exact Match ACL with View privilege
@ -801,10 +803,10 @@ mod tests {
})
.unwrap();
new.add_subject(112233).unwrap();
am.add(new).unwrap();
am.borrow_mut().add(new).unwrap();
// Write on an RWVA without admin access - deny
let mut req = AccessReq::new(&accessor, &path, Access::WRITE);
let mut req = AccessReq::new(&accessor, path.clone(), Access::WRITE);
req.set_target_perms(Access::RWVA);
assert_eq!(req.allow(), false);
@ -817,40 +819,40 @@ mod tests {
})
.unwrap();
new.add_subject(112233).unwrap();
am.add(new).unwrap();
am.borrow_mut().add(new).unwrap();
// Write on an RWVA with admin access - allow
let mut req = AccessReq::new(&accessor, &path, Access::WRITE);
let mut req = AccessReq::new(&accessor, path, Access::WRITE);
req.set_target_perms(Access::RWVA);
assert_eq!(req.allow(), true);
}
#[test]
fn test_delete_for_fabric() {
let am = Arc::new(AclMgr::new_with(false).unwrap());
am.erase_all();
let am = RefCell::new(AclMgr::new());
am.borrow_mut().erase_all().unwrap();
let path = GenericPath::new(Some(1), Some(1234), None);
let accessor2 = Accessor::new(2, AccessorSubjects::new(112233), AuthMode::Case, am.clone());
let mut req2 = AccessReq::new(&accessor2, &path, Access::READ);
let accessor2 = Accessor::new(2, AccessorSubjects::new(112233), AuthMode::Case, &am);
let mut req2 = AccessReq::new(&accessor2, path.clone(), Access::READ);
req2.set_target_perms(Access::RWVA);
let accessor3 = Accessor::new(3, AccessorSubjects::new(112233), AuthMode::Case, am.clone());
let mut req3 = AccessReq::new(&accessor3, &path, Access::READ);
let accessor3 = Accessor::new(3, AccessorSubjects::new(112233), AuthMode::Case, &am);
let mut req3 = AccessReq::new(&accessor3, path, Access::READ);
req3.set_target_perms(Access::RWVA);
// Allow for subject match - target is wildcard - Fabric idx 2
let mut new = AclEntry::new(2, Privilege::VIEW, AuthMode::Case);
new.add_subject(112233).unwrap();
am.add(new).unwrap();
am.borrow_mut().add(new).unwrap();
// Allow for subject match - target is wildcard - Fabric idx 3
let mut new = AclEntry::new(3, Privilege::VIEW, AuthMode::Case);
new.add_subject(112233).unwrap();
am.add(new).unwrap();
am.borrow_mut().add(new).unwrap();
// Req for Fabric idx 2 gets denied, and that for Fabric idx 3 is allowed
assert_eq!(req2.allow(), true);
assert_eq!(req3.allow(), true);
am.delete_for_fabric(2).unwrap();
am.borrow_mut().delete_for_fabric(2).unwrap();
assert_eq!(req2.allow(), false);
assert_eq!(req3.allow(), true);
}

View file

@ -15,10 +15,14 @@
* limitations under the License.
*/
use time::OffsetDateTime;
use super::{CertConsumer, MAX_DEPTH};
use crate::error::Error;
use chrono::{Datelike, TimeZone, Utc};
use log::warn;
use crate::{
error::{Error, ErrorCode},
utils::epoch::MATTER_EPOCH_SECS,
};
use core::fmt::Write;
#[derive(Debug)]
pub struct ASN1Writer<'a> {
@ -52,7 +56,7 @@ impl<'a> ASN1Writer<'a> {
self.offset += size;
return Ok(());
}
Err(Error::NoSpace)
Err(ErrorCode::NoSpace.into())
}
pub fn append_tlv<F>(&mut self, tag: u8, len: usize, f: F) -> Result<(), Error>
@ -68,7 +72,7 @@ impl<'a> ASN1Writer<'a> {
self.offset += len;
return Ok(());
}
Err(Error::NoSpace)
Err(ErrorCode::NoSpace.into())
}
fn add_compound(&mut self, val: u8) -> Result<(), Error> {
@ -78,7 +82,7 @@ impl<'a> ASN1Writer<'a> {
self.depth[self.current_depth] = self.offset;
self.current_depth += 1;
if self.current_depth >= MAX_DEPTH {
Err(Error::NoSpace)
Err(ErrorCode::NoSpace.into())
} else {
Ok(())
}
@ -111,7 +115,7 @@ impl<'a> ASN1Writer<'a> {
fn end_compound(&mut self) -> Result<(), Error> {
if self.current_depth == 0 {
return Err(Error::Invalid);
Err(ErrorCode::Invalid)?;
}
let seq_len = self.get_compound_len();
let write_offset = self.get_length_encoding_offset();
@ -146,7 +150,7 @@ impl<'a> ASN1Writer<'a> {
// This is done with an 0xA2 followed by 2 bytes of actual len
3
} else {
return Err(Error::NoSpace);
Err(ErrorCode::NoSpace)?
};
Ok(len)
}
@ -260,29 +264,39 @@ impl<'a> CertConsumer for ASN1Writer<'a> {
self.write_str(0x06, oid)
}
fn utctime(&mut self, _tag: &str, epoch: u32) -> Result<(), Error> {
let mut matter_epoch = Utc
.with_ymd_and_hms(2000, 1, 1, 0, 0, 0)
.unwrap()
.timestamp();
fn utctime(&mut self, _tag: &str, epoch: u64) -> Result<(), Error> {
let matter_epoch = MATTER_EPOCH_SECS + epoch;
matter_epoch += epoch as i64;
let dt = OffsetDateTime::from_unix_timestamp(matter_epoch as _).unwrap();
let dt = match Utc.timestamp_opt(matter_epoch, 0) {
chrono::LocalResult::None => return Err(Error::InvalidTime),
chrono::LocalResult::Single(s) => s,
chrono::LocalResult::Ambiguous(_, a) => {
warn!("Ambiguous time for epoch {epoch}; returning latest timestamp: {a}");
a
}
};
let mut time_str: heapless::String<32> = heapless::String::<32>::new();
if dt.year() >= 2050 {
// If year is >= 2050, ASN.1 requires it to be Generalised Time
let time_str = format!("{}Z", dt.format("%Y%m%d%H%M%S"));
write!(
&mut time_str,
"{:04}{:02}{:02}{:02}{:02}{:02}Z",
dt.year(),
dt.month() as u8,
dt.day(),
dt.hour(),
dt.minute(),
dt.second()
)
.unwrap();
self.write_str(0x18, time_str.as_bytes())
} else {
let time_str = format!("{}Z", dt.format("%y%m%d%H%M%S"));
write!(
&mut time_str,
"{:02}{:02}{:02}{:02}{:02}{:02}Z",
dt.year() % 100,
dt.month() as u8,
dt.day(),
dt.hour(),
dt.minute(),
dt.second()
)
.unwrap();
self.write_str(0x17, time_str.as_bytes())
}
}


@ -15,13 +15,13 @@
* limitations under the License.
*/
use std::fmt;
use core::fmt::{self, Write};
use crate::{
crypto::{CryptoKeyPair, KeyPair},
error::Error,
tlv::{self, FromTLV, TLVArrayOwned, TLVElement, TLVWriter, TagType, ToTLV},
utils::writebuf::WriteBuf,
crypto::KeyPair,
error::{Error, ErrorCode},
tlv::{self, FromTLV, OctetStr, TLVArray, TLVElement, TLVWriter, TagType, ToTLV},
utils::{epoch::MATTER_CERT_DOESNT_EXPIRE, writebuf::WriteBuf},
};
use log::error;
use num_derive::FromPrimitive;
@ -29,6 +29,8 @@ use num_derive::FromPrimitive;
pub use self::asn1_writer::ASN1Writer;
use self::printer::CertPrinter;
pub const MAX_CERT_TLV_LEN: usize = 1024; // TODO
// As per https://datatracker.ietf.org/doc/html/rfc5280
const OID_PUB_KEY_ECPUBKEY: [u8; 7] = [0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x02, 0x01];
@ -113,8 +115,10 @@ macro_rules! add_if {
};
}
fn get_print_str(key_usage: u16) -> String {
format!(
fn get_print_str(key_usage: u16) -> heapless::String<256> {
let mut string = heapless::String::new();
write!(
&mut string,
"{}{}{}{}{}{}{}{}{}",
add_if!(key_usage, KEY_USAGE_DIGITAL_SIGN, "digitalSignature "),
add_if!(key_usage, KEY_USAGE_NON_REPUDIATION, "nonRepudiation "),
@ -126,6 +130,9 @@ fn get_print_str(key_usage: u16) -> String {
add_if!(key_usage, KEY_USAGE_ENCIPHER_ONLY, "encipherOnly "),
add_if!(key_usage, KEY_USAGE_DECIPHER_ONLY, "decipherOnly "),
)
.unwrap();
string
}
#[allow(unused_assignments)]
@ -137,7 +144,7 @@ fn encode_key_usage(key_usage: u16, w: &mut dyn CertConsumer) -> Result<(), Erro
}
fn encode_extended_key_usage(
list: &TLVArrayOwned<u8>,
list: impl Iterator<Item = u8>,
w: &mut dyn CertConsumer,
) -> Result<(), Error> {
const OID_SERVER_AUTH: [u8; 8] = [0x2B, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x01];
@ -157,19 +164,18 @@ fn encode_extended_key_usage(
];
w.start_seq("")?;
for t in list.iter() {
let t = *t as usize;
for t in list {
let t = t as usize;
if t > 0 && t <= encoding.len() {
w.oid(encoding[t].0, encoding[t].1)?;
} else {
error!("Skipping encoding key usage out of bounds");
}
}
w.end_seq()?;
Ok(())
w.end_seq()
}
#[derive(FromTLV, ToTLV, Default)]
#[derive(FromTLV, ToTLV, Default, Debug, PartialEq)]
#[tlvargs(start = 1)]
struct BasicConstraints {
is_ca: bool,
@ -209,18 +215,18 @@ fn encode_extension_end(w: &mut dyn CertConsumer) -> Result<(), Error> {
w.end_seq()
}
#[derive(FromTLV, ToTLV, Default)]
#[tlvargs(start = 1, datatype = "list")]
struct Extensions {
#[derive(FromTLV, ToTLV, Default, Debug, PartialEq)]
#[tlvargs(lifetime = "'a", start = 1, datatype = "list", unordered)]
struct Extensions<'a> {
basic_const: Option<BasicConstraints>,
key_usage: Option<u16>,
ext_key_usage: Option<TLVArrayOwned<u8>>,
subj_key_id: Option<Vec<u8>>,
auth_key_id: Option<Vec<u8>>,
future_extensions: Option<Vec<u8>>,
ext_key_usage: Option<TLVArray<'a, u8>>,
subj_key_id: Option<OctetStr<'a>>,
auth_key_id: Option<OctetStr<'a>>,
future_extensions: Option<OctetStr<'a>>,
}
impl Extensions {
impl<'a> Extensions<'a> {
fn encode(&self, w: &mut dyn CertConsumer) -> Result<(), Error> {
const OID_BASIC_CONSTRAINTS: [u8; 3] = [0x55, 0x1D, 0x13];
const OID_KEY_USAGE: [u8; 3] = [0x55, 0x1D, 0x0F];
@ -242,30 +248,29 @@ impl Extensions {
}
if let Some(t) = &self.ext_key_usage {
encode_extension_start("X509v3 Extended Key Usage", true, &OID_EXT_KEY_USAGE, w)?;
encode_extended_key_usage(t, w)?;
encode_extended_key_usage(t.iter(), w)?;
encode_extension_end(w)?;
}
if let Some(t) = &self.subj_key_id {
encode_extension_start("Subject Key ID", false, &OID_SUBJ_KEY_IDENTIFIER, w)?;
w.ostr("", t.as_slice())?;
w.ostr("", t.0)?;
encode_extension_end(w)?;
}
if let Some(t) = &self.auth_key_id {
encode_extension_start("Auth Key ID", false, &OID_AUTH_KEY_ID, w)?;
w.start_seq("")?;
w.ctx("", 0, t.as_slice())?;
w.ctx("", 0, t.0)?;
w.end_seq()?;
encode_extension_end(w)?;
}
if let Some(t) = &self.future_extensions {
error!("Future Extensions Not Yet Supported: {:x?}", t.as_slice())
error!("Future Extensions Not Yet Supported: {:x?}", t.0);
}
w.end_seq()?;
w.end_ctx()?;
Ok(())
}
}
const MAX_DN_ENTRIES: usize = 5;
#[derive(FromPrimitive, Copy, Clone)]
enum DnTags {
@ -293,20 +298,23 @@ enum DnTags {
NocCat = 22,
}
enum DistNameValue {
#[derive(Debug, PartialEq)]
enum DistNameValue<'a> {
Uint(u64),
Utf8Str(Vec<u8>),
PrintableStr(Vec<u8>),
Utf8Str(&'a [u8]),
PrintableStr(&'a [u8]),
}
#[derive(Default)]
struct DistNames {
const MAX_DN_ENTRIES: usize = 5;
#[derive(Default, Debug, PartialEq)]
struct DistNames<'a> {
// The order in which the DNs arrive is important, as the signing
// requires that the ASN1 notation retains the same order
dn: Vec<(u8, DistNameValue)>,
dn: heapless::Vec<(u8, DistNameValue<'a>), MAX_DN_ENTRIES>,
}
impl DistNames {
impl<'a> DistNames<'a> {
fn u64(&self, match_id: DnTags) -> Option<u64> {
self.dn
.iter()
@ -336,24 +344,27 @@ impl DistNames {
const PRINTABLE_STR_THRESHOLD: u8 = 0x80;
impl<'a> FromTLV<'a> for DistNames {
impl<'a> FromTLV<'a> for DistNames<'a> {
fn from_tlv(t: &TLVElement<'a>) -> Result<Self, Error> {
let mut d = Self {
dn: Vec::with_capacity(MAX_DN_ENTRIES),
dn: heapless::Vec::new(),
};
let iter = t.confirm_list()?.enter().ok_or(Error::Invalid)?;
let iter = t.confirm_list()?.enter().ok_or(ErrorCode::Invalid)?;
for t in iter {
if let TagType::Context(tag) = t.get_tag() {
if let Ok(value) = t.u64() {
d.dn.push((tag, DistNameValue::Uint(value)));
d.dn.push((tag, DistNameValue::Uint(value)))
.map_err(|_| ErrorCode::BufferTooSmall)?;
} else if let Ok(value) = t.slice() {
if tag > PRINTABLE_STR_THRESHOLD {
d.dn.push((
tag - PRINTABLE_STR_THRESHOLD,
DistNameValue::PrintableStr(value.to_vec()),
));
DistNameValue::PrintableStr(value),
))
.map_err(|_| ErrorCode::BufferTooSmall)?;
} else {
d.dn.push((tag, DistNameValue::Utf8Str(value.to_vec())));
d.dn.push((tag, DistNameValue::Utf8Str(value)))
.map_err(|_| ErrorCode::BufferTooSmall)?;
}
}
}
@ -362,24 +373,23 @@ impl<'a> FromTLV<'a> for DistNames {
}
}
impl ToTLV for DistNames {
impl<'a> ToTLV for DistNames<'a> {
fn to_tlv(&self, tw: &mut TLVWriter, tag: TagType) -> Result<(), Error> {
tw.start_list(tag)?;
for (name, value) in &self.dn {
match value {
DistNameValue::Uint(v) => tw.u64(TagType::Context(*name), *v)?,
DistNameValue::Utf8Str(v) => tw.utf8(TagType::Context(*name), v.as_slice())?,
DistNameValue::PrintableStr(v) => tw.utf8(
TagType::Context(*name + PRINTABLE_STR_THRESHOLD),
v.as_slice(),
)?,
DistNameValue::Utf8Str(v) => tw.utf8(TagType::Context(*name), v)?,
DistNameValue::PrintableStr(v) => {
tw.utf8(TagType::Context(*name + PRINTABLE_STR_THRESHOLD), v)?
}
}
}
tw.end_container()
}
}
impl DistNames {
impl<'a> DistNames<'a> {
fn encode(&self, tag: &str, w: &mut dyn CertConsumer) -> Result<(), Error> {
const OID_COMMON_NAME: [u8; 3] = [0x55_u8, 0x04, 0x03];
const OID_SURNAME: [u8; 3] = [0x55_u8, 0x04, 0x04];
@ -509,52 +519,60 @@ fn encode_dn_value(
w.oid(name, oid)?;
match value {
DistNameValue::Uint(v) => match expected_len {
Some(IntToStringLen::Len16) => w.utf8str("", format!("{:016X}", v).as_str())?,
Some(IntToStringLen::Len8) => w.utf8str("", format!("{:08X}", v).as_str())?,
Some(IntToStringLen::Len16) => {
let mut string = heapless::String::<32>::new();
write!(&mut string, "{:016X}", v).unwrap();
w.utf8str("", &string)?
}
Some(IntToStringLen::Len8) => {
let mut string = heapless::String::<32>::new();
write!(&mut string, "{:08X}", v).unwrap();
w.utf8str("", &string)?
}
_ => {
error!("Invalid encoding");
return Err(Error::Invalid);
Err(ErrorCode::Invalid)?
}
},
DistNameValue::Utf8Str(v) => {
let str = String::from_utf8(v.to_vec())?;
w.utf8str("", &str)?;
w.utf8str("", core::str::from_utf8(v)?)?;
}
DistNameValue::PrintableStr(v) => {
let str = String::from_utf8(v.to_vec())?;
w.printstr("", &str)?;
w.printstr("", core::str::from_utf8(v)?)?;
}
}
w.end_seq()?;
w.end_set()
}
#[derive(FromTLV, ToTLV, Default)]
#[tlvargs(start = 1)]
pub struct Cert {
serial_no: Vec<u8>,
#[derive(FromTLV, ToTLV, Default, Debug, PartialEq)]
#[tlvargs(lifetime = "'a", start = 1)]
pub struct Cert<'a> {
serial_no: OctetStr<'a>,
sign_algo: u8,
issuer: DistNames,
issuer: DistNames<'a>,
not_before: u32,
not_after: u32,
subject: DistNames,
subject: DistNames<'a>,
pubkey_algo: u8,
ec_curve_id: u8,
pubkey: Vec<u8>,
extensions: Extensions,
signature: Vec<u8>,
pubkey: OctetStr<'a>,
extensions: Extensions<'a>,
signature: OctetStr<'a>,
}
// TODO: Instead of parsing the TLVs every time, we should just cache this, but the encoding
// rules in terms of sequence may get complicated. Need to look into this
impl Cert {
pub fn new(cert_bin: &[u8]) -> Result<Self, Error> {
impl<'a> Cert<'a> {
pub fn new(cert_bin: &'a [u8]) -> Result<Self, Error> {
let root = tlv::get_root_node(cert_bin)?;
Cert::from_tlv(&root)
}
pub fn get_node_id(&self) -> Result<u64, Error> {
self.subject.u64(DnTags::NodeId).ok_or(Error::NoNodeId)
self.subject
.u64(DnTags::NodeId)
.ok_or_else(|| Error::from(ErrorCode::NoNodeId))
}
pub fn get_cat_ids(&self, output: &mut [u32]) {
@ -562,21 +580,27 @@ impl Cert {
}
pub fn get_fabric_id(&self) -> Result<u64, Error> {
self.subject.u64(DnTags::FabricId).ok_or(Error::NoFabricId)
self.subject
.u64(DnTags::FabricId)
.ok_or_else(|| Error::from(ErrorCode::NoFabricId))
}
pub fn get_pubkey(&self) -> &[u8] {
self.pubkey.as_slice()
self.pubkey.0
}
pub fn get_subject_key_id(&self) -> Result<&[u8], Error> {
self.extensions.subj_key_id.as_deref().ok_or(Error::Invalid)
if let Some(id) = self.extensions.subj_key_id.as_ref() {
Ok(id.0)
} else {
Err(ErrorCode::Invalid.into())
}
}
pub fn is_authority(&self, their: &Cert) -> Result<bool, Error> {
if let Some(our_auth_key) = &self.extensions.auth_key_id {
let their_subject = their.get_subject_key_id()?;
if our_auth_key == their_subject {
if our_auth_key.0 == their_subject {
Ok(true)
} else {
Ok(false)
@ -587,11 +611,11 @@ impl Cert {
}
pub fn get_signature(&self) -> &[u8] {
self.signature.as_slice()
self.signature.0
}
pub fn as_tlv(&self, buf: &mut [u8]) -> Result<usize, Error> {
let mut wb = WriteBuf::new(buf, buf.len());
let mut wb = WriteBuf::new(buf);
let mut tw = TLVWriter::new(&mut wb);
self.to_tlv(&mut tw, TagType::Anonymous)?;
Ok(wb.as_slice().len())
@ -614,10 +638,10 @@ impl Cert {
w.integer("", &[2])?;
w.end_ctx()?;
w.integer("Serial Num:", self.serial_no.as_slice())?;
w.integer("Serial Num:", self.serial_no.0)?;
w.start_seq("Signature Algorithm:")?;
let (str, oid) = match get_sign_algo(self.sign_algo).ok_or(Error::Invalid)? {
let (str, oid) = match get_sign_algo(self.sign_algo).ok_or(ErrorCode::Invalid)? {
SignAlgoValue::ECDSAWithSHA256 => ("ECDSA with SHA256", OID_ECDSA_WITH_SHA256),
};
w.oid(str, &oid)?;
@ -626,25 +650,31 @@ impl Cert {
self.issuer.encode("Issuer:", w)?;
w.start_seq("Validity:")?;
w.utctime("Not Before:", self.not_before)?;
w.utctime("Not After:", self.not_after)?;
w.utctime("Not Before:", self.not_before.into())?;
if self.not_after == 0 {
// As per the spec, a Not-After value of 0 indicates no well-defined
// expiration date and should be encoded as a GeneralizedTime of 99991231235959Z
w.utctime("Not After:", MATTER_CERT_DOESNT_EXPIRE)?;
} else {
w.utctime("Not After:", self.not_after.into())?;
}
w.end_seq()?;
self.subject.encode("Subject:", w)?;
w.start_seq("")?;
w.start_seq("Public Key Algorithm")?;
let (str, pub_key) = match get_pubkey_algo(self.pubkey_algo).ok_or(Error::Invalid)? {
let (str, pub_key) = match get_pubkey_algo(self.pubkey_algo).ok_or(ErrorCode::Invalid)? {
PubKeyAlgoValue::EcPubKey => ("ECPubKey", OID_PUB_KEY_ECPUBKEY),
};
w.oid(str, &pub_key)?;
let (str, curve_id) = match get_ec_curve_id(self.ec_curve_id).ok_or(Error::Invalid)? {
let (str, curve_id) = match get_ec_curve_id(self.ec_curve_id).ok_or(ErrorCode::Invalid)? {
EcCurveIdValue::Prime256V1 => ("Prime256v1", OID_EC_TYPE_PRIME256V1),
};
w.oid(str, &curve_id)?;
w.end_seq()?;
w.bitstr("Public-Key:", false, self.pubkey.as_slice())?;
w.bitstr("Public-Key:", false, self.pubkey.0)?;
w.end_seq()?;
self.extensions.encode(w)?;
@ -655,7 +685,7 @@ impl Cert {
}
}
impl fmt::Display for Cert {
impl<'a> fmt::Display for Cert<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut printer = CertPrinter::new(f);
let _ = self
@ -667,7 +697,7 @@ impl fmt::Display for Cert {
}
pub struct CertVerifier<'a> {
cert: &'a Cert,
cert: &'a Cert<'a>,
}
impl<'a> CertVerifier<'a> {
@ -677,7 +707,7 @@ impl<'a> CertVerifier<'a> {
pub fn add_cert(self, parent: &'a Cert) -> Result<CertVerifier<'a>, Error> {
if !self.cert.is_authority(parent)? {
return Err(Error::InvalidAuthKey);
Err(ErrorCode::InvalidAuthKey)?;
}
let mut asn1 = [0u8; MAX_ASN1_CERT_SIZE];
let len = self.cert.as_asn1(&mut asn1)?;
@ -686,8 +716,9 @@ impl<'a> CertVerifier<'a> {
let k = KeyPair::new_from_public(parent.get_pubkey())?;
k.verify_msg(asn1, self.cert.get_signature()).map_err(|e| {
error!(
"Error in signature verification of certificate: {:x?}",
self.cert.get_subject_key_id()
"Error in signature verification of certificate: {:x?} by {:x?}",
self.cert.get_subject_key_id(),
parent.get_subject_key_id()
);
e
})?;
@ -720,7 +751,7 @@ pub trait CertConsumer {
fn start_ctx(&mut self, tag: &str, id: u8) -> Result<(), Error>;
fn end_ctx(&mut self) -> Result<(), Error>;
fn oid(&mut self, tag: &str, oid: &[u8]) -> Result<(), Error>;
fn utctime(&mut self, tag: &str, epoch: u32) -> Result<(), Error>;
fn utctime(&mut self, tag: &str, epoch: u64) -> Result<(), Error>;
}
const MAX_DEPTH: usize = 10;
@ -731,8 +762,9 @@ mod printer;
#[cfg(test)]
mod tests {
use log::info;
use crate::cert::Cert;
use crate::error::Error;
use crate::tlv::{self, FromTLV, TLVWriter, TagType, ToTLV};
use crate::utils::writebuf::WriteBuf;
@ -777,49 +809,74 @@ mod tests {
#[test]
fn test_verify_chain_incomplete() {
// The chain doesn't lead up to a self-signed certificate
use crate::error::ErrorCode;
let noc = Cert::new(&test_vectors::NOC1_SUCCESS).unwrap();
let icac = Cert::new(&test_vectors::ICAC1_SUCCESS).unwrap();
let a = noc.verify_chain_start();
assert_eq!(
Err(Error::InvalidAuthKey),
a.add_cert(&icac).unwrap().finalise()
Err(ErrorCode::InvalidAuthKey),
a.add_cert(&icac).unwrap().finalise().map_err(|e| e.code())
);
}
#[test]
fn test_auth_key_chain_incorrect() {
use crate::error::ErrorCode;
let noc = Cert::new(&test_vectors::NOC1_AUTH_KEY_FAIL).unwrap();
let icac = Cert::new(&test_vectors::ICAC1_SUCCESS).unwrap();
let a = noc.verify_chain_start();
assert_eq!(Err(Error::InvalidAuthKey), a.add_cert(&icac).map(|_| ()));
assert_eq!(
Err(ErrorCode::InvalidAuthKey),
a.add_cert(&icac).map(|_| ()).map_err(|e| e.code())
);
}
#[test]
fn test_zero_value_of_not_after_field() {
let noc = Cert::new(&test_vectors::NOC_NOT_AFTER_ZERO).unwrap();
let rca = Cert::new(&test_vectors::RCA_FOR_NOC_NOT_AFTER_ZERO).unwrap();
let v = noc.verify_chain_start();
let v = v.add_cert(&rca).unwrap();
v.finalise().unwrap();
}
#[test]
fn test_cert_corrupted() {
use crate::error::ErrorCode;
let noc = Cert::new(&test_vectors::NOC1_CORRUPT_CERT).unwrap();
let icac = Cert::new(&test_vectors::ICAC1_SUCCESS).unwrap();
let a = noc.verify_chain_start();
assert_eq!(Err(Error::InvalidSignature), a.add_cert(&icac).map(|_| ()));
assert_eq!(
Err(ErrorCode::InvalidSignature),
a.add_cert(&icac).map(|_| ()).map_err(|e| e.code())
);
}
#[test]
fn test_tlv_conversions() {
let test_input: [&[u8]; 3] = [
let test_input: [&[u8]; 4] = [
&test_vectors::NOC1_SUCCESS,
&test_vectors::ICAC1_SUCCESS,
&test_vectors::ICAC2_SUCCESS,
&test_vectors::RCA1_SUCCESS,
];
for input in test_input.iter() {
println!("Testing next input...");
info!("Testing next input...");
let root = tlv::get_root_node(input).unwrap();
let cert = Cert::from_tlv(&root).unwrap();
let mut buf = [0u8; 1024];
let buf_len = buf.len();
let mut wb = WriteBuf::new(&mut buf, buf_len);
let mut wb = WriteBuf::new(&mut buf);
let mut tw = TLVWriter::new(&mut wb);
cert.to_tlv(&mut tw, TagType::Anonymous).unwrap();
assert_eq!(*input, wb.as_slice());
let root2 = tlv::get_root_node(wb.as_slice()).unwrap();
let cert2 = Cert::from_tlv(&root2).unwrap();
assert_eq!(cert, cert2);
}
}
@ -858,6 +915,23 @@ mod tests {
89, 175, 253, 78, 212, 7, 69, 207, 140, 45, 129, 249, 64, 104, 70, 68, 43, 164, 19,
126, 114, 138, 79, 104, 238, 20, 226, 88, 118, 105, 56, 12, 92, 31, 171, 24,
];
// This cert has two fields in the extensions list swapped so that their tag numbers are non-consecutive
pub const ICAC2_SUCCESS: [u8; 263] = [
21, 48, 1, 16, 67, 38, 73, 198, 26, 31, 20, 101, 57, 46, 16, 143, 77, 160, 128, 161,
36, 2, 1, 55, 3, 39, 20, 255, 90, 200, 17, 145, 105, 71, 215, 24, 38, 4, 123, 59, 211,
42, 38, 5, 35, 11, 27, 52, 55, 6, 39, 19, 254, 111, 27, 53, 189, 134, 103, 200, 24, 36,
7, 1, 36, 8, 1, 48, 9, 65, 4, 88, 188, 13, 87, 50, 3, 213, 248, 182, 12, 240, 164, 220,
127, 150, 65, 81, 244, 125, 24, 48, 203, 83, 111, 133, 175, 182, 10, 40, 80, 147, 28,
39, 121, 183, 61, 159, 178, 231, 133, 75, 189, 143, 136, 191, 254, 115, 228, 186, 129,
56, 137, 213, 177, 13, 46, 97, 202, 95, 41, 5, 16, 24, 228, 55, 10, 53, 1, 41, 1, 36,
2, 0, 24, 48, 5, 20, 243, 119, 107, 152, 3, 212, 205, 76, 85, 38, 158, 240, 27, 213,
11, 235, 33, 21, 38, 5, 48, 4, 20, 88, 240, 172, 159, 2, 82, 193, 71, 83, 67, 184, 97,
99, 61, 125, 67, 232, 202, 171, 107, 36, 2, 96, 24, 48, 11, 64, 70, 43, 150, 195, 194,
170, 43, 125, 91, 213, 210, 221, 175, 131, 131, 85, 22, 247, 213, 18, 101, 189, 30,
134, 20, 226, 217, 145, 41, 225, 181, 150, 28, 200, 52, 237, 218, 195, 144, 209, 205,
73, 88, 114, 139, 216, 85, 170, 63, 238, 164, 69, 35, 69, 39, 87, 211, 234, 57, 98, 19,
43, 13, 0, 24,
];
// A single byte in the auth key id is changed in this cert
pub const NOC1_AUTH_KEY_FAIL: [u8; 247] = [
0x15, 0x30, 0x1, 0x1, 0x1, 0x24, 0x2, 0x1, 0x37, 0x3, 0x24, 0x13, 0x1, 0x24, 0x15, 0x1,
@ -1076,5 +1150,47 @@ mod tests {
0x16, 0x80, 0x14, 0x72, 0xc2, 0x01, 0xf7, 0x57, 0x19, 0x13, 0xb3, 0x48, 0xca, 0x00,
0xca, 0x7b, 0x45, 0xf4, 0x77, 0x46, 0x68, 0xc9, 0x7e,
];
/// An NOC that contains a Not-After validity field of '0'
pub const NOC_NOT_AFTER_ZERO: [u8; 251] = [
0x15, 0x30, 0x1, 0x1, 0x1, 0x24, 0x2, 0x1, 0x37, 0x3, 0x27, 0x14, 0xfc, 0x8d, 0xcf,
0x45, 0x19, 0xff, 0x9a, 0x9a, 0x24, 0x15, 0x1, 0x18, 0x26, 0x4, 0x21, 0x39, 0x5a, 0x2c,
0x24, 0x5, 0x0, 0x37, 0x6, 0x24, 0x15, 0x1, 0x26, 0x11, 0x6c, 0x4a, 0x95, 0xd2, 0x18,
0x24, 0x7, 0x1, 0x24, 0x8, 0x1, 0x30, 0x9, 0x41, 0x4, 0x41, 0x7f, 0xb1, 0x61, 0xb0,
0xbe, 0x19, 0x41, 0x81, 0xb9, 0x9f, 0xe8, 0x7b, 0xdd, 0xdf, 0xc4, 0x46, 0xe0, 0x74,
0xba, 0x83, 0x21, 0xda, 0x3d, 0xf7, 0x88, 0x68, 0x14, 0xa6, 0x9d, 0xa9, 0x14, 0x88,
0x94, 0x1e, 0xd3, 0x86, 0x62, 0xc7, 0x6f, 0xb4, 0x79, 0xd2, 0xaf, 0x34, 0xe7, 0xd6,
0x4d, 0x87, 0x29, 0x67, 0x10, 0x73, 0xb9, 0x81, 0xe0, 0x9, 0xe1, 0x13, 0xbb, 0x6a,
0xd2, 0x21, 0xaa, 0x37, 0xa, 0x35, 0x1, 0x28, 0x1, 0x18, 0x24, 0x2, 0x1, 0x36, 0x3,
0x4, 0x2, 0x4, 0x1, 0x18, 0x30, 0x4, 0x14, 0x98, 0xaf, 0xa1, 0x3d, 0x41, 0x67, 0x7a,
0x34, 0x8c, 0x67, 0x6c, 0xcc, 0x17, 0x6e, 0xd5, 0x58, 0xd8, 0x2b, 0x86, 0x8, 0x30, 0x5,
0x14, 0xf8, 0xcf, 0xd0, 0x45, 0x6b, 0xe, 0xd1, 0x6f, 0xc5, 0x67, 0xdf, 0x81, 0xd7,
0xe9, 0xb7, 0xeb, 0x39, 0x78, 0xec, 0x40, 0x18, 0x30, 0xb, 0x40, 0xf9, 0x80, 0x94,
0xbf, 0xcf, 0x72, 0xa5, 0x54, 0x87, 0x12, 0x35, 0xc, 0x38, 0x79, 0xa8, 0xb, 0x21, 0x94,
0xb5, 0x71, 0x2, 0xcb, 0xb, 0xda, 0xf9, 0x6c, 0x54, 0xcb, 0x50, 0x4b, 0x2, 0x5, 0xea,
0xff, 0xfd, 0xb2, 0x1b, 0x24, 0x30, 0x79, 0xb1, 0x69, 0x87, 0xa5, 0x7, 0xc6, 0x76,
0x15, 0x70, 0xc0, 0xec, 0x14, 0xd3, 0x9f, 0x1a, 0xa7, 0xe1, 0xca, 0x25, 0x2e, 0x44,
0xfc, 0x96, 0x4d, 0x18,
];
pub const RCA_FOR_NOC_NOT_AFTER_ZERO: [u8; 251] = [
0x15, 0x30, 0x1, 0x1, 0x0, 0x24, 0x2, 0x1, 0x37, 0x3, 0x27, 0x14, 0xfc, 0x8d, 0xcf,
0x45, 0x19, 0xff, 0x9a, 0x9a, 0x24, 0x15, 0x1, 0x18, 0x26, 0x4, 0xb1, 0x2a, 0x38, 0x2c,
0x26, 0x5, 0x31, 0x5e, 0x19, 0x2e, 0x37, 0x6, 0x27, 0x14, 0xfc, 0x8d, 0xcf, 0x45, 0x19,
0xff, 0x9a, 0x9a, 0x24, 0x15, 0x1, 0x18, 0x24, 0x7, 0x1, 0x24, 0x8, 0x1, 0x30, 0x9,
0x41, 0x4, 0x15, 0x69, 0x1e, 0x7b, 0x6a, 0xea, 0x5, 0xdb, 0xf8, 0x4b, 0xfd, 0xdc, 0x6c,
0x75, 0x46, 0x74, 0xb0, 0x60, 0xdb, 0x4, 0x71, 0xb6, 0xd0, 0x52, 0xf2, 0xf8, 0xe6,
0xbb, 0xd, 0xe5, 0x60, 0x1f, 0x84, 0x66, 0x4f, 0x3c, 0x90, 0x89, 0xa6, 0xc6, 0x99,
0x61, 0xfb, 0x89, 0xf7, 0xa, 0xa6, 0xe4, 0xa2, 0x21, 0xd3, 0x37, 0x30, 0x1b, 0xd2,
0x11, 0xc5, 0xcc, 0x0, 0xf4, 0x7a, 0x14, 0xfc, 0x3c, 0x37, 0xa, 0x35, 0x1, 0x29, 0x1,
0x18, 0x24, 0x2, 0x60, 0x30, 0x4, 0x14, 0xf8, 0xcf, 0xd0, 0x45, 0x6b, 0xe, 0xd1, 0x6f,
0xc5, 0x67, 0xdf, 0x81, 0xd7, 0xe9, 0xb7, 0xeb, 0x39, 0x78, 0xec, 0x40, 0x30, 0x5,
0x14, 0xf8, 0xcf, 0xd0, 0x45, 0x6b, 0xe, 0xd1, 0x6f, 0xc5, 0x67, 0xdf, 0x81, 0xd7,
0xe9, 0xb7, 0xeb, 0x39, 0x78, 0xec, 0x40, 0x18, 0x30, 0xb, 0x40, 0x4c, 0xae, 0xac,
0xc1, 0x26, 0xdd, 0x56, 0xc, 0x85, 0x86, 0xbc, 0xeb, 0xa2, 0xb5, 0xb7, 0xdf, 0x49,
0x92, 0x62, 0xcd, 0x2a, 0xb6, 0x4e, 0xc5, 0x31, 0x7c, 0xd9, 0xb, 0x1c, 0xe9, 0x6e,
0xe5, 0x82, 0xc7, 0xb8, 0xda, 0x22, 0x31, 0x7b, 0x23, 0x5a, 0x2a, 0xe6, 0x76, 0x28,
0xb6, 0xd4, 0xc7, 0x7b, 0x1c, 0x9c, 0x85, 0x71, 0x5f, 0xe6, 0xf6, 0x21, 0x50, 0x5c,
0xa7, 0x7c, 0xc7, 0x1d, 0x9a, 0x18,
];
}
}


@ -15,11 +15,11 @@
* limitations under the License.
*/
use time::OffsetDateTime;
use super::{CertConsumer, MAX_DEPTH};
use crate::error::Error;
use chrono::{TimeZone, Utc};
use log::warn;
use std::fmt;
use crate::{error::Error, utils::epoch::MATTER_EPOCH_SECS};
use core::fmt;
pub struct CertPrinter<'a, 'b> {
level: usize,
@ -122,24 +122,12 @@ impl<'a, 'b> CertConsumer for CertPrinter<'a, 'b> {
}
Ok(())
}
fn utctime(&mut self, tag: &str, epoch: u32) -> Result<(), Error> {
let mut matter_epoch = Utc
.with_ymd_and_hms(2000, 1, 1, 0, 0, 0)
.unwrap()
.timestamp();
fn utctime(&mut self, tag: &str, epoch: u64) -> Result<(), Error> {
let matter_epoch = MATTER_EPOCH_SECS + epoch;
matter_epoch += epoch as i64;
let dt = OffsetDateTime::from_unix_timestamp(matter_epoch as _).unwrap();
let dt = match Utc.timestamp_opt(matter_epoch, 0) {
chrono::LocalResult::None => return Err(Error::InvalidTime),
chrono::LocalResult::Single(s) => s,
chrono::LocalResult::Ambiguous(_, a) => {
warn!("Ambiguous time for epoch {epoch}; returning latest timestamp: {a}");
a
}
};
let _ = writeln!(self.f, "{} {} {}", SPACE[self.level], tag, dt);
let _ = writeln!(self.f, "{} {} {:?}", SPACE[self.level], tag, dt);
Ok(())
}
}


@ -0,0 +1,242 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//! Base38 encoding and decoding functions.
use crate::error::{Error, ErrorCode};
const BASE38_CHARS: [char; 38] = [
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I',
'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '-', '.',
];
const UNUSED: u8 = 255;
// map of base38 character to numeric value
// subtract 45 from the character, then index into this array, if possible
const DECODE_BASE38: [u8; 46] = [
36, // '-', =45
37, // '.', =46
UNUSED, // '/', =47
0, // '0', =48
1, // '1', =49
2, // '2', =50
3, // '3', =51
4, // '4', =52
5, // '5', =53
6, // '6', =54
7, // '7', =55
8, // '8', =56
9, // '9', =57
UNUSED, // ':', =58
UNUSED, // ';', =59
UNUSED, // '<', =60
UNUSED, // '=', =61
UNUSED, // '>', =62
UNUSED, // '?', =63
UNUSED, // '@', =64
10, // 'A', =65
11, // 'B', =66
12, // 'C', =67
13, // 'D', =68
14, // 'E', =69
15, // 'F', =70
16, // 'G', =71
17, // 'H', =72
18, // 'I', =73
19, // 'J', =74
20, // 'K', =75
21, // 'L', =76
22, // 'M', =77
23, // 'N', =78
24, // 'O', =79
25, // 'P', =80
26, // 'Q', =81
27, // 'R', =82
28, // 'S', =83
29, // 'T', =84
30, // 'U', =85
31, // 'V', =86
32, // 'W', =87
33, // 'X', =88
34, // 'Y', =89
35, // 'Z', =90
];
const RADIX: u32 = BASE38_CHARS.len() as u32;
/// Encode a byte array into a base38 string.
///
/// # Arguments
/// * `bytes` - byte array to encode
pub fn encode_string<const N: usize>(bytes: &[u8]) -> Result<heapless::String<N>, Error> {
let mut string = heapless::String::new();
for c in encode(bytes) {
string.push(c).map_err(|_| ErrorCode::NoSpace)?;
}
Ok(string)
}
pub fn encode(bytes: &[u8]) -> impl Iterator<Item = char> + '_ {
(0..bytes.len() / 3)
.flat_map(move |index| {
let offset = index * 3;
encode_base38(
((bytes[offset + 2] as u32) << 16)
| ((bytes[offset + 1] as u32) << 8)
| (bytes[offset] as u32),
5,
)
})
.chain(
core::iter::once(bytes.len() % 3).flat_map(move |remainder| {
let offset = bytes.len() / 3 * 3;
match remainder {
2 => encode_base38(
((bytes[offset + 1] as u32) << 8) | (bytes[offset] as u32),
4,
),
1 => encode_base38(bytes[offset] as u32, 2),
_ => encode_base38(0, 0),
}
}),
)
}
fn encode_base38(mut value: u32, repeat: usize) -> impl Iterator<Item = char> {
(0..repeat).map(move |_| {
let remainder = value % RADIX;
let c = BASE38_CHARS[remainder as usize];
value = (value - remainder) / RADIX;
c
})
}
pub fn decode_vec<const N: usize>(base38_str: &str) -> Result<heapless::Vec<u8, N>, Error> {
let mut vec = heapless::Vec::new();
for byte in decode(base38_str) {
vec.push(byte?).map_err(|_| ErrorCode::NoSpace)?;
}
Ok(vec)
}
/// Decode a base38-encoded string into a byte slice
///
/// # Arguments
/// * `base38_str` - base38-encoded string to decode
///
/// Fails if the string contains invalid characters or if the supplied buffer is too small to fit the decoded data
pub fn decode(base38_str: &str) -> impl Iterator<Item = Result<u8, Error>> + '_ {
let stru = base38_str.as_bytes();
(0..stru.len() / 5)
.flat_map(move |index| {
let offset = index * 5;
decode_base38(&stru[offset..offset + 5])
})
.chain({
let offset = stru.len() / 5 * 5;
decode_base38(&stru[offset..])
})
.take_while(Result::is_ok)
}
fn decode_base38(chars: &[u8]) -> impl Iterator<Item = Result<u8, Error>> {
let mut value = 0u32;
let mut cerr = None;
let repeat = match chars.len() {
5 => 3,
4 => 2,
2 => 1,
0 => 0,
_ => -1,
};
if repeat >= 0 {
for c in chars.iter().rev() {
match decode_char(*c) {
Ok(v) => value = value * RADIX + v as u32,
Err(err) => {
cerr = Some(err.code());
break;
}
}
}
} else {
cerr = Some(ErrorCode::InvalidData)
}
(0..repeat)
.map(move |_| {
if let Some(err) = cerr {
Err(err.into())
} else {
let byte = (value & 0xff) as u8;
value >>= 8;
Ok(byte)
}
})
.take_while(Result::is_ok)
}
fn decode_char(c: u8) -> Result<u8, Error> {
if !(45..=90).contains(&c) {
Err(ErrorCode::InvalidData)?;
}
let c = DECODE_BASE38[c as usize - 45];
if c == UNUSED {
Err(ErrorCode::InvalidData)?;
}
Ok(c)
}
#[cfg(test)]
mod tests {
use super::*;
const ENCODED: &str = "-MOA57ZU02IT2L2BJ00";
const DECODED: [u8; 11] = [
0x88, 0xff, 0xa7, 0x91, 0x50, 0x40, 0x00, 0x47, 0x51, 0xdd, 0x02,
];
#[test]
fn can_base38_encode() {
assert_eq!(
encode_string::<{ ENCODED.len() }>(&DECODED).unwrap(),
ENCODED
);
}
#[test]
fn can_base38_decode() {
assert_eq!(
decode_vec::<{ DECODED.len() }>(ENCODED).expect("Cannot decode base38"),
DECODED
);
}
}
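For orientation, a small self-contained sketch of the packing scheme that `encode`/`encode_base38` implement: each 3-byte group is read little-endian into a 24-bit value and emitted as 5 base38 digits, least-significant digit first. It reproduces the first five characters of the `ENCODED` test vector (the character table is copied from above):

const BASE38_CHARS: [char; 38] = [
    '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
    'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
    '-', '.',
];

fn main() {
    // First 3-byte group of the DECODED test vector above.
    let bytes = [0x88u8, 0xff, 0xa7];
    // Pack little-endian into a 24-bit value...
    let mut value = ((bytes[2] as u32) << 16) | ((bytes[1] as u32) << 8) | bytes[0] as u32;
    // ...and emit 5 base38 digits, least-significant first.
    let mut out = String::new();
    for _ in 0..5 {
        out.push(BASE38_CHARS[(value % 38) as usize]);
        value /= 38;
    }
    assert_eq!(out, "-MOA5"); // matches the start of ENCODED
}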

rs-matter/src/core.rs (new file, 242 lines)

@ -0,0 +1,242 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use core::{borrow::Borrow, cell::RefCell};
use embassy_sync::{blocking_mutex::raw::NoopRawMutex, mutex::Mutex};
use crate::{
acl::AclMgr,
data_model::{
cluster_basic_information::BasicInfoConfig,
sdm::{dev_att::DevAttDataFetcher, failsafe::FailSafe},
},
error::*,
fabric::FabricMgr,
mdns::Mdns,
pairing::{print_pairing_code_and_qr, DiscoveryCapabilities},
secure_channel::{pake::PaseMgr, spake2p::VerifierData},
transport::{
exchange::{ExchangeCtx, MAX_EXCHANGES},
session::SessionMgr,
},
utils::{epoch::Epoch, rand::Rand, select::Notification},
};
/* The Matter Port */
pub const MATTER_PORT: u16 = 5540;
/// Device Commissioning Data
pub struct CommissioningData {
/// The password or verifier data required to authenticate
pub verifier: VerifierData,
/// The 12-bit discriminator used to differentiate between multiple devices
pub discriminator: u16,
}
/// The primary Matter Object
pub struct Matter<'a> {
fabric_mgr: RefCell<FabricMgr>,
pub acl_mgr: RefCell<AclMgr>, // Public for tests
pase_mgr: RefCell<PaseMgr>,
failsafe: RefCell<FailSafe>,
persist_notification: Notification,
pub(crate) send_notification: Notification,
mdns: &'a dyn Mdns,
pub(crate) epoch: Epoch,
pub(crate) rand: Rand,
dev_det: &'a BasicInfoConfig<'a>,
dev_att: &'a dyn DevAttDataFetcher,
pub(crate) port: u16,
pub(crate) exchanges: RefCell<heapless::Vec<ExchangeCtx, MAX_EXCHANGES>>,
pub(crate) ephemeral: RefCell<Option<ExchangeCtx>>,
pub(crate) ephemeral_mutex: Mutex<NoopRawMutex, ()>,
pub session_mgr: RefCell<SessionMgr>, // Public for tests
}
impl<'a> Matter<'a> {
#[cfg(feature = "std")]
#[inline(always)]
pub fn new_default(
dev_det: &'a BasicInfoConfig<'a>,
dev_att: &'a dyn DevAttDataFetcher,
mdns: &'a dyn Mdns,
port: u16,
) -> Self {
use crate::utils::epoch::sys_epoch;
use crate::utils::rand::sys_rand;
Self::new(dev_det, dev_att, mdns, sys_epoch, sys_rand, port)
}
/// Creates a new Matter object
///
/// # Parameters
/// * dev_att: An object that implements the trait [DevAttDataFetcher]. Any Matter device
/// requires a set of device attestation certificates and keys. It is the responsibility of
/// this object to return the device attestation details when queried.
#[inline(always)]
pub fn new(
dev_det: &'a BasicInfoConfig<'a>,
dev_att: &'a dyn DevAttDataFetcher,
mdns: &'a dyn Mdns,
epoch: Epoch,
rand: Rand,
port: u16,
) -> Self {
Self {
fabric_mgr: RefCell::new(FabricMgr::new()),
acl_mgr: RefCell::new(AclMgr::new()),
pase_mgr: RefCell::new(PaseMgr::new(epoch, rand)),
failsafe: RefCell::new(FailSafe::new()),
persist_notification: Notification::new(),
send_notification: Notification::new(),
mdns,
epoch,
rand,
dev_det,
dev_att,
port,
exchanges: RefCell::new(heapless::Vec::new()),
ephemeral: RefCell::new(None),
ephemeral_mutex: Mutex::new(()),
session_mgr: RefCell::new(SessionMgr::new(epoch, rand)),
}
}
pub fn dev_det(&self) -> &BasicInfoConfig<'_> {
self.dev_det
}
pub fn dev_att(&self) -> &dyn DevAttDataFetcher {
self.dev_att
}
pub fn port(&self) -> u16 {
self.port
}
pub fn load_fabrics(&self, data: &[u8]) -> Result<(), Error> {
self.fabric_mgr.borrow_mut().load(data, self.mdns)
}
pub fn load_acls(&self, data: &[u8]) -> Result<(), Error> {
self.acl_mgr.borrow_mut().load(data)
}
pub fn store_fabrics<'b>(&self, buf: &'b mut [u8]) -> Result<Option<&'b [u8]>, Error> {
self.fabric_mgr.borrow_mut().store(buf)
}
pub fn store_acls<'b>(&self, buf: &'b mut [u8]) -> Result<Option<&'b [u8]>, Error> {
self.acl_mgr.borrow_mut().store(buf)
}
pub fn is_changed(&self) -> bool {
self.acl_mgr.borrow().is_changed() || self.fabric_mgr.borrow().is_changed()
}
pub fn start_comissioning(
&self,
dev_comm: CommissioningData,
buf: &mut [u8],
) -> Result<bool, Error> {
if !self.pase_mgr.borrow().is_pase_session_enabled() && self.fabric_mgr.borrow().is_empty()
{
print_pairing_code_and_qr(
self.dev_det,
&dev_comm,
DiscoveryCapabilities::default(),
buf,
)?;
self.pase_mgr.borrow_mut().enable_pase_session(
dev_comm.verifier,
dev_comm.discriminator,
self.mdns,
)?;
Ok(true)
} else {
Ok(false)
}
}
pub fn notify_changed(&self) {
if self.is_changed() {
self.persist_notification.signal(());
}
}
pub async fn wait_changed(&self) {
self.persist_notification.wait().await
}
}
impl<'a> Borrow<RefCell<FabricMgr>> for Matter<'a> {
fn borrow(&self) -> &RefCell<FabricMgr> {
&self.fabric_mgr
}
}
impl<'a> Borrow<RefCell<AclMgr>> for Matter<'a> {
fn borrow(&self) -> &RefCell<AclMgr> {
&self.acl_mgr
}
}
impl<'a> Borrow<RefCell<PaseMgr>> for Matter<'a> {
fn borrow(&self) -> &RefCell<PaseMgr> {
&self.pase_mgr
}
}
impl<'a> Borrow<RefCell<FailSafe>> for Matter<'a> {
fn borrow(&self) -> &RefCell<FailSafe> {
&self.failsafe
}
}
impl<'a> Borrow<BasicInfoConfig<'a>> for Matter<'a> {
fn borrow(&self) -> &BasicInfoConfig<'a> {
self.dev_det
}
}
impl<'a> Borrow<dyn DevAttDataFetcher + 'a> for Matter<'a> {
fn borrow(&self) -> &(dyn DevAttDataFetcher + 'a) {
self.dev_att
}
}
impl<'a> Borrow<dyn Mdns + 'a> for Matter<'a> {
fn borrow(&self) -> &(dyn Mdns + 'a) {
self.mdns
}
}
impl<'a> Borrow<Epoch> for Matter<'a> {
fn borrow(&self) -> &Epoch {
&self.epoch
}
}
impl<'a> Borrow<Rand> for Matter<'a> {
fn borrow(&self) -> &Rand {
&self.rand
}
}
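A hedged usage sketch for the API above. It assumes the crate is imported as `rs_matter`, that the `std` feature is enabled (so `new_default` and the `sys_*` helpers exist), and that the caller brings real `DevAttDataFetcher` and `Mdns` implementations; the persistence step is application-specific.

use rs_matter::core::{Matter, MATTER_PORT};
use rs_matter::data_model::cluster_basic_information::BasicInfoConfig;
use rs_matter::data_model::sdm::dev_att::DevAttDataFetcher;
use rs_matter::error::Error;
use rs_matter::mdns::Mdns;

/// Builds a Matter object and stores fabric data if anything changed.
fn setup_and_persist<'a>(
    dev_det: &'a BasicInfoConfig<'a>,
    dev_att: &'a dyn DevAttDataFetcher,
    mdns: &'a dyn Mdns,
    storage: &mut [u8],
) -> Result<(), Error> {
    let matter = Matter::new_default(dev_det, dev_att, mdns, MATTER_PORT);

    if matter.is_changed() {
        if let Some(data) = matter.store_fabrics(storage)? {
            // Hand `data` over to non-volatile storage here (application-specific).
            let _ = data;
        }
    }

    Ok(())
}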


@ -0,0 +1,136 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use log::error;
use crate::{
error::{Error, ErrorCode},
utils::rand::Rand,
};
pub fn hkdf_sha256(_salt: &[u8], _ikm: &[u8], _info: &[u8], _key: &mut [u8]) -> Result<(), Error> {
error!("This API should never get called");
Ok(())
}
#[derive(Clone, Debug)]
pub struct Sha256 {}
impl Sha256 {
pub fn new() -> Result<Self, Error> {
Ok(Self {})
}
pub fn update(&mut self, _data: &[u8]) -> Result<(), Error> {
Ok(())
}
pub fn finish(self, _digest: &mut [u8]) -> Result<(), Error> {
Ok(())
}
}
pub struct HmacSha256 {}
impl HmacSha256 {
pub fn new(_key: &[u8]) -> Result<Self, Error> {
error!("This API should never get called");
Ok(Self {})
}
pub fn update(&mut self, _data: &[u8]) -> Result<(), Error> {
error!("This API should never get called");
Ok(())
}
pub fn finish(self, _out: &mut [u8]) -> Result<(), Error> {
error!("This API should never get called");
Ok(())
}
}
#[derive(Debug)]
pub struct KeyPair;
impl KeyPair {
pub fn new(_rand: Rand) -> Result<Self, Error> {
Ok(Self)
}
pub fn new_from_components(_pub_key: &[u8], _priv_key: &[u8]) -> Result<Self, Error> {
Ok(Self {})
}
pub fn new_from_public(_pub_key: &[u8]) -> Result<Self, Error> {
error!("This API should never get called");
Ok(Self {})
}
pub fn get_csr<'a>(&self, _out_csr: &'a mut [u8]) -> Result<&'a [u8], Error> {
error!("This API should never get called");
Err(ErrorCode::Invalid.into())
}
pub fn get_public_key(&self, _pub_key: &mut [u8]) -> Result<usize, Error> {
Ok(0)
}
pub fn get_private_key(&self, _pub_key: &mut [u8]) -> Result<usize, Error> {
Ok(0)
}
pub fn derive_secret(self, _peer_pub_key: &[u8], _secret: &mut [u8]) -> Result<usize, Error> {
error!("This API should never get called");
Err(ErrorCode::Invalid.into())
}
pub fn sign_msg(&self, _msg: &[u8], _signature: &mut [u8]) -> Result<usize, Error> {
error!("This API should never get called");
Err(ErrorCode::Invalid.into())
}
pub fn verify_msg(&self, _msg: &[u8], _signature: &[u8]) -> Result<(), Error> {
error!("This API should never get called");
Err(ErrorCode::Invalid.into())
}
}
pub fn pbkdf2_hmac(_pass: &[u8], _iter: usize, _salt: &[u8], _key: &mut [u8]) -> Result<(), Error> {
error!("This API should never get called");
Ok(())
}
pub fn encrypt_in_place(
_key: &[u8],
_nonce: &[u8],
_ad: &[u8],
_data: &mut [u8],
_data_len: usize,
) -> Result<usize, Error> {
Ok(0)
}
pub fn decrypt_in_place(
_key: &[u8],
_nonce: &[u8],
_ad: &[u8],
_data: &mut [u8],
) -> Result<usize, Error> {
Ok(0)
}


@ -17,9 +17,8 @@
use log::error;
use crate::error::Error;
use super::CryptoKeyPair;
use crate::error::{Error, ErrorCode};
use crate::utils::rand::Rand;
pub fn hkdf_sha256(_salt: &[u8], _ikm: &[u8], _info: &[u8], _key: &mut [u8]) -> Result<(), Error> {
error!("This API should never get called");
@ -62,18 +61,17 @@ impl HmacSha256 {
}
}
#[derive(Debug)]
pub struct KeyPair {}
impl KeyPair {
pub fn new() -> Result<Self, Error> {
pub fn new(_rand: Rand) -> Result<Self, Error> {
error!("This API should never get called");
Ok(Self {})
}
pub fn new_from_components(_pub_key: &[u8], priv_key: &[u8]) -> Result<Self, Error> {
error!("This API should never get called");
Ok(Self {})
}
@ -82,28 +80,33 @@ impl KeyPair {
Ok(Self {})
}
}
impl CryptoKeyPair for KeyPair {
fn get_csr<'a>(&self, _out_csr: &'a mut [u8]) -> Result<&'a [u8], Error> {
pub fn get_csr<'a>(&self, _out_csr: &'a mut [u8]) -> Result<&'a [u8], Error> {
error!("This API should never get called");
Err(Error::Invalid)
Err(ErrorCode::Invalid.into())
}
fn get_public_key(&self, _pub_key: &mut [u8]) -> Result<usize, Error> {
error!("This API should never get called");
Err(Error::Invalid)
pub fn get_public_key(&self, _pub_key: &mut [u8]) -> Result<usize, Error> {
Ok(0)
}
fn derive_secret(self, _peer_pub_key: &[u8], _secret: &mut [u8]) -> Result<usize, Error> {
error!("This API should never get called");
Err(Error::Invalid)
pub fn get_private_key(&self, priv_key: &mut [u8]) -> Result<usize, Error> {
Ok(0)
}
fn sign_msg(&self, _msg: &[u8], _signature: &mut [u8]) -> Result<usize, Error> {
pub fn derive_secret(self, _peer_pub_key: &[u8], _secret: &mut [u8]) -> Result<usize, Error> {
error!("This API should never get called");
Err(Error::Invalid)
Err(ErrorCode::Invalid.into())
}
fn verify_msg(&self, _msg: &[u8], _signature: &[u8]) -> Result<(), Error> {
pub fn sign_msg(&self, _msg: &[u8], _signature: &mut [u8]) -> Result<usize, Error> {
error!("This API should never get called");
Err(Error::Invalid)
Err(ErrorCode::Invalid.into())
}
pub fn verify_msg(&self, _msg: &[u8], _signature: &[u8]) -> Result<(), Error> {
error!("This API should never get called");
Err(ErrorCode::Invalid.into())
}
}


@ -15,9 +15,13 @@
* limitations under the License.
*/
use std::sync::Arc;
extern crate alloc;
use log::error;
use core::fmt::{self, Debug};
use alloc::sync::Arc;
use log::{error, info};
use mbedtls::{
bignum::Mpi,
cipher::{Authenticated, Cipher},
@ -28,12 +32,12 @@ use mbedtls::{
x509,
};
use super::CryptoKeyPair;
use crate::{
// TODO: We should move ASN1Writer out of Cert,
// so Crypto doesn't have to depend on Cert
cert::{ASN1Writer, CertConsumer},
error::Error,
error::{Error, ErrorCode},
utils::rand::Rand,
};
pub struct HmacSha256 {
@ -48,11 +52,13 @@ impl HmacSha256 {
}
pub fn update(&mut self, data: &[u8]) -> Result<(), Error> {
self.inner.update(data).map_err(|_| Error::TLSStack)
self.inner
.update(data)
.map_err(|_| ErrorCode::TLSStack.into())
}
pub fn finish(self, out: &mut [u8]) -> Result<(), Error> {
self.inner.finish(out).map_err(|_| Error::TLSStack)?;
self.inner.finish(out).map_err(|_| ErrorCode::TLSStack)?;
Ok(())
}
}
@ -62,7 +68,7 @@ pub struct KeyPair {
}
impl KeyPair {
pub fn new() -> Result<Self, Error> {
pub fn new(_rand: Rand) -> Result<Self, Error> {
let mut ctr_drbg = CtrDrbg::new(Arc::new(OsEntropy::new()), None)?;
Ok(Self {
key: Pk::generate_ec(&mut ctr_drbg, EcGroupId::SecP256R1)?,
@ -85,10 +91,8 @@ impl KeyPair {
key: Pk::public_from_ec_components(group, pub_key)?,
})
}
}
impl CryptoKeyPair for KeyPair {
fn get_csr<'a>(&self, out_csr: &'a mut [u8]) -> Result<&'a [u8], Error> {
pub fn get_csr<'a>(&self, out_csr: &'a mut [u8]) -> Result<&'a [u8], Error> {
let tmp_priv = self.key.ec_private()?;
let mut tmp_key =
Pk::private_from_ec_components(EcGroup::new(EcGroupId::SecP256R1)?, tmp_priv)?;
@ -103,16 +107,16 @@ impl CryptoKeyPair for KeyPair {
Ok(Some(a)) => Ok(a),
Ok(None) => {
error!("Error in writing CSR: None received");
Err(Error::Invalid)
Err(ErrorCode::Invalid.into())
}
Err(e) => {
error!("Error in writing CSR {}", e);
Err(Error::TLSStack)
Err(ErrorCode::TLSStack.into())
}
}
}
fn get_public_key(&self, pub_key: &mut [u8]) -> Result<usize, Error> {
pub fn get_public_key(&self, pub_key: &mut [u8]) -> Result<usize, Error> {
let public_key = self.key.ec_public()?;
let group = EcGroup::new(EcGroupId::SecP256R1)?;
let vec = public_key.to_binary(&group, false)?;
@ -122,7 +126,7 @@ impl CryptoKeyPair for KeyPair {
Ok(len)
}
fn get_private_key(&self, priv_key: &mut [u8]) -> Result<usize, Error> {
pub fn get_private_key(&self, priv_key: &mut [u8]) -> Result<usize, Error> {
let priv_key_mpi = self.key.ec_private()?;
let vec = priv_key_mpi.to_binary()?;
@ -131,7 +135,7 @@ impl CryptoKeyPair for KeyPair {
Ok(len)
}
fn derive_secret(self, peer_pub_key: &[u8], secret: &mut [u8]) -> Result<usize, Error> {
pub fn derive_secret(self, peer_pub_key: &[u8], secret: &mut [u8]) -> Result<usize, Error> {
// mbedtls requires a 'mut' key. Instead of making a change in our Trait,
// we just clone the key this way
@ -149,7 +153,7 @@ impl CryptoKeyPair for KeyPair {
Ok(len)
}
fn sign_msg(&self, msg: &[u8], signature: &mut [u8]) -> Result<usize, Error> {
pub fn sign_msg(&self, msg: &[u8], signature: &mut [u8]) -> Result<usize, Error> {
// mbedtls requires a 'mut' key. Instead of making a change in our Trait,
// we just clone the key this way
let tmp_key = self.key.ec_private()?;
@ -162,7 +166,7 @@ impl CryptoKeyPair for KeyPair {
let mut ctr_drbg = CtrDrbg::new(Arc::new(OsEntropy::new()), None)?;
if signature.len() < super::EC_SIGNATURE_LEN_BYTES {
return Err(Error::NoSpace);
Err(ErrorCode::NoSpace)?;
}
safemem::write_bytes(signature, 0);
@ -175,7 +179,7 @@ impl CryptoKeyPair for KeyPair {
Ok(len)
}
fn verify_msg(&self, msg: &[u8], signature: &[u8]) -> Result<(), Error> {
pub fn verify_msg(&self, msg: &[u8], signature: &[u8]) -> Result<(), Error> {
// mbedtls requires a 'mut' key. Instead of making a change in our Trait,
// we just clone the key this way
let tmp_key = self.key.ec_public()?;
@ -192,14 +196,20 @@ impl CryptoKeyPair for KeyPair {
let mbedtls_sign = &mbedtls_sign[..len];
if let Err(e) = tmp_key.verify(hash::Type::Sha256, &msg_hash, mbedtls_sign) {
println!("The error is {}", e);
Err(Error::InvalidSignature)
info!("The error is {}", e);
Err(ErrorCode::InvalidSignature.into())
} else {
Ok(())
}
}
}
impl core::fmt::Debug for KeyPair {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("KeyPair").finish()
}
}
fn convert_r_s_to_asn1_sign(signature: &[u8], mbedtls_sign: &mut [u8]) -> Result<usize, Error> {
let r = &signature[0..32];
let s = &signature[32..64];
@ -224,7 +234,7 @@ fn convert_asn1_sign_to_r_s(signature: &mut [u8]) -> Result<usize, Error> {
// Type 0x2 is Integer (first integer is r)
if signature[offset] != 2 {
return Err(Error::Invalid);
Err(ErrorCode::Invalid)?;
}
offset += 1;
@ -249,7 +259,7 @@ fn convert_asn1_sign_to_r_s(signature: &mut [u8]) -> Result<usize, Error> {
// Type 0x2 is Integer (this integer is s)
if signature[offset] != 2 {
return Err(Error::Invalid);
Err(ErrorCode::Invalid)?;
}
offset += 1;
@ -268,17 +278,17 @@ fn convert_asn1_sign_to_r_s(signature: &mut [u8]) -> Result<usize, Error> {
Ok(64)
} else {
Err(Error::Invalid)
Err(ErrorCode::Invalid.into())
}
}
pub fn pbkdf2_hmac(pass: &[u8], iter: usize, salt: &[u8], key: &mut [u8]) -> Result<(), Error> {
mbedtls::hash::pbkdf2_hmac(Type::Sha256, pass, salt, iter as u32, key)
.map_err(|_e| Error::TLSStack)
.map_err(|_e| ErrorCode::TLSStack.into())
}
pub fn hkdf_sha256(salt: &[u8], ikm: &[u8], info: &[u8], key: &mut [u8]) -> Result<(), Error> {
Hkdf::hkdf(Type::Sha256, salt, ikm, info, key).map_err(|_e| Error::TLSStack)
Hkdf::hkdf(Type::Sha256, salt, ikm, info, key).map_err(|_e| ErrorCode::TLSStack.into())
}
pub fn encrypt_in_place(
@ -299,7 +309,7 @@ pub fn encrypt_in_place(
cipher
.encrypt_auth_inplace(ad, data, tag)
.map(|(len, _)| len)
.map_err(|_e| Error::TLSStack)
.map_err(|_e| ErrorCode::TLSStack.into())
}
pub fn decrypt_in_place(
@ -321,7 +331,7 @@ pub fn decrypt_in_place(
.map(|(len, _)| len)
.map_err(|e| {
error!("Error during decryption: {:?}", e);
Error::TLSStack
ErrorCode::TLSStack.into()
})
}
@ -338,12 +348,18 @@ impl Sha256 {
}
pub fn update(&mut self, data: &[u8]) -> Result<(), Error> {
self.ctx.update(data).map_err(|_| Error::TLSStack)?;
self.ctx.update(data).map_err(|_| ErrorCode::TLSStack)?;
Ok(())
}
pub fn finish(self, digest: &mut [u8]) -> Result<(), Error> {
self.ctx.finish(digest).map_err(|_| Error::TLSStack)?;
self.ctx.finish(digest).map_err(|_| ErrorCode::TLSStack)?;
Ok(())
}
}
impl Debug for Sha256 {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "Sha256")
}
}


@ -15,9 +15,12 @@
* limitations under the License.
*/
use crate::error::Error;
use core::fmt::{self, Debug};
use super::CryptoKeyPair;
use crate::error::{Error, ErrorCode};
use crate::utils::rand::Rand;
use alloc::vec;
use foreign_types::ForeignTypeRef;
use log::error;
use openssl::asn1::Asn1Type;
@ -40,6 +43,9 @@ use openssl::x509::{X509NameBuilder, X509ReqBuilder, X509};
// problem while using OpenSSL's Signer
// TODO: Use proper OpenSSL method for this
use hmac::{Hmac, Mac};
extern crate alloc;
pub struct HmacSha256 {
ctx: Hmac<sha2::Sha256>,
}
@ -47,12 +53,14 @@ pub struct HmacSha256 {
impl HmacSha256 {
pub fn new(key: &[u8]) -> Result<Self, Error> {
Ok(Self {
ctx: Hmac::<sha2::Sha256>::new_from_slice(key).map_err(|_x| Error::InvalidKeyLength)?,
ctx: Hmac::<sha2::Sha256>::new_from_slice(key)
.map_err(|_x| ErrorCode::InvalidKeyLength)?,
})
}
pub fn update(&mut self, data: &[u8]) -> Result<(), Error> {
Ok(self.ctx.update(data))
self.ctx.update(data);
Ok(())
}
pub fn finish(self, out: &mut [u8]) -> Result<(), Error> {
@ -62,16 +70,18 @@ impl HmacSha256 {
}
}
#[derive(Debug)]
pub enum KeyType {
Public(EcKey<pkey::Public>),
Private(EcKey<pkey::Private>),
}
#[derive(Debug)]
pub struct KeyPair {
key: KeyType,
}
impl KeyPair {
pub fn new() -> Result<Self, Error> {
pub fn new(_rand: Rand) -> Result<Self, Error> {
let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1)?;
let key = EcKey::generate(&group)?;
Ok(Self {
@ -108,14 +118,12 @@ impl KeyPair {
fn private_key(&self) -> Result<&EcKey<Private>, Error> {
match &self.key {
KeyType::Public(_) => Err(Error::Invalid),
KeyType::Private(k) => Ok(&k),
KeyType::Public(_) => Err(ErrorCode::Invalid.into()),
KeyType::Private(k) => Ok(k),
}
}
}
impl CryptoKeyPair for KeyPair {
fn get_public_key(&self, pub_key: &mut [u8]) -> Result<usize, Error> {
pub fn get_public_key(&self, pub_key: &mut [u8]) -> Result<usize, Error> {
let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1)?;
let mut bn_ctx = BigNumContext::new()?;
let s = self.public_key_point().to_bytes(
@ -128,14 +136,14 @@ impl CryptoKeyPair for KeyPair {
Ok(len)
}
fn get_private_key(&self, priv_key: &mut [u8]) -> Result<usize, Error> {
pub fn get_private_key(&self, priv_key: &mut [u8]) -> Result<usize, Error> {
let s = self.private_key()?.private_key().to_vec();
let len = s.len();
priv_key[..len].copy_from_slice(s.as_slice());
Ok(len)
}
fn derive_secret(self, peer_pub_key: &[u8], secret: &mut [u8]) -> Result<usize, Error> {
pub fn derive_secret(self, peer_pub_key: &[u8], secret: &mut [u8]) -> Result<usize, Error> {
let self_pkey = PKey::from_ec_key(self.private_key()?.clone())?;
let group = EcGroup::from_curve_name(Nid::X9_62_PRIME256V1)?;
@ -149,7 +157,7 @@ impl CryptoKeyPair for KeyPair {
Ok(deriver.derive(secret)?)
}
fn get_csr<'a>(&self, out_csr: &'a mut [u8]) -> Result<&'a [u8], Error> {
pub fn get_csr<'a>(&self, out_csr: &'a mut [u8]) -> Result<&'a [u8], Error> {
let mut builder = X509ReqBuilder::new()?;
builder.set_version(0)?;
@ -170,18 +178,18 @@ impl CryptoKeyPair for KeyPair {
a.copy_from_slice(csr);
Ok(a)
} else {
Err(Error::NoSpace)
Err(ErrorCode::NoSpace.into())
}
}
fn sign_msg(&self, msg: &[u8], signature: &mut [u8]) -> Result<usize, Error> {
pub fn sign_msg(&self, msg: &[u8], signature: &mut [u8]) -> Result<usize, Error> {
// First get the SHA256 of the message
let mut h = Hasher::new(MessageDigest::sha256())?;
h.update(msg)?;
let msg = h.finish()?;
if signature.len() < super::EC_SIGNATURE_LEN_BYTES {
return Err(Error::NoSpace);
Err(ErrorCode::NoSpace)?;
}
safemem::write_bytes(signature, 0);
@ -193,7 +201,7 @@ impl CryptoKeyPair for KeyPair {
Ok(64)
}
fn verify_msg(&self, msg: &[u8], signature: &[u8]) -> Result<(), Error> {
pub fn verify_msg(&self, msg: &[u8], signature: &[u8]) -> Result<(), Error> {
// First get the SHA256 of the message
let mut h = Hasher::new(MessageDigest::sha256())?;
h.update(msg)?;
@ -208,11 +216,11 @@ impl CryptoKeyPair for KeyPair {
KeyType::Public(key) => key,
_ => {
error!("Not yet supported");
return Err(Error::Invalid);
return Err(ErrorCode::Invalid.into());
}
};
if !sig.verify(&msg, k)? {
Err(Error::InvalidSignature)
Err(ErrorCode::InvalidSignature.into())
} else {
Ok(())
}
@ -220,10 +228,10 @@ impl CryptoKeyPair for KeyPair {
}
const P256_KEY_LEN: usize = 256 / 8;
pub fn pubkey_from_der<'a>(der: &'a [u8], out_key: &mut [u8]) -> Result<(), Error> {
pub fn pubkey_from_der(der: &[u8], out_key: &mut [u8]) -> Result<(), Error> {
if out_key.len() != P256_KEY_LEN {
error!("Insufficient length");
Err(Error::NoSpace)
Err(ErrorCode::NoSpace.into())
} else {
let key = X509::from_der(der)?.public_key()?.public_key_to_der()?;
let len = key.len();
@ -235,7 +243,7 @@ pub fn pubkey_from_der<'a>(der: &'a [u8], out_key: &mut [u8]) -> Result<(), Erro
pub fn pbkdf2_hmac(pass: &[u8], iter: usize, salt: &[u8], key: &mut [u8]) -> Result<(), Error> {
openssl::pkcs5::pbkdf2_hmac(pass, salt, iter, MessageDigest::sha256(), key)
.map_err(|_e| Error::TLSStack)
.map_err(|_e| ErrorCode::TLSStack.into())
}
pub fn hkdf_sha256(salt: &[u8], ikm: &[u8], info: &[u8], key: &mut [u8]) -> Result<(), Error> {
@ -279,7 +287,7 @@ pub fn decrypt_in_place(
) -> Result<usize, Error> {
let tag_start = data.len() - super::AEAD_MIC_LEN_BYTES;
let (data, tag) = data.split_at_mut(tag_start);
let result = lowlevel_decrypt_aead(key, Some(nonce), ad, data, &tag)?;
let result = lowlevel_decrypt_aead(key, Some(nonce), ad, data, tag)?;
data[..result.len()].copy_from_slice(result.as_slice());
Ok(result.len())
}
@ -295,7 +303,7 @@ pub fn lowlevel_encrypt_aead(
aad: &[u8],
data: &[u8],
tag: &mut [u8],
) -> Result<Vec<u8>, ErrorStack> {
) -> Result<alloc::vec::Vec<u8>, ErrorStack> {
let t = symm::Cipher::aes_128_ccm();
let mut ctx = CipherCtx::new()?;
CipherCtxRef::encrypt_init(
@ -331,7 +339,7 @@ pub fn lowlevel_decrypt_aead(
aad: &[u8],
data: &[u8],
tag: &[u8],
) -> Result<Vec<u8>, ErrorStack> {
) -> Result<alloc::vec::Vec<u8>, ErrorStack> {
let t = symm::Cipher::aes_128_ccm();
let mut ctx = CipherCtx::new()?;
CipherCtxRef::decrypt_init(
@ -375,7 +383,9 @@ impl Sha256 {
}
pub fn update(&mut self, data: &[u8]) -> Result<(), Error> {
self.hasher.update(data).map_err(|_| Error::TLSStack)
self.hasher
.update(data)
.map_err(|_| ErrorCode::TLSStack.into())
}
pub fn finish(mut self, data: &mut [u8]) -> Result<(), Error> {
@ -384,3 +394,9 @@ impl Sha256 {
Ok(())
}
}
impl Debug for Sha256 {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "Sha256")
}
}


@ -15,9 +15,10 @@
* limitations under the License.
*/
use std::convert::{TryFrom, TryInto};
use core::convert::{TryFrom, TryInto};
use aes::Aes128;
use alloc::vec;
use ccm::{
aead::generic_array::GenericArray,
consts::{U13, U16},
@ -33,20 +34,24 @@ use p256::{
use sha2::Digest;
use x509_cert::{
attr::AttributeType,
der::{asn1::BitString, Any, Encode},
der::{asn1::BitString, Any, Encode, Writer},
name::RdnSequence,
request::CertReq,
spki::{AlgorithmIdentifier, SubjectPublicKeyInfoOwned},
};
use crate::error::Error;
use super::CryptoKeyPair;
use crate::{
error::{Error, ErrorCode},
secure_channel::crypto_rustcrypto::RandRngCore,
utils::rand::Rand,
};
type HmacSha256I = hmac::Hmac<sha2::Sha256>;
type AesCcm = Ccm<Aes128, U16, U13>;
#[derive(Clone)]
extern crate alloc;
#[derive(Debug, Clone)]
pub struct Sha256 {
hasher: sha2::Sha256,
}
@ -79,7 +84,7 @@ impl HmacSha256 {
Ok(Self {
inner: HmacSha256I::new_from_slice(key).map_err(|e| {
error!("Error creating HmacSha256 {:?}", e);
Error::TLSStack
ErrorCode::TLSStack
})?,
})
}
@ -96,18 +101,20 @@ impl HmacSha256 {
}
}
#[derive(Debug)]
pub enum KeyType {
Private(SecretKey),
Public(PublicKey),
}
#[derive(Debug)]
pub struct KeyPair {
key: KeyType,
}
impl KeyPair {
pub fn new() -> Result<Self, Error> {
let mut rng = rand::thread_rng();
pub fn new(rand: Rand) -> Result<Self, Error> {
let mut rng = RandRngCore(rand);
let secret_key = SecretKey::random(&mut rng);
Ok(Self {
@ -143,13 +150,11 @@ impl KeyPair {
fn private_key(&self) -> Result<&SecretKey, Error> {
match &self.key {
KeyType::Private(key) => Ok(key),
KeyType::Public(_) => Err(Error::Crypto),
KeyType::Public(_) => Err(ErrorCode::Crypto.into()),
}
}
}
impl CryptoKeyPair for KeyPair {
fn get_private_key(&self, priv_key: &mut [u8]) -> Result<usize, Error> {
pub fn get_private_key(&self, priv_key: &mut [u8]) -> Result<usize, Error> {
match &self.key {
KeyType::Private(key) => {
let bytes = key.to_bytes();
@ -158,10 +163,10 @@ impl CryptoKeyPair for KeyPair {
priv_key[..slice.len()].copy_from_slice(slice);
Ok(len)
}
KeyType::Public(_) => Err(Error::Crypto),
KeyType::Public(_) => Err(ErrorCode::Crypto.into()),
}
}
fn get_csr<'a>(&self, out_csr: &'a mut [u8]) -> Result<&'a [u8], Error> {
pub fn get_csr<'a>(&self, out_csr: &'a mut [u8]) -> Result<&'a [u8], Error> {
use p256::ecdsa::signature::Signer;
let subject = RdnSequence(vec![x509_cert::name::RelativeDistinguishedName(
@ -200,7 +205,7 @@ impl CryptoKeyPair for KeyPair {
attributes: Default::default(),
};
let mut message = vec![];
info.encode(&mut message).unwrap();
info.encode(&mut VecWriter(&mut message)).unwrap();
// Can't use self.sign_msg as the signature has to be in DER format
let private_key = self.private_key()?;
@ -224,14 +229,14 @@ impl CryptoKeyPair for KeyPair {
Ok(a)
}
fn get_public_key(&self, pub_key: &mut [u8]) -> Result<usize, Error> {
pub fn get_public_key(&self, pub_key: &mut [u8]) -> Result<usize, Error> {
let point = self.public_key_point().to_encoded_point(false);
let bytes = point.as_bytes();
let len = bytes.len();
pub_key[..len].copy_from_slice(bytes);
Ok(len)
}
fn derive_secret(self, peer_pub_key: &[u8], secret: &mut [u8]) -> Result<usize, Error> {
pub fn derive_secret(self, peer_pub_key: &[u8], secret: &mut [u8]) -> Result<usize, Error> {
let encoded_point = EncodedPoint::from_bytes(peer_pub_key).unwrap();
let peer_pubkey = PublicKey::from_encoded_point(&encoded_point).unwrap();
let private_key = self.private_key()?;
@ -247,11 +252,11 @@ impl CryptoKeyPair for KeyPair {
Ok(len)
}
fn sign_msg(&self, msg: &[u8], signature: &mut [u8]) -> Result<usize, Error> {
pub fn sign_msg(&self, msg: &[u8], signature: &mut [u8]) -> Result<usize, Error> {
use p256::ecdsa::signature::Signer;
if signature.len() < super::EC_SIGNATURE_LEN_BYTES {
return Err(Error::NoSpace);
return Err(ErrorCode::NoSpace.into());
}
match &self.key {
@ -266,7 +271,7 @@ impl CryptoKeyPair for KeyPair {
KeyType::Public(_) => todo!(),
}
}
fn verify_msg(&self, msg: &[u8], signature: &[u8]) -> Result<(), Error> {
pub fn verify_msg(&self, msg: &[u8], signature: &[u8]) -> Result<(), Error> {
use p256::ecdsa::signature::Verifier;
let verifying_key = VerifyingKey::from_affine(self.public_key_point()).unwrap();
@ -274,7 +279,7 @@ impl CryptoKeyPair for KeyPair {
verifying_key
.verify(msg, &signature)
.map_err(|_| Error::InvalidSignature)?;
.map_err(|_| ErrorCode::InvalidSignature)?;
Ok(())
}
@ -291,7 +296,7 @@ pub fn hkdf_sha256(salt: &[u8], ikm: &[u8], info: &[u8], key: &mut [u8]) -> Resu
.expand(info, key)
.map_err(|e| {
error!("Error with hkdf_sha256 {:?}", e);
Error::TLSStack
ErrorCode::TLSStack.into()
})
}
@ -370,3 +375,13 @@ impl<'a> ccm::aead::Buffer for SliceBuffer<'a> {
self.len = len;
}
}
struct VecWriter<'a>(&'a mut alloc::vec::Vec<u8>);
impl<'a> Writer for VecWriter<'a> {
fn write(&mut self, slice: &[u8]) -> x509_cert::der::Result<()> {
self.0.extend_from_slice(slice);
Ok(())
}
}


@ -14,8 +14,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::error::Error;
use crate::{
error::{Error, ErrorCode},
tlv::{FromTLV, TLVWriter, TagType, ToTLV},
};
pub const SYMM_KEY_LEN_BITS: usize = 128;
pub const SYMM_KEY_LEN_BYTES: usize = SYMM_KEY_LEN_BITS / 8;
@ -35,43 +37,70 @@ pub const ECDH_SHARED_SECRET_LEN_BYTES: usize = 32;
pub const EC_SIGNATURE_LEN_BYTES: usize = 64;
// APIs particular to a KeyPair so a KeyPair object can be defined
pub trait CryptoKeyPair {
fn get_csr<'a>(&self, csr: &'a mut [u8]) -> Result<&'a [u8], Error>;
fn get_public_key(&self, pub_key: &mut [u8]) -> Result<usize, Error>;
fn get_private_key(&self, priv_key: &mut [u8]) -> Result<usize, Error>;
fn derive_secret(self, peer_pub_key: &[u8], secret: &mut [u8]) -> Result<usize, Error>;
fn sign_msg(&self, msg: &[u8], signature: &mut [u8]) -> Result<usize, Error>;
fn verify_msg(&self, msg: &[u8], signature: &[u8]) -> Result<(), Error>;
}
#[cfg(feature = "crypto_esp_mbedtls")]
#[cfg(all(feature = "mbedtls", target_os = "espidf"))]
mod crypto_esp_mbedtls;
#[cfg(feature = "crypto_esp_mbedtls")]
#[cfg(all(feature = "mbedtls", target_os = "espidf"))]
pub use self::crypto_esp_mbedtls::*;
#[cfg(feature = "crypto_mbedtls")]
#[cfg(all(feature = "mbedtls", not(target_os = "espidf")))]
mod crypto_mbedtls;
#[cfg(feature = "crypto_mbedtls")]
#[cfg(all(feature = "mbedtls", not(target_os = "espidf")))]
pub use self::crypto_mbedtls::*;
#[cfg(feature = "crypto_openssl")]
#[cfg(feature = "openssl")]
mod crypto_openssl;
#[cfg(feature = "crypto_openssl")]
#[cfg(feature = "openssl")]
pub use self::crypto_openssl::*;
#[cfg(feature = "crypto_rustcrypto")]
#[cfg(feature = "rustcrypto")]
mod crypto_rustcrypto;
#[cfg(feature = "crypto_rustcrypto")]
#[cfg(feature = "rustcrypto")]
pub use self::crypto_rustcrypto::*;
#[cfg(not(any(feature = "openssl", feature = "mbedtls", feature = "rustcrypto")))]
pub mod crypto_dummy;
#[cfg(not(any(feature = "openssl", feature = "mbedtls", feature = "rustcrypto")))]
pub use self::crypto_dummy::*;
impl<'a> FromTLV<'a> for KeyPair {
fn from_tlv(t: &crate::tlv::TLVElement<'a>) -> Result<Self, Error>
where
Self: Sized,
{
t.confirm_array()?.enter();
if let Some(mut array) = t.enter() {
let pub_key = array.next().ok_or(ErrorCode::Invalid)?.slice()?;
let priv_key = array.next().ok_or(ErrorCode::Invalid)?.slice()?;
KeyPair::new_from_components(pub_key, priv_key)
} else {
Err(ErrorCode::Invalid.into())
}
}
}
impl ToTLV for KeyPair {
fn to_tlv(&self, tw: &mut TLVWriter, tag: TagType) -> Result<(), Error> {
let mut buf = [0; 1024]; // TODO
tw.start_array(tag)?;
let size = self.get_public_key(&mut buf)?;
tw.str16(TagType::Anonymous, &buf[..size])?;
let size = self.get_private_key(&mut buf)?;
tw.str16(TagType::Anonymous, &buf[..size])?;
tw.end_container()
}
}
#[cfg(test)]
mod tests {
use crate::error::Error;
use crate::error::ErrorCode;
use super::{CryptoKeyPair, KeyPair};
use super::KeyPair;
#[test]
fn test_verify_msg_success() {
@ -83,8 +112,9 @@ mod tests {
fn test_verify_msg_fail() {
let key = KeyPair::new_from_public(&test_vectors::PUB_KEY1).unwrap();
assert_eq!(
key.verify_msg(&test_vectors::MSG1_FAIL, &test_vectors::SIGNATURE1),
Err(Error::InvalidSignature)
key.verify_msg(&test_vectors::MSG1_FAIL, &test_vectors::SIGNATURE1)
.map_err(|e| e.code()),
Err(ErrorCode::InvalidSignature)
);
}
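Since `CryptoKeyPair` is gone, the key operations are now inherent methods on `KeyPair`, whichever backend is selected. A hedged sign-and-verify sketch (it assumes the crate is imported as `rs_matter`, that one of the crypto backends is enabled, and that `sys_rand` from `utils::rand` is available via the `std` feature):

use rs_matter::crypto::{KeyPair, EC_SIGNATURE_LEN_BYTES};
use rs_matter::error::Error;
use rs_matter::utils::rand::sys_rand;

fn sign_and_verify(msg: &[u8]) -> Result<(), Error> {
    // Generate a fresh P-256 key pair using the system RNG.
    let key = KeyPair::new(sys_rand)?;

    let mut signature = [0u8; EC_SIGNATURE_LEN_BYTES];
    let len = key.sign_msg(msg, &mut signature)?;

    // Verifying the same message against the produced signature succeeds.
    key.verify_msg(msg, &signature[..len])
}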


@ -0,0 +1,212 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use core::{cell::RefCell, convert::TryInto};
use super::objects::*;
use crate::{
attribute_enum,
error::{Error, ErrorCode},
utils::rand::Rand,
};
use heapless::String;
use strum::FromRepr;
pub const ID: u32 = 0x0028;
#[derive(Clone, Copy, Debug, FromRepr)]
#[repr(u16)]
pub enum Attributes {
DMRevision(AttrType<u8>) = 0,
VendorName(AttrUtfType) = 1,
VendorId(AttrType<u16>) = 2,
ProductName(AttrUtfType) = 3,
ProductId(AttrType<u16>) = 4,
NodeLabel(AttrUtfType) = 5,
HwVer(AttrType<u16>) = 7,
SwVer(AttrType<u32>) = 9,
SwVerString(AttrUtfType) = 0xa,
SerialNo(AttrUtfType) = 0x0f,
}
attribute_enum!(Attributes);
pub enum AttributesDiscriminants {
DMRevision = 0,
VendorName = 1,
VendorId = 2,
ProductName = 3,
ProductId = 4,
NodeLabel = 5,
HwVer = 7,
SwVer = 9,
SwVerString = 0xa,
SerialNo = 0x0f,
}
#[derive(Default)]
pub struct BasicInfoConfig<'a> {
pub vid: u16,
pub pid: u16,
pub hw_ver: u16,
pub sw_ver: u32,
pub sw_ver_str: &'a str,
pub serial_no: &'a str,
/// Device name; up to 32 characters
pub device_name: &'a str,
pub vendor_name: &'a str,
pub product_name: &'a str,
}
pub const CLUSTER: Cluster<'static> = Cluster {
id: ID as _,
feature_map: 0,
attributes: &[
FEATURE_MAP,
ATTRIBUTE_LIST,
Attribute::new(
AttributesDiscriminants::DMRevision as u16,
Access::RV,
Quality::FIXED,
),
Attribute::new(
AttributesDiscriminants::VendorName as u16,
Access::RV,
Quality::FIXED,
),
Attribute::new(
AttributesDiscriminants::VendorId as u16,
Access::RV,
Quality::FIXED,
),
Attribute::new(
AttributesDiscriminants::ProductName as u16,
Access::RV,
Quality::FIXED,
),
Attribute::new(
AttributesDiscriminants::ProductId as u16,
Access::RV,
Quality::FIXED,
),
Attribute::new(
AttributesDiscriminants::NodeLabel as u16,
Access::RWVM,
Quality::N,
),
Attribute::new(
AttributesDiscriminants::HwVer as u16,
Access::RV,
Quality::FIXED,
),
Attribute::new(
AttributesDiscriminants::SwVer as u16,
Access::RV,
Quality::FIXED,
),
Attribute::new(
AttributesDiscriminants::SwVerString as u16,
Access::RV,
Quality::FIXED,
),
Attribute::new(
AttributesDiscriminants::SerialNo as u16,
Access::RV,
Quality::FIXED,
),
],
commands: &[],
};
pub struct BasicInfoCluster<'a> {
data_ver: Dataver,
cfg: &'a BasicInfoConfig<'a>,
node_label: RefCell<String<32>>, // Max node-label as per the spec
}
impl<'a> BasicInfoCluster<'a> {
pub fn new(cfg: &'a BasicInfoConfig<'a>, rand: Rand) -> Self {
let node_label = RefCell::new(String::from(""));
Self {
data_ver: Dataver::new(rand),
cfg,
node_label,
}
}
pub fn read(&self, attr: &AttrDetails, encoder: AttrDataEncoder) -> Result<(), Error> {
if let Some(writer) = encoder.with_dataver(self.data_ver.get())? {
if attr.is_system() {
CLUSTER.read(attr.attr_id, writer)
} else {
match attr.attr_id.try_into()? {
Attributes::DMRevision(codec) => codec.encode(writer, 1),
Attributes::VendorName(codec) => codec.encode(writer, self.cfg.vendor_name),
Attributes::VendorId(codec) => codec.encode(writer, self.cfg.vid),
Attributes::ProductName(codec) => codec.encode(writer, self.cfg.product_name),
Attributes::ProductId(codec) => codec.encode(writer, self.cfg.pid),
Attributes::NodeLabel(codec) => {
codec.encode(writer, self.node_label.borrow().as_str())
}
Attributes::HwVer(codec) => codec.encode(writer, self.cfg.hw_ver),
Attributes::SwVer(codec) => codec.encode(writer, self.cfg.sw_ver),
Attributes::SwVerString(codec) => codec.encode(writer, self.cfg.sw_ver_str),
Attributes::SerialNo(codec) => codec.encode(writer, self.cfg.serial_no),
}
}
} else {
Ok(())
}
}
pub fn write(&self, attr: &AttrDetails, data: AttrData) -> Result<(), Error> {
let data = data.with_dataver(self.data_ver.get())?;
match attr.attr_id.try_into()? {
Attributes::NodeLabel(codec) => {
*self.node_label.borrow_mut() = String::from(
codec
.decode(data)
.map_err(|_| Error::new(ErrorCode::InvalidAction))?,
);
}
_ => return Err(Error::new(ErrorCode::InvalidAction)),
}
self.data_ver.changed();
Ok(())
}
}
impl<'a> Handler for BasicInfoCluster<'a> {
fn read(&self, attr: &AttrDetails, encoder: AttrDataEncoder) -> Result<(), Error> {
BasicInfoCluster::read(self, attr, encoder)
}
fn write(&self, attr: &AttrDetails, data: AttrData) -> Result<(), Error> {
BasicInfoCluster::write(self, attr, data)
}
}
impl<'a> NonBlockingHandler for BasicInfoCluster<'a> {}
impl<'a> ChangeNotifier<()> for BasicInfoCluster<'a> {
fn consume_change(&mut self) -> Option<()> {
self.data_ver.consume_change(())
}
}
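A short construction sketch for the cluster above. The names and numbers are placeholders (test-range VID/PID), and it assumes the crate is imported as `rs_matter` with the `std` feature providing `sys_rand`:

use rs_matter::data_model::cluster_basic_information::{BasicInfoCluster, BasicInfoConfig};
use rs_matter::utils::rand::sys_rand;

// Placeholder device details; these are the fields read back by `read` above.
const DEV_DET: BasicInfoConfig<'static> = BasicInfoConfig {
    vid: 0xFFF1,
    pid: 0x8000,
    hw_ver: 2,
    sw_ver: 1,
    sw_ver_str: "1.0",
    serial_no: "aabbccddee",
    device_name: "Demo Device",
    vendor_name: "Demo Vendor",
    product_name: "Demo Product",
};

fn main() {
    // NodeLabel starts empty and can later be updated via the NodeLabel attribute.
    let cluster = BasicInfoCluster::new(&DEV_DET, sys_rand);
    let _ = cluster;
}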


@ -0,0 +1,168 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use core::{cell::Cell, convert::TryInto};
use super::objects::*;
use crate::{
attribute_enum, cmd_enter, command_enum, error::Error, tlv::TLVElement,
transport::exchange::Exchange, utils::rand::Rand,
};
use log::info;
use strum::{EnumDiscriminants, FromRepr};
pub const ID: u32 = 0x0006;
#[derive(FromRepr, EnumDiscriminants)]
#[repr(u16)]
pub enum Attributes {
OnOff(AttrType<bool>) = 0x0,
}
attribute_enum!(Attributes);
#[derive(FromRepr, EnumDiscriminants)]
#[repr(u32)]
pub enum Commands {
Off = 0x0,
On = 0x01,
Toggle = 0x02,
}
command_enum!(Commands);
pub const CLUSTER: Cluster<'static> = Cluster {
id: ID as _,
feature_map: 0,
attributes: &[
FEATURE_MAP,
ATTRIBUTE_LIST,
Attribute::new(
AttributesDiscriminants::OnOff as u16,
Access::RV,
Quality::SN,
),
],
commands: &[
CommandsDiscriminants::Off as _,
CommandsDiscriminants::On as _,
CommandsDiscriminants::Toggle as _,
],
};
pub struct OnOffCluster {
data_ver: Dataver,
on: Cell<bool>,
}
impl OnOffCluster {
pub fn new(rand: Rand) -> Self {
Self {
data_ver: Dataver::new(rand),
on: Cell::new(false),
}
}
pub fn set(&self, on: bool) {
if self.on.get() != on {
self.on.set(on);
self.data_ver.changed();
}
}
pub fn read(&self, attr: &AttrDetails, encoder: AttrDataEncoder) -> Result<(), Error> {
if let Some(writer) = encoder.with_dataver(self.data_ver.get())? {
if attr.is_system() {
CLUSTER.read(attr.attr_id, writer)
} else {
match attr.attr_id.try_into()? {
Attributes::OnOff(codec) => codec.encode(writer, self.on.get()),
}
}
} else {
Ok(())
}
}
pub fn write(&self, attr: &AttrDetails, data: AttrData) -> Result<(), Error> {
let data = data.with_dataver(self.data_ver.get())?;
match attr.attr_id.try_into()? {
Attributes::OnOff(codec) => self.set(codec.decode(data)?),
}
self.data_ver.changed();
Ok(())
}
pub fn invoke(
&self,
_exchange: &Exchange,
cmd: &CmdDetails,
_data: &TLVElement,
_encoder: CmdDataEncoder,
) -> Result<(), Error> {
match cmd.cmd_id.try_into()? {
Commands::Off => {
cmd_enter!("Off");
self.set(false);
}
Commands::On => {
cmd_enter!("On");
self.set(true);
}
Commands::Toggle => {
cmd_enter!("Toggle");
self.set(!self.on.get());
}
}
self.data_ver.changed();
Ok(())
}
}
impl Handler for OnOffCluster {
fn read(&self, attr: &AttrDetails, encoder: AttrDataEncoder) -> Result<(), Error> {
OnOffCluster::read(self, attr, encoder)
}
fn write(&self, attr: &AttrDetails, data: AttrData) -> Result<(), Error> {
OnOffCluster::write(self, attr, data)
}
fn invoke(
&self,
exchange: &Exchange,
cmd: &CmdDetails,
data: &TLVElement,
encoder: CmdDataEncoder,
) -> Result<(), Error> {
OnOffCluster::invoke(self, exchange, cmd, data, encoder)
}
}
// TODO: Might be removed once the `on` member is externalized
impl NonBlockingHandler for OnOffCluster {}
impl ChangeNotifier<()> for OnOffCluster {
fn consume_change(&mut self) -> Option<()> {
self.data_ver.consume_change(())
}
}
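The typed `set` API above can also be driven directly from application code, independent of the Matter interaction model; a tiny sketch (same `rs_matter`/`sys_rand` assumptions as before):

use rs_matter::data_model::cluster_on_off::OnOffCluster;
use rs_matter::utils::rand::sys_rand;

fn main() {
    let cluster = OnOffCluster::new(sys_rand);

    cluster.set(true);  // value changes, data version is bumped
    cluster.set(true);  // no-op: value unchanged, data version untouched
    cluster.set(false); // changes again
}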


@ -0,0 +1,74 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::{
data_model::objects::{Cluster, Handler},
error::{Error, ErrorCode},
utils::rand::Rand,
};
use super::objects::{
AttrDataEncoder, AttrDetails, ChangeNotifier, Dataver, NonBlockingHandler, ATTRIBUTE_LIST,
FEATURE_MAP,
};
const CLUSTER_NETWORK_COMMISSIONING_ID: u32 = 0x0031;
pub const CLUSTER: Cluster<'static> = Cluster {
id: CLUSTER_NETWORK_COMMISSIONING_ID as _,
feature_map: 0,
attributes: &[FEATURE_MAP, ATTRIBUTE_LIST],
commands: &[],
};
pub struct TemplateCluster {
data_ver: Dataver,
}
impl TemplateCluster {
pub fn new(rand: Rand) -> Self {
Self {
data_ver: Dataver::new(rand),
}
}
pub fn read(&self, attr: &AttrDetails, encoder: AttrDataEncoder) -> Result<(), Error> {
if let Some(writer) = encoder.with_dataver(self.data_ver.get())? {
if attr.is_system() {
CLUSTER.read(attr.attr_id, writer)
} else {
Err(ErrorCode::AttributeNotFound.into())
}
} else {
Ok(())
}
}
}
impl Handler for TemplateCluster {
fn read(&self, attr: &AttrDetails, encoder: AttrDataEncoder) -> Result<(), Error> {
TemplateCluster::read(self, attr, encoder)
}
}
impl NonBlockingHandler for TemplateCluster {}
impl ChangeNotifier<()> for TemplateCluster {
fn consume_change(&mut self) -> Option<()> {
self.data_ver.consume_change(())
}
}


@ -0,0 +1,160 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use portable_atomic::{AtomicU32, Ordering};
use super::objects::*;
use crate::{
alloc,
error::*,
interaction_model::core::Interaction,
transport::{exchange::Exchange, packet::Packet},
};
// TODO: For now...
static SUBS_ID: AtomicU32 = AtomicU32::new(1);
/// The maximum number of expanded write requests per transaction
///
/// Write requests are first wildcard-expanded; at most this many expanded
/// write requests are supported per transaction.
pub const MAX_WRITE_ATTRS_IN_ONE_TRANS: usize = 7;
pub struct DataModel<T>(T);
impl<T> DataModel<T> {
pub fn new(handler: T) -> Self {
Self(handler)
}
pub async fn handle<'r, 'p>(
&self,
exchange: &'r mut Exchange<'_>,
rx: &'r mut Packet<'p>,
tx: &'r mut Packet<'p>,
rx_status: &'r mut Packet<'p>,
) -> Result<(), Error>
where
T: DataModelHandler,
{
let timeout = Interaction::timeout(exchange, rx, tx).await?;
let mut interaction = alloc!(Interaction::new(
exchange,
rx,
tx,
rx_status,
|| SUBS_ID.fetch_add(1, Ordering::SeqCst),
timeout,
)?);
#[cfg(feature = "alloc")]
let interaction = &mut *interaction;
#[cfg(not(feature = "alloc"))]
let interaction = &mut interaction;
#[cfg(feature = "nightly")]
let metadata = self.0.lock().await;
#[cfg(not(feature = "nightly"))]
let metadata = self.0.lock();
if interaction.start().await? {
match interaction {
Interaction::Read {
req,
ref mut driver,
} => {
let accessor = driver.accessor()?;
'outer: for item in metadata.node().read(req, None, &accessor) {
while !AttrDataEncoder::handle_read(&item, &self.0, &mut driver.writer()?)
.await?
{
if !driver.send_chunk(req).await? {
break 'outer;
}
}
}
driver.complete(req).await?;
}
Interaction::Write {
req,
ref mut driver,
} => {
let accessor = driver.accessor()?;
// The spec expects that a single write request like DeleteList + AddItem
// should cause all ACLs of that fabric to be deleted and the new one to be added (Case 1).
//
// This is in conflict with the immediate-effect expectation of ACL: an ACL
// write should instantaneously update the ACL so that immediate next WriteAttribute
// *in the same WriteRequest* should see that effect (Case 2).
//
// As with the C++ SDK, we do all the ACL checks first, before any write begins.
// This supports Case 1, at the cost of maintaining an additional list of expanded
// write requests while we process them.
let node = metadata.node();
let write_attrs: heapless::Vec<_, MAX_WRITE_ATTRS_IN_ONE_TRANS> =
node.write(req, &accessor).collect();
for item in write_attrs {
AttrDataEncoder::handle_write(&item, &self.0, &mut driver.writer()?)
.await?;
}
driver.complete(req).await?;
}
Interaction::Invoke {
req,
ref mut driver,
} => {
let accessor = driver.accessor()?;
for item in metadata.node().invoke(req, &accessor) {
let (mut tw, exchange) = driver.writer_exchange()?;
CmdDataEncoder::handle(&item, &self.0, &mut tw, exchange).await?;
}
driver.complete(req).await?;
}
Interaction::Subscribe {
req,
ref mut driver,
} => {
let accessor = driver.accessor()?;
'outer: for item in metadata.node().subscribing_read(req, None, &accessor) {
while !AttrDataEncoder::handle_read(&item, &self.0, &mut driver.writer()?)
.await?
{
if !driver.send_chunk(req).await? {
break 'outer;
}
}
}
driver.complete(req).await?;
}
}
}
Ok(())
}
}


@ -15,4 +15,19 @@
* limitations under the License.
*/
pub mod dev_att;
use super::objects::DeviceType;
pub const DEV_TYPE_ROOT_NODE: DeviceType = DeviceType {
dtype: 0x0016,
drev: 1,
};
pub const DEV_TYPE_ON_OFF_LIGHT: DeviceType = DeviceType {
dtype: 0x0100,
drev: 2,
};
pub const DEV_TYPE_ON_SMART_SPEAKER: DeviceType = DeviceType {
dtype: 0x0022,
drev: 2,
};


@ -20,8 +20,9 @@ pub mod device_types;
pub mod objects;
pub mod cluster_basic_information;
pub mod cluster_media_playback;
// TODO pub mod cluster_media_playback;
pub mod cluster_on_off;
pub mod cluster_template;
pub mod root_endpoint;
pub mod sdm;
pub mod system_model;


@ -14,16 +14,13 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#![allow(clippy::bad_bit_mask)]
use super::{AttrId, GlobalElements, Privilege};
use crate::{
error::*,
// TODO: This layer shouldn't really depend on the TLV layer, should create an abstraction layer
tlv::{TLVElement, TLVWriter, TagType, ToTLV},
};
use crate::data_model::objects::GlobalElements;
use super::{AttrId, Privilege};
use bitflags::bitflags;
use log::error;
use std::fmt::{self, Debug, Formatter};
use core::fmt::{self, Debug};
bitflags! {
#[derive(Default)]
@ -43,9 +40,12 @@ bitflags! {
const READ_PRIVILEGE_MASK = Self::NEED_VIEW.bits | Self::NEED_MANAGE.bits | Self::NEED_OPERATE.bits | Self::NEED_ADMIN.bits;
const WRITE_PRIVILEGE_MASK = Self::NEED_MANAGE.bits | Self::NEED_OPERATE.bits | Self::NEED_ADMIN.bits;
const RV = Self::READ.bits | Self::NEED_VIEW.bits;
const RF = Self::READ.bits | Self::FAB_SCOPED.bits;
const RA = Self::READ.bits | Self::NEED_ADMIN.bits;
const RWVA = Self::READ.bits | Self::WRITE.bits | Self::NEED_VIEW.bits | Self::NEED_ADMIN.bits;
const RWFA = Self::READ.bits | Self::WRITE.bits | Self::FAB_SCOPED.bits | Self::NEED_ADMIN.bits;
const RWVM = Self::READ.bits | Self::WRITE.bits | Self::NEED_VIEW.bits | Self::NEED_MANAGE.bits;
const RWFVM = Self::READ.bits | Self::WRITE.bits | Self::FAB_SCOPED.bits |Self::NEED_VIEW.bits | Self::NEED_MANAGE.bits;
}
}
@ -76,117 +76,37 @@ bitflags! {
#[derive(Default)]
pub struct Quality: u8 {
const NONE = 0x00;
const SCENE = 0x01;
const PERSISTENT = 0x02;
const FIXED = 0x03;
const NULLABLE = 0x04;
}
}
const SCENE = 0x01; // Short: S
const PERSISTENT = 0x02; // Short: N
const FIXED = 0x04; // Short: F
const NULLABLE = 0x08; // Short: X
/* This file needs some major revamp.
* - instead of allocating all over the heap, we should use some kind of slab/block allocator
* - instead of arrays, can use linked-lists to conserve space and avoid the internal fragmentation
*/
#[derive(PartialEq, PartialOrd, Clone)]
pub enum AttrValue {
Int64(i64),
Uint8(u8),
Uint16(u16),
Uint32(u32),
Uint64(u64),
Bool(bool),
Utf8(String),
Custom,
}
impl Debug for AttrValue {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
match &self {
AttrValue::Int64(v) => write!(f, "{:?}", *v),
AttrValue::Uint8(v) => write!(f, "{:?}", *v),
AttrValue::Uint16(v) => write!(f, "{:?}", *v),
AttrValue::Uint32(v) => write!(f, "{:?}", *v),
AttrValue::Uint64(v) => write!(f, "{:?}", *v),
AttrValue::Bool(v) => write!(f, "{:?}", *v),
AttrValue::Utf8(v) => write!(f, "{:?}", *v),
AttrValue::Custom => write!(f, "custom-attribute"),
}?;
Ok(())
}
}
impl ToTLV for AttrValue {
fn to_tlv(&self, tw: &mut TLVWriter, tag_type: TagType) -> Result<(), Error> {
// What is the time complexity of such long match statements?
match self {
AttrValue::Bool(v) => tw.bool(tag_type, *v),
AttrValue::Uint8(v) => tw.u8(tag_type, *v),
AttrValue::Uint16(v) => tw.u16(tag_type, *v),
AttrValue::Uint32(v) => tw.u32(tag_type, *v),
AttrValue::Uint64(v) => tw.u64(tag_type, *v),
AttrValue::Utf8(v) => tw.utf8(tag_type, v.as_bytes()),
_ => {
error!("Attribute type not yet supported");
Err(Error::AttributeNotFound)
}
}
}
}
impl AttrValue {
pub fn update_from_tlv(&mut self, tr: &TLVElement) -> Result<(), Error> {
match self {
AttrValue::Bool(v) => *v = tr.bool()?,
AttrValue::Uint8(v) => *v = tr.u8()?,
AttrValue::Uint16(v) => *v = tr.u16()?,
AttrValue::Uint32(v) => *v = tr.u32()?,
AttrValue::Uint64(v) => *v = tr.u64()?,
_ => {
error!("Attribute type not yet supported");
return Err(Error::AttributeNotFound);
}
}
Ok(())
const SN = Self::SCENE.bits | Self::PERSISTENT.bits;
const S = Self::SCENE.bits;
const N = Self::PERSISTENT.bits;
const F = Self::FIXED.bits;
const X = Self::NULLABLE.bits;
}
}
#[derive(Debug, Clone)]
pub struct Attribute {
pub(super) id: AttrId,
pub(super) value: AttrValue,
pub(super) quality: Quality,
pub(super) access: Access,
}
impl Default for Attribute {
fn default() -> Attribute {
Attribute {
id: 0,
value: AttrValue::Bool(true),
quality: Default::default(),
access: Default::default(),
}
}
pub id: AttrId,
pub quality: Quality,
pub access: Access,
}
impl Attribute {
pub fn new(id: AttrId, value: AttrValue, access: Access, quality: Quality) -> Self {
Attribute {
pub const fn new(id: AttrId, access: Access, quality: Quality) -> Self {
Self {
id,
value,
access,
quality,
}
}
pub fn set_value(&mut self, value: AttrValue) -> Result<(), Error> {
if !self.quality.contains(Quality::FIXED) {
self.value = value;
Ok(())
} else {
Err(Error::Invalid)
}
pub fn is_system(&self) -> bool {
Self::is_system_attr(self.id)
}
pub fn is_system_attr(attr_id: AttrId) -> bool {
@ -194,9 +114,9 @@ impl Attribute {
}
}
impl std::fmt::Display for Attribute {
impl core::fmt::Display for Attribute {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}: {:?}", self.id, self.value)
write!(f, "{}", self.id)
}
}
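To illustrate the reworked flags, here is how the OnOff attribute above is declared under the new model (a sketch; it assumes `rs_matter` as the crate name and that `AttrId` is a plain integer id, as the cluster definitions above suggest):

use rs_matter::data_model::objects::{Access, Attribute, Quality};

// Readable with View privilege; scene-capable and persisted (SN = SCENE | PERSISTENT).
const ON_OFF: Attribute = Attribute::new(0x0000, Access::RV, Quality::SN);

fn main() {
    assert!(ON_OFF.quality.contains(Quality::SCENE));
    assert!(ON_OFF.quality.contains(Quality::PERSISTENT));
    assert!(ON_OFF.access.contains(Access::READ));
}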


@ -0,0 +1,349 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use log::error;
use strum::FromRepr;
use crate::{
acl::{AccessReq, Accessor},
attribute_enum,
data_model::objects::*,
error::{Error, ErrorCode},
interaction_model::{
core::IMStatusCode,
messages::{
ib::{AttrPath, AttrStatus, CmdPath, CmdStatus},
GenericPath,
},
},
// TODO: This layer shouldn't really depend on the TLV layer; we should create an abstraction layer
tlv::{Nullable, TLVWriter, TagType},
};
use core::{
convert::TryInto,
fmt::{self, Debug},
};
#[derive(Clone, Copy, Debug, Eq, PartialEq, FromRepr)]
#[repr(u16)]
pub enum GlobalElements {
_ClusterRevision = 0xFFFD,
FeatureMap = 0xFFFC,
AttributeList = 0xFFFB,
_EventList = 0xFFFA,
_ClientGenCmd = 0xFFF9,
ServerGenCmd = 0xFFF8,
FabricIndex = 0xFE,
}
attribute_enum!(GlobalElements);
pub const FEATURE_MAP: Attribute =
Attribute::new(GlobalElements::FeatureMap as _, Access::RV, Quality::NONE);
pub const ATTRIBUTE_LIST: Attribute = Attribute::new(
GlobalElements::AttributeList as _,
Access::RV,
Quality::NONE,
);
// TODO: What if, instead of creating this, we just passed the AttrData/AttrPath to the read/write
// methods?
/// The Attribute Details structure records the details about the attribute under consideration.
#[derive(Debug)]
pub struct AttrDetails<'a> {
pub node: &'a Node<'a>,
/// The actual endpoint ID
pub endpoint_id: EndptId,
/// The actual cluster ID
pub cluster_id: ClusterId,
/// The actual attribute ID
pub attr_id: AttrId,
/// List Index, if any
pub list_index: Option<Nullable<u16>>,
/// The current Fabric Index
pub fab_idx: u8,
/// Fabric Filtering Activated
pub fab_filter: bool,
pub dataver: Option<u32>,
pub wildcard: bool,
}
impl<'a> AttrDetails<'a> {
pub fn is_system(&self) -> bool {
Attribute::is_system_attr(self.attr_id)
}
pub fn path(&self) -> AttrPath {
AttrPath {
endpoint: Some(self.endpoint_id),
cluster: Some(self.cluster_id),
attr: Some(self.attr_id),
list_index: self.list_index,
..Default::default()
}
}
pub fn status(&self, status: IMStatusCode) -> Result<Option<AttrStatus>, Error> {
if self.should_report(status) {
Ok(Some(AttrStatus::new(
&GenericPath {
endpoint: Some(self.endpoint_id),
cluster: Some(self.cluster_id),
leaf: Some(self.attr_id as _),
},
status,
0,
)))
} else {
Ok(None)
}
}
fn should_report(&self, status: IMStatusCode) -> bool {
!self.wildcard
|| !matches!(
status,
IMStatusCode::UnsupportedEndpoint
| IMStatusCode::UnsupportedCluster
| IMStatusCode::UnsupportedAttribute
| IMStatusCode::UnsupportedCommand
| IMStatusCode::UnsupportedAccess
| IMStatusCode::UnsupportedRead
| IMStatusCode::UnsupportedWrite
| IMStatusCode::DataVersionMismatch
)
}
}
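should_report captures a small but important rule: a concretely addressed attribute always gets a status back, while paths produced by wildcard expansion stay silent for the "unsupported" family of outcomes (and DataVersionMismatch), so a wildcard read is not flooded with errors for elements the expansion merely skipped. A reduced standalone sketch of the predicate (the Status enum here is a stand-in for IMStatusCode):

```rust
// Reduced illustration of the `should_report` predicate above.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Status {
    UnsupportedAttribute, // stands in for the whole "Unsupported*" family
    Failure,              // any other error
}

fn should_report(wildcard: bool, status: Status) -> bool {
    !wildcard || !matches!(status, Status::UnsupportedAttribute)
}

fn main() {
    assert!(should_report(false, Status::UnsupportedAttribute)); // concrete path: report
    assert!(!should_report(true, Status::UnsupportedAttribute)); // wildcard expansion: suppress
    assert!(should_report(true, Status::Failure));               // real failures still reported
}
```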
#[derive(Debug)]
pub struct CmdDetails<'a> {
pub node: &'a Node<'a>,
pub endpoint_id: EndptId,
pub cluster_id: ClusterId,
pub cmd_id: CmdId,
pub wildcard: bool,
}
impl<'a> CmdDetails<'a> {
pub fn path(&self) -> CmdPath {
CmdPath::new(
Some(self.endpoint_id),
Some(self.cluster_id),
Some(self.cmd_id),
)
}
pub fn success(&self, tracker: &CmdDataTracker) -> Option<CmdStatus> {
if tracker.needs_status() {
self.status(IMStatusCode::Success)
} else {
None
}
}
pub fn status(&self, status: IMStatusCode) -> Option<CmdStatus> {
if self.should_report(status) {
Some(CmdStatus::new(
CmdPath::new(
Some(self.endpoint_id),
Some(self.cluster_id),
Some(self.cmd_id),
),
status,
0,
))
} else {
None
}
}
fn should_report(&self, status: IMStatusCode) -> bool {
!self.wildcard
|| !matches!(
status,
IMStatusCode::UnsupportedEndpoint
| IMStatusCode::UnsupportedCluster
| IMStatusCode::UnsupportedAttribute
| IMStatusCode::UnsupportedCommand
| IMStatusCode::UnsupportedAccess
| IMStatusCode::UnsupportedRead
| IMStatusCode::UnsupportedWrite
)
}
}
#[derive(Debug, Clone)]
pub struct Cluster<'a> {
pub id: ClusterId,
pub feature_map: u32,
pub attributes: &'a [Attribute],
pub commands: &'a [CmdId],
}
impl<'a> Cluster<'a> {
pub const fn new(
id: ClusterId,
feature_map: u32,
attributes: &'a [Attribute],
commands: &'a [CmdId],
) -> Self {
Self {
id,
feature_map,
attributes,
commands,
}
}
pub fn match_attributes(
&self,
attr: Option<AttrId>,
) -> impl Iterator<Item = &'_ Attribute> + '_ {
self.attributes
.iter()
.filter(move |attribute| attr.map(|attr| attr == attribute.id).unwrap_or(true))
}
pub fn match_commands(&self, cmd: Option<CmdId>) -> impl Iterator<Item = CmdId> + '_ {
self.commands
.iter()
.filter(move |id| cmd.map(|cmd| **id == cmd).unwrap_or(true))
.copied()
}
pub fn check_attribute(
&self,
accessor: &Accessor,
ep: EndptId,
attr: AttrId,
write: bool,
) -> Result<(), IMStatusCode> {
let attribute = self
.attributes
.iter()
.find(|attribute| attribute.id == attr)
.ok_or(IMStatusCode::UnsupportedAttribute)?;
Self::check_attr_access(
accessor,
GenericPath::new(Some(ep), Some(self.id), Some(attr as _)),
write,
attribute.access,
)
}
pub fn check_command(
&self,
accessor: &Accessor,
ep: EndptId,
cmd: CmdId,
) -> Result<(), IMStatusCode> {
self.commands
.iter()
.find(|id| **id == cmd)
.ok_or(IMStatusCode::UnsupportedCommand)?;
Self::check_cmd_access(
accessor,
GenericPath::new(Some(ep), Some(self.id), Some(cmd)),
)
}
pub(crate) fn check_attr_access(
accessor: &Accessor,
path: GenericPath,
write: bool,
target_perms: Access,
) -> Result<(), IMStatusCode> {
let mut access_req = AccessReq::new(
accessor,
path,
if write { Access::WRITE } else { Access::READ },
);
if !target_perms.contains(access_req.operation()) {
Err(if matches!(access_req.operation(), Access::WRITE) {
IMStatusCode::UnsupportedWrite
} else {
IMStatusCode::UnsupportedRead
})?;
}
access_req.set_target_perms(target_perms);
if access_req.allow() {
Ok(())
} else {
Err(IMStatusCode::UnsupportedAccess)
}
}
pub(crate) fn check_cmd_access(
accessor: &Accessor,
path: GenericPath,
) -> Result<(), IMStatusCode> {
let mut access_req = AccessReq::new(accessor, path, Access::WRITE);
access_req.set_target_perms(
Access::WRITE
.union(Access::NEED_OPERATE)
.union(Access::NEED_MANAGE)
.union(Access::NEED_ADMIN),
); // TODO
if access_req.allow() {
Ok(())
} else {
Err(IMStatusCode::UnsupportedAccess)
}
}
pub fn read(&self, attr: AttrId, mut writer: AttrDataWriter) -> Result<(), Error> {
match attr.try_into()? {
GlobalElements::AttributeList => {
self.encode_attribute_ids(AttrDataWriter::TAG, &mut writer)?;
writer.complete()
}
GlobalElements::FeatureMap => writer.set(self.feature_map),
other => {
error!("This attribute is not yet handled {:?}", other);
Err(ErrorCode::AttributeNotFound.into())
}
}
}
fn encode_attribute_ids(&self, tag: TagType, tw: &mut TLVWriter) -> Result<(), Error> {
tw.start_array(tag)?;
for a in self.attributes {
tw.u16(TagType::Anonymous, a.id)?;
}
tw.end_container()
}
}
impl<'a> core::fmt::Display for Cluster<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "id:{}, ", self.id)?;
write!(f, "attrs[")?;
let mut comma = "";
for element in self.attributes.iter() {
write!(f, "{} {}", comma, element)?;
comma = ",";
}
write!(f, " ], ")
}
}
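match_attributes and match_commands share one selector idiom: None means "match everything", Some(id) means "match exactly that id", written as .filter(move |x| sel.map(|s| s == *x).unwrap_or(true)). A standalone sketch of the idiom with plain ids:

```rust
// Standalone sketch of the Option-based wildcard selector used by
// `match_attributes`/`match_commands` above.
fn match_ids(ids: &[u32], wanted: Option<u32>) -> impl Iterator<Item = u32> + '_ {
    ids.iter()
        .copied()
        .filter(move |id| wanted.map(|w| w == *id).unwrap_or(true))
}

fn main() {
    let cmds = [0x00, 0x01, 0x40];
    assert_eq!(match_ids(&cmds, None).count(), 3);                        // wildcard
    assert_eq!(match_ids(&cmds, Some(0x01)).collect::<Vec<_>>(), [0x01]); // concrete
}
```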


@@ -0,0 +1,57 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use core::cell::Cell;
use crate::utils::rand::Rand;
pub struct Dataver {
ver: Cell<u32>,
changed: Cell<bool>,
}
impl Dataver {
pub fn new(rand: Rand) -> Self {
let mut buf = [0; 4];
rand(&mut buf);
Self {
ver: Cell::new(u32::from_be_bytes(buf)),
changed: Cell::new(false),
}
}
pub fn get(&self) -> u32 {
self.ver.get()
}
pub fn changed(&self) -> u32 {
self.ver.set(self.ver.get().overflowing_add(1).0);
self.changed.set(true);
self.get()
}
pub fn consume_change<T>(&self, change: T) -> Option<T> {
if self.changed.get() {
self.changed.set(false);
Some(change)
} else {
None
}
}
}
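Dataver seeds each cluster instance with a random data version, bumps it on every change, and hands the change notification out exactly once through consume_change. A hypothetical usage sketch of the type above, assuming (as the rand(&mut buf) call suggests) that Rand is a plain fn(&mut [u8]) alias; fake_rand is a made-up stand-in for the platform RNG:

```rust
// Hypothetical stand-in for the platform RNG; `Rand` is assumed here to be
// a `fn(&mut [u8])` alias, as the `rand(&mut buf)` call above suggests.
fn fake_rand(buf: &mut [u8]) {
    buf.fill(0x42); // deterministic filler, good enough for the sketch
}

fn main() {
    let dataver = Dataver::new(fake_rand);
    let before = dataver.get();

    // Mutating the cluster bumps the version and marks it as changed...
    let after = dataver.changed();
    assert_eq!(after, before.wrapping_add(1));

    // ...and the change is handed out exactly once (e.g. to a subscription).
    assert_eq!(dataver.consume_change("report"), Some("report"));
    assert_eq!(dataver.consume_change("report"), None);
}
```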


@@ -0,0 +1,545 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use core::fmt::{Debug, Formatter};
use core::marker::PhantomData;
use core::ops::{Deref, DerefMut};
use crate::interaction_model::core::IMStatusCode;
use crate::interaction_model::messages::ib::{
AttrPath, AttrResp, AttrStatus, CmdDataTag, CmdPath, CmdStatus, InvResp, InvRespTag,
};
use crate::tlv::UtfStr;
use crate::transport::exchange::Exchange;
use crate::{
error::{Error, ErrorCode},
interaction_model::messages::ib::{AttrDataTag, AttrRespTag},
tlv::{FromTLV, TLVElement, TLVWriter, TagType, ToTLV},
};
use log::error;
use super::{AttrDetails, CmdDetails, DataModelHandler};
// TODO: Should this return an IMStatusCode error? If so, the higher layer may have already
// started encoding the 'success' headers, and we might not want to manage the tw.rewind()
// in that case if we add this support.
pub type EncodeValueGen<'a> = &'a dyn Fn(TagType, &mut TLVWriter);
#[derive(Clone)]
/// A structure for encoding various types of values
pub enum EncodeValue<'a> {
/// This indicates a value that is dynamically generated. This variant
/// is typically used in the transmit/to-tlv path where we want to encode a value at
/// run time
Closure(EncodeValueGen<'a>),
/// This indicates a value that is in TLVElement form. This variant is
/// typically used in the receive/from-tlv path where we don't want to decode the
/// full value up front; it can be decoded at the time of its use
Tlv(TLVElement<'a>),
/// This indicates a static value. This variant is typically used in the transmit/
/// to-tlv path
Value(&'a dyn ToTLV),
}
impl<'a> EncodeValue<'a> {
pub fn unwrap_tlv(self) -> Option<TLVElement<'a>> {
match self {
EncodeValue::Tlv(t) => Some(t),
_ => None,
}
}
}
impl<'a> PartialEq for EncodeValue<'a> {
fn eq(&self, other: &Self) -> bool {
match self {
EncodeValue::Closure(_) => {
error!("PartialEq not yet supported");
false
}
EncodeValue::Tlv(a) => {
if let EncodeValue::Tlv(b) = other {
a == b
} else {
false
}
}
// Just claim false for now
EncodeValue::Value(_) => {
error!("PartialEq not yet supported");
false
}
}
}
}
impl<'a> Debug for EncodeValue<'a> {
fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), core::fmt::Error> {
match self {
EncodeValue::Closure(_) => write!(f, "Contains closure"),
EncodeValue::Tlv(t) => write!(f, "{:?}", t),
EncodeValue::Value(_) => write!(f, "Contains EncodeValue"),
}?;
Ok(())
}
}
impl<'a> ToTLV for EncodeValue<'a> {
fn to_tlv(&self, tw: &mut TLVWriter, tag_type: TagType) -> Result<(), Error> {
match self {
EncodeValue::Closure(f) => {
(f)(tag_type, tw);
Ok(())
}
EncodeValue::Tlv(_) => panic!("This looks invalid"),
EncodeValue::Value(v) => v.to_tlv(tw, tag_type),
}
}
}
impl<'a> FromTLV<'a> for EncodeValue<'a> {
fn from_tlv(data: &TLVElement<'a>) -> Result<Self, Error> {
Ok(EncodeValue::Tlv(data.clone()))
}
}
pub struct AttrDataEncoder<'a, 'b, 'c> {
dataver_filter: Option<u32>,
path: AttrPath,
tw: &'a mut TLVWriter<'b, 'c>,
}
impl<'a, 'b, 'c> AttrDataEncoder<'a, 'b, 'c> {
pub async fn handle_read<T: DataModelHandler>(
item: &Result<AttrDetails<'_>, AttrStatus>,
handler: &T,
tw: &mut TLVWriter<'_, '_>,
) -> Result<bool, Error> {
let status = match item {
Ok(attr) => {
let encoder = AttrDataEncoder::new(attr, tw);
let result = {
#[cfg(not(feature = "nightly"))]
{
handler.read(attr, encoder)
}
#[cfg(feature = "nightly")]
{
handler.read(attr, encoder).await
}
};
match result {
Ok(()) => None,
Err(e) => {
if e.code() == ErrorCode::NoSpace {
return Ok(false);
} else {
attr.status(e.into())?
}
}
}
}
Err(status) => Some(status.clone()),
};
if let Some(status) = status {
AttrResp::Status(status).to_tlv(tw, TagType::Anonymous)?;
}
Ok(true)
}
pub async fn handle_write<T: DataModelHandler>(
item: &Result<(AttrDetails<'_>, TLVElement<'_>), AttrStatus>,
handler: &T,
tw: &mut TLVWriter<'_, '_>,
) -> Result<(), Error> {
let status = match item {
Ok((attr, data)) => {
let result = {
#[cfg(not(feature = "nightly"))]
{
handler.write(attr, AttrData::new(attr.dataver, data))
}
#[cfg(feature = "nightly")]
{
handler.write(attr, AttrData::new(attr.dataver, data)).await
}
};
match result {
Ok(()) => attr.status(IMStatusCode::Success)?,
Err(error) => attr.status(error.into())?,
}
}
Err(status) => Some(status.clone()),
};
if let Some(status) = status {
status.to_tlv(tw, TagType::Anonymous)?;
}
Ok(())
}
pub fn new(attr: &AttrDetails, tw: &'a mut TLVWriter<'b, 'c>) -> Self {
Self {
dataver_filter: attr.dataver,
path: attr.path(),
tw,
}
}
pub fn with_dataver(self, dataver: u32) -> Result<Option<AttrDataWriter<'a, 'b, 'c>>, Error> {
if self
.dataver_filter
.map(|dataver_filter| dataver_filter != dataver)
.unwrap_or(true)
{
let mut writer = AttrDataWriter::new(self.tw);
writer.start_struct(TagType::Anonymous)?;
writer.start_struct(TagType::Context(AttrRespTag::Data as _))?;
writer.u32(TagType::Context(AttrDataTag::DataVer as _), dataver)?;
self.path
.to_tlv(&mut writer, TagType::Context(AttrDataTag::Path as _))?;
Ok(Some(writer))
} else {
Ok(None)
}
}
}
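with_dataver is where dataver filtering happens on the read path: if the requester already holds the current data version the attribute is skipped (Ok(None)), otherwise a writer is returned with the response's data version and path already written. The filter itself reduces to one Option test, sketched standalone below:

```rust
// Standalone sketch of the data-version filter in `with_dataver` above.
fn needs_encoding(dataver_filter: Option<u32>, current: u32) -> bool {
    dataver_filter
        .map(|filter| filter != current)
        .unwrap_or(true)
}

fn main() {
    assert!(needs_encoding(None, 7));     // no filter supplied: always encode
    assert!(needs_encoding(Some(6), 7));  // requester is stale: encode
    assert!(!needs_encoding(Some(7), 7)); // requester is up to date: skip
}
```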
pub struct AttrDataWriter<'a, 'b, 'c> {
tw: &'a mut TLVWriter<'b, 'c>,
anchor: usize,
completed: bool,
}
impl<'a, 'b, 'c> AttrDataWriter<'a, 'b, 'c> {
pub const TAG: TagType = TagType::Context(AttrDataTag::Data as _);
fn new(tw: &'a mut TLVWriter<'b, 'c>) -> Self {
let anchor = tw.get_tail();
Self {
tw,
anchor,
completed: false,
}
}
pub fn set<T: ToTLV>(self, value: T) -> Result<(), Error> {
value.to_tlv(self.tw, Self::TAG)?;
self.complete()
}
pub fn complete(mut self) -> Result<(), Error> {
self.tw.end_container()?;
self.tw.end_container()?;
self.completed = true;
Ok(())
}
fn reset(&mut self) {
self.tw.rewind_to(self.anchor);
}
}
impl<'a, 'b, 'c> Drop for AttrDataWriter<'a, 'b, 'c> {
fn drop(&mut self) {
if !self.completed {
self.reset();
}
}
}
impl<'a, 'b, 'c> Deref for AttrDataWriter<'a, 'b, 'c> {
type Target = TLVWriter<'b, 'c>;
fn deref(&self) -> &Self::Target {
self.tw
}
}
impl<'a, 'b, 'c> DerefMut for AttrDataWriter<'a, 'b, 'c> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.tw
}
}
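AttrDataWriter (and CmdDataWriter further down) is a drop-rewind guard: new remembers the TLV buffer's tail, set/complete close the containers and mark the write committed, and Drop rolls the buffer back to the anchor if the value was never completed, so a failed encode leaves no half-written report in the output. A standalone sketch of the guard pattern over a plain Vec<u8> instead of a TLVWriter:

```rust
// Drop-rewind guard: remember the buffer tail on creation, roll back on
// Drop unless the caller explicitly completed the write.
struct TxWriter<'a> {
    buf: &'a mut Vec<u8>,
    anchor: usize,
    completed: bool,
}

impl<'a> TxWriter<'a> {
    fn new(buf: &'a mut Vec<u8>) -> Self {
        let anchor = buf.len();
        Self { buf, anchor, completed: false }
    }

    fn push(&mut self, byte: u8) {
        self.buf.push(byte);
    }

    fn complete(mut self) {
        self.completed = true; // Drop will now leave the bytes in place
    }
}

impl<'a> Drop for TxWriter<'a> {
    fn drop(&mut self) {
        if !self.completed {
            self.buf.truncate(self.anchor); // discard the partial write
        }
    }
}

fn main() {
    let mut out = vec![0xAAu8];

    let mut w = TxWriter::new(&mut out);
    w.push(0x01);
    w.complete(); // committed: the bytes stay
    assert_eq!(out, [0xAA, 0x01]);

    let mut w = TxWriter::new(&mut out);
    w.push(0x02);
    drop(w); // never completed: rolled back on Drop
    assert_eq!(out, [0xAA, 0x01]);
}
```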
pub struct AttrData<'a> {
for_dataver: Option<u32>,
data: &'a TLVElement<'a>,
}
impl<'a> AttrData<'a> {
pub fn new(for_dataver: Option<u32>, data: &'a TLVElement<'a>) -> Self {
Self { for_dataver, data }
}
pub fn with_dataver(self, dataver: u32) -> Result<&'a TLVElement<'a>, Error> {
if let Some(req_dataver) = self.for_dataver {
if req_dataver != dataver {
Err(ErrorCode::DataVersionMismatch)?;
}
}
Ok(self.data)
}
}
#[derive(Default)]
pub struct CmdDataTracker {
skip_status: bool,
}
impl CmdDataTracker {
pub const fn new() -> Self {
Self { skip_status: false }
}
pub(crate) fn complete(&mut self) {
self.skip_status = true;
}
pub fn needs_status(&self) -> bool {
!self.skip_status
}
}
pub struct CmdDataEncoder<'a, 'b, 'c> {
tracker: &'a mut CmdDataTracker,
path: CmdPath,
tw: &'a mut TLVWriter<'b, 'c>,
}
impl<'a, 'b, 'c> CmdDataEncoder<'a, 'b, 'c> {
pub async fn handle<T: DataModelHandler>(
item: &Result<(CmdDetails<'_>, TLVElement<'_>), CmdStatus>,
handler: &T,
tw: &mut TLVWriter<'_, '_>,
exchange: &Exchange<'_>,
) -> Result<(), Error> {
let status = match item {
Ok((cmd, data)) => {
let mut tracker = CmdDataTracker::new();
let encoder = CmdDataEncoder::new(cmd, &mut tracker, tw);
let result = {
#[cfg(not(feature = "nightly"))]
{
handler.invoke(exchange, cmd, data, encoder)
}
#[cfg(feature = "nightly")]
{
handler.invoke(exchange, cmd, data, encoder).await
}
};
match result {
Ok(()) => cmd.success(&tracker),
Err(error) => {
error!("Error invoking command: {}", error);
cmd.status(error.into())
}
}
}
Err(status) => {
error!("Error invoking command: {:?}", status);
Some(status.clone())
}
};
if let Some(status) = status {
InvResp::Status(status).to_tlv(tw, TagType::Anonymous)?;
}
Ok(())
}
pub fn new(
cmd: &CmdDetails,
tracker: &'a mut CmdDataTracker,
tw: &'a mut TLVWriter<'b, 'c>,
) -> Self {
Self {
tracker,
path: cmd.path(),
tw,
}
}
pub fn with_command(mut self, cmd: u16) -> Result<CmdDataWriter<'a, 'b, 'c>, Error> {
let mut writer = CmdDataWriter::new(self.tracker, self.tw);
writer.start_struct(TagType::Anonymous)?;
writer.start_struct(TagType::Context(InvRespTag::Cmd as _))?;
self.path.path.leaf = Some(cmd as _);
self.path
.to_tlv(&mut writer, TagType::Context(CmdDataTag::Path as _))?;
Ok(writer)
}
}
pub struct CmdDataWriter<'a, 'b, 'c> {
tracker: &'a mut CmdDataTracker,
tw: &'a mut TLVWriter<'b, 'c>,
anchor: usize,
completed: bool,
}
impl<'a, 'b, 'c> CmdDataWriter<'a, 'b, 'c> {
pub const TAG: TagType = TagType::Context(CmdDataTag::Data as _);
fn new(tracker: &'a mut CmdDataTracker, tw: &'a mut TLVWriter<'b, 'c>) -> Self {
let anchor = tw.get_tail();
Self {
tracker,
tw,
anchor,
completed: false,
}
}
pub fn set<T: ToTLV>(self, value: T) -> Result<(), Error> {
value.to_tlv(self.tw, Self::TAG)?;
self.complete()
}
pub fn complete(mut self) -> Result<(), Error> {
self.tw.end_container()?;
self.tw.end_container()?;
self.completed = true;
self.tracker.complete();
Ok(())
}
fn reset(&mut self) {
self.tw.rewind_to(self.anchor);
}
}
impl<'a, 'b, 'c> Drop for CmdDataWriter<'a, 'b, 'c> {
fn drop(&mut self) {
if !self.completed {
self.reset();
}
}
}
impl<'a, 'b, 'c> Deref for CmdDataWriter<'a, 'b, 'c> {
type Target = TLVWriter<'b, 'c>;
fn deref(&self) -> &Self::Target {
self.tw
}
}
impl<'a, 'b, 'c> DerefMut for CmdDataWriter<'a, 'b, 'c> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.tw
}
}
#[derive(Copy, Clone, Debug)]
pub struct AttrType<T>(PhantomData<fn() -> T>);
impl<T> AttrType<T> {
pub const fn new() -> Self {
Self(PhantomData)
}
pub fn encode(&self, writer: AttrDataWriter, value: T) -> Result<(), Error>
where
T: ToTLV,
{
writer.set(value)
}
pub fn decode<'a>(&self, data: &'a TLVElement) -> Result<T, Error>
where
T: FromTLV<'a>,
{
T::from_tlv(data)
}
}
impl<T> Default for AttrType<T> {
fn default() -> Self {
Self::new()
}
}
#[derive(Copy, Clone, Debug, Default)]
pub struct AttrUtfType;
impl AttrUtfType {
pub const fn new() -> Self {
Self
}
pub fn encode(&self, writer: AttrDataWriter, value: &str) -> Result<(), Error> {
writer.set(UtfStr::new(value.as_bytes()))
}
pub fn decode<'a>(&self, data: &'a TLVElement) -> Result<&'a str, IMStatusCode> {
data.str().map_err(|_| IMStatusCode::InvalidDataType)
}
}
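AttrType&lt;T&gt; is a zero-sized, typed codec handle: PhantomData&lt;fn() -&gt; T&gt; keeps the type parameter without storing a T, so the helper stays const-constructible and copyable. A standalone sketch of the same marker pattern, using str parsing in place of TLV decoding:

```rust
use core::marker::PhantomData;

// Standalone sketch of the zero-sized typed-marker pattern behind
// `AttrType<T>`: PhantomData carries the type without storing a value.
struct Codec<T>(PhantomData<fn() -> T>);

impl<T: core::str::FromStr> Codec<T> {
    const fn new() -> Self {
        Self(PhantomData)
    }

    // Stands in for `AttrType::decode`, with str parsing instead of TLV.
    fn decode(&self, raw: &str) -> Result<T, T::Err> {
        raw.parse()
    }
}

fn main() {
    let codec: Codec<u16> = Codec::new();
    assert_eq!(codec.decode("42"), Ok(42u16));
    assert!(codec.decode("not a number").is_err());
}
```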
#[allow(unused_macros)]
#[macro_export]
macro_rules! attribute_enum {
($en:ty) => {
impl core::convert::TryFrom<$crate::data_model::objects::AttrId> for $en {
type Error = $crate::error::Error;
fn try_from(id: $crate::data_model::objects::AttrId) -> Result<Self, Self::Error> {
<$en>::from_repr(id)
.ok_or_else(|| $crate::error::ErrorCode::AttributeNotFound.into())
}
}
};
}
#[allow(unused_macros)]
#[macro_export]
macro_rules! command_enum {
($en:ty) => {
impl core::convert::TryFrom<$crate::data_model::objects::CmdId> for $en {
type Error = $crate::error::Error;
fn try_from(id: $crate::data_model::objects::CmdId) -> Result<Self, Self::Error> {
<$en>::from_repr(id).ok_or_else(|| $crate::error::ErrorCode::CommandNotFound.into())
}
}
};
}
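Both macros generate the same TryFrom bridge: the raw attribute or command id from the wire is mapped onto the cluster's enum via strum's from_repr, and an unknown id turns into AttributeNotFound or CommandNotFound. A standalone sketch of the from_repr half (assuming the strum crate with its derive feature, as the use strum::FromRepr import above implies; the enum is a trimmed copy of GlobalElements for illustration):

```rust
use strum::FromRepr; // assumes strum with the `derive` feature enabled

// Trimmed copy of the GlobalElements enum above, just to show the mapping.
#[derive(Clone, Copy, Debug, PartialEq, FromRepr)]
#[repr(u16)]
enum GlobalElements {
    FeatureMap = 0xFFFC,
    AttributeList = 0xFFFB,
}

fn main() {
    // This is what the generated TryFrom impl does under the hood:
    // a known id maps to the variant, an unknown id becomes an error.
    assert_eq!(
        GlobalElements::from_repr(0xFFFC),
        Some(GlobalElements::FeatureMap)
    );
    assert_eq!(
        GlobalElements::from_repr(0xFFFB),
        Some(GlobalElements::AttributeList)
    );
    assert_eq!(GlobalElements::from_repr(0x1234), None);
}
```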


@@ -0,0 +1,99 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::{acl::Accessor, interaction_model::core::IMStatusCode};
use core::fmt;
use super::{AttrId, Attribute, Cluster, ClusterId, CmdId, DeviceType, EndptId};
#[derive(Debug, Clone)]
pub struct Endpoint<'a> {
pub id: EndptId,
pub device_type: DeviceType,
pub clusters: &'a [Cluster<'a>],
}
impl<'a> Endpoint<'a> {
pub fn match_attributes(
&self,
cl: Option<ClusterId>,
attr: Option<AttrId>,
) -> impl Iterator<Item = (&'_ Cluster, &'_ Attribute)> + '_ {
self.match_clusters(cl).flat_map(move |cluster| {
cluster
.match_attributes(attr)
.map(move |attr| (cluster, attr))
})
}
pub fn match_commands(
&self,
cl: Option<ClusterId>,
cmd: Option<CmdId>,
) -> impl Iterator<Item = (&'_ Cluster, CmdId)> + '_ {
self.match_clusters(cl)
.flat_map(move |cluster| cluster.match_commands(cmd).map(move |cmd| (cluster, cmd)))
}
pub fn check_attribute(
&self,
accessor: &Accessor,
cl: ClusterId,
attr: AttrId,
write: bool,
) -> Result<(), IMStatusCode> {
self.check_cluster(cl)
.and_then(|cluster| cluster.check_attribute(accessor, self.id, attr, write))
}
pub fn check_command(
&self,
accessor: &Accessor,
cl: ClusterId,
cmd: CmdId,
) -> Result<(), IMStatusCode> {
self.check_cluster(cl)
.and_then(|cluster| cluster.check_command(accessor, self.id, cmd))
}
pub fn match_clusters(&self, cl: Option<ClusterId>) -> impl Iterator<Item = &'_ Cluster> + '_ {
self.clusters
.iter()
.filter(move |cluster| cl.map(|id| id == cluster.id).unwrap_or(true))
}
pub fn check_cluster(&self, cl: ClusterId) -> Result<&Cluster, IMStatusCode> {
self.clusters
.iter()
.find(|cluster| cluster.id == cl)
.ok_or(IMStatusCode::UnsupportedCluster)
}
}
impl<'a> core::fmt::Display for Endpoint<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "clusters:[")?;
let mut comma = "";
for cluster in self.clusters {
write!(f, "{} {{ {} }}", comma, cluster)?;
comma = ", ";
}
write!(f, "]")
}
}
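Endpoint::match_attributes stacks the selector idiom one level up: the optional cluster id filters clusters first, then each surviving cluster expands its own attributes, flat_mapped into (cluster, attribute) pairs. A standalone sketch of the nested expansion with plain ids (the cluster and attribute numbers are arbitrary examples):

```rust
// Standalone sketch of the nested wildcard expansion in
// `Endpoint::match_attributes` above, using plain ids.
struct Cluster {
    id: u32,
    attrs: Vec<u32>,
}

fn match_attributes<'a>(
    clusters: &'a [Cluster],
    cl: Option<u32>,
    attr: Option<u32>,
) -> impl Iterator<Item = (u32, u32)> + 'a {
    clusters
        .iter()
        .filter(move |c| cl.map(|id| id == c.id).unwrap_or(true))
        .flat_map(move |c| {
            c.attrs
                .iter()
                .copied()
                .filter(move |a| attr.map(|id| id == *a).unwrap_or(true))
                .map(move |a| (c.id, a))
        })
}

fn main() {
    // Hypothetical endpoint with two clusters (numbers are illustrative).
    let clusters = [
        Cluster { id: 0x0006, attrs: vec![0x0000, 0xFFFC] },
        Cluster { id: 0x0008, attrs: vec![0x0000] },
    ];
    // Wildcard cluster, concrete attribute 0x0000: one hit per cluster.
    let hits: Vec<(u32, u32)> = match_attributes(&clusters, None, Some(0x0000)).collect();
    assert_eq!(hits, vec![(0x0006, 0x0000), (0x0008, 0x0000)]);
}
```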

Some files were not shown because too many files have changed in this diff.