Commit 86dfb208 authored by Wennan Zhu, committed by tensorflow-copybara

Create a hierarchical histogram IterativeProcess that is compatible with tff.backends.mapreduce.MapReduceForm.

PiperOrigin-RevId: 411845363
parent 621f8edd
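The new builder behaves like any other TFF `IterativeProcess`. A minimal usage sketch (parameter values are illustrative; the execution context mirrors the one set up by the tests in this commit):

import tensorflow as tf
from tensorflow_federated.python.analytics.hierarchical_histogram import hierarchical_histogram_lib as hihi
from tensorflow_federated.python.core.backends.test import execution_contexts

# Assumed setup, mirroring the test file in this commit.
execution_contexts.set_test_execution_context()

process = hihi.build_hierarchical_histogram_process(
    lower_bound=0., upper_bound=4., num_bins=4, max_records_per_user=4)
state = process.initialize()
client_data = [tf.data.Dataset.from_tensor_slices([1., 2., 3., 4.])]
# `next` returns the aggregated hierarchical histogram (a tf.RaggedTensor)
# and an empty server output.
result, _ = process.next(state, client_data)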
@@ -26,6 +26,7 @@ py_library(
":data_processing",
":histogram_processing",
"//tensorflow_federated/python/analytics/heavy_hitters",
"//tensorflow_federated/python/analytics/hierarchical_histogram:hierarchical_histogram_lib",
],
)
@@ -16,3 +16,4 @@
from tensorflow_federated.python.analytics import data_processing
from tensorflow_federated.python.analytics import heavy_hitters
from tensorflow_federated.python.analytics import histogram_processing
from tensorflow_federated.python.analytics.hierarchical_histogram.hierarchical_histogram_lib import build_hierarchical_histogram_process
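This export makes the builder importable directly from the analytics package (and, assuming the usual top-level packaging, reachable as `tff.analytics.build_hierarchical_histogram_process`):

from tensorflow_federated.python.analytics import build_hierarchical_histogram_process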
@@ -4,6 +4,13 @@ package(default_visibility = ["//tensorflow_federated/python/analytics:__subpack
licenses(["notice"])
py_library(
name = "hierarchical_histogram",
srcs = ["__init__.py"],
srcs_version = "PY3",
visibility = ["//tensorflow_federated/tools/python_package:python_package_tool"],
)
py_library(
name = "hierarchical_histogram_factory",
srcs = ["hierarchical_histogram_factory.py"],
@@ -38,25 +45,29 @@ py_test(
)
py_library(
name = "hierarchical_histogram",
srcs = ["hierarchical_histogram.py"],
name = "hierarchical_histogram_lib",
srcs = ["hierarchical_histogram_lib.py"],
srcs_version = "PY3",
visibility = ["//tensorflow_federated/python/analytics:__pkg__"],
deps = [
":clipping_factory",
":hierarchical_histogram_factory",
"//tensorflow_federated/python/core/api:computations",
"//tensorflow_federated/python/core/impl/federated_context:intrinsics",
"//tensorflow_federated/python/core/impl/types:computation_types",
"//tensorflow_federated/python/core/impl/types:placements",
"//tensorflow_federated/python/core/templates:iterative_process",
],
)
py_test(
name = "hierarchical_histogram_test",
srcs = ["hierarchical_histogram_test.py"],
name = "hierarchical_histogram_lib_test",
srcs = ["hierarchical_histogram_lib_test.py"],
python_version = "PY3",
shard_count = 4,
srcs_version = "PY3",
deps = [
":hierarchical_histogram",
":hierarchical_histogram_lib",
"//tensorflow_federated/python/core/api:test_case",
"//tensorflow_federated/python/core/backends/test:execution_contexts",
],
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Libraries for computing hierarchical histogram."""
@@ -133,7 +133,6 @@ class HistogramClippingSumFactory(factory.UnweightedAggregationFactory):
return aggregation_process.AggregationProcess(init_fn, next_fn)
@tf.function
def _sub_sample_clip(histogram, sample_num):
"""Clips `histogram` by sub-sampling.
@@ -168,7 +167,6 @@ def _sub_sample_clip(histogram, sample_num):
return result
@tf.function
def _distinct_clip(histogram, sample_num):
"""Clips `histogram` by distinct sub-sampling.
@@ -187,8 +185,8 @@ def _distinct_clip(histogram, sample_num):
"""
def distinct():
indices = tf.cast(
tf.squeeze(tf.where(tf.not_equal(histogram, 0))), tf.int32)
indices = tf.squeeze(
tf.cast(tf.where(tf.not_equal(histogram, 0)), tf.int32))
seed = tf.cast(
tf.stack([tf.timestamp() * 1e6,
tf.timestamp() * 1e6]), dtype=tf.int64)
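# Illustrative walk-through (not part of this change): for histogram
# [0, 3, 0, 5], `tf.where(tf.not_equal(histogram, 0))` yields [[1], [3]],
# so casting to tf.int32 and squeezing gives the distinct indices [1, 3].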
@@ -13,6 +13,7 @@
# limitations under the License.
"""The functions for creating the federated computation for hierarchical histogram aggregation."""
import math
import numpy as np
import tensorflow as tf
@@ -21,9 +22,10 @@ from tensorflow_federated.python.analytics.hierarchical_histogram import hierarc
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
from tensorflow_federated.python.core.templates import iterative_process
@tf.function
def _discretized_histogram_counts(client_data: tf.data.Dataset,
lower_bound: float, upper_bound: float,
num_bins: int) -> tf.Tensor:
@@ -120,8 +122,8 @@ def build_hierarchical_histogram_computation(
use. Currently supported mechanisms are
- 'no-noise': (Default) Tree aggregation mechanism without noise.
- 'central-gaussian': Tree aggregation with central Gaussian mechanism.
- 'distributed-discrete-gaussian': Tree aggregation mechanism with
the distributed discrete Gaussian mechanism in "The Distributed Discrete
- 'distributed-discrete-gaussian': Tree aggregation mechanism with the
distributed discrete Gaussian mechanism in "The Distributed Discrete
Gaussian Mechanism for Federated Learning with Secure Aggregation. Peter
Kairouz, Ziyu Liu, Thomas Steinke".
noise_multiplier: A `float` specifying the noise multiplier (central noise
@@ -133,15 +135,13 @@ 2**B will be the field size for SecAgg operations). Only needed when
2**B will be the field size for SecAgg operations). Only needed when
`dp_mechanism` is 'distributed-discrete-gaussian'. Please read the below
precautions carefully and set `bits` accordingly. Otherwise, unexpected
overflow or accuracy degradation might happen.
(1) Should be in the inclusive range [1, 22] to avoid overflow inside
secure aggregation;
(2) Should be at least as large as
`log2(4 * sqrt(expected_clients_per_round)* noise_multiplier *
l2_norm_bound + expected_clients_per_round * max_records_per_user) + 1`
to avoid accuracy degradation caused by frequent modular clipping;
(3) If the number of clients exceeds `expected_clients_per_round`, overflow
might happen.
overflow or accuracy degradation might happen. (1) Should be in the
inclusive range [1, 22] to avoid overflow inside secure aggregation; (2)
Should be at least as large as `log2(4 * sqrt(expected_clients_per_round)*
noise_multiplier * l2_norm_bound + expected_clients_per_round *
max_records_per_user) + 1` to avoid accuracy degradation caused by
frequent modular clipping; (3) If the number of clients exceeds
`expected_clients_per_round`, overflow might happen.
Returns:
A federated computation that performs hierarchical histogram aggregation.
@@ -181,6 +181,124 @@ def build_hierarchical_histogram_computation(
return hierarchical_histogram_computation
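To make precaution (2) on `bits` concrete, here is a small worked example; every parameter value below, including the `l2_norm_bound`, is hypothetical and serves only to illustrate the arithmetic:

import math

expected_clients_per_round = 100
noise_multiplier = 1.0
l2_norm_bound = 10.0  # assumed L2 clip norm, for illustration only
max_records_per_user = 10

# Precaution (2): bits >= log2(4 * sqrt(n) * z * c + n * m) + 1.
min_bits = math.ceil(
    math.log2(4 * math.sqrt(expected_clients_per_round) * noise_multiplier *
              l2_norm_bound +
              expected_clients_per_round * max_records_per_user) + 1)
# min_bits == 12 here, so any `bits` in [12, 22] also satisfies
# precaution (1).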
def build_hierarchical_histogram_process(
lower_bound: float,
upper_bound: float,
num_bins: int,
arity: int = 2,
clip_mechanism: str = 'sub-sampling',
max_records_per_user: int = 10,
dp_mechanism: str = 'no-noise',
noise_multiplier: float = 0.0,
expected_clients_per_round: int = 10,
bits: int = 22) -> iterative_process.IterativeProcess:
"""Creates an IterativeProcess for hierarchical histogram aggregation.
This function wraps the `tff.computation` created by
`build_hierarchical_histogram_computation` in an `IterativeProcess` that is
compatible with `tff.backends.mapreduce.MapReduceForm`.
Args:
lower_bound: A `float` specifying the lower bound of the data range.
upper_bound: A `float` specifying the upper bound of the data range.
num_bins: The integer number of bins to compute.
arity: The branching factor of the tree. Defaults to 2.
clip_mechanism: A `str` representing the clipping mechanism. Currently
supported mechanisms are
- 'sub-sampling': (Default) Uniformly sample up to `max_records_per_user`
records without replacement from the client dataset.
- 'distinct': Uniquify client dataset and uniformly sample up to
`max_records_per_user` records without replacement from it.
max_records_per_user: An `int` representing the maximum number of records
each user can include in their local histogram. Defaults to 10.
dp_mechanism: A `str` representing the differentially private mechanism to
use. Currently supported mechanisms are
- 'no-noise': (Default) Tree aggregation mechanism without noise.
- 'central-gaussian': Tree aggregation with central Gaussian mechanism.
- 'distributed-discrete-gaussian': Tree aggregation mechanism with the
distributed discrete Gaussian mechanism in "The Distributed Discrete
Gaussian Mechanism for Federated Learning with Secure Aggregation. Peter
Kairouz, Ziyu Liu, Thomas Steinke".
noise_multiplier: A `float` specifying the noise multiplier (central noise
stddev / L2 clip norm) for model updates. Defaults to 0.0.
expected_clients_per_round: An `int` specifying the lower bound on the
expected number of clients. Only needed when `dp_mechanism` is
'distributed-discrete-gaussian'. Defaults to 10.
bits: A positive integer specifying the communication bit-width B (where
2**B will be the field size for SecAgg operations). Only needed when
`dp_mechanism` is 'distributed-discrete-gaussian'. Please read the below
precautions carefully and set `bits` accordingly. Otherwise, unexpected
overflow or accuracy degradation might happen. (1) Should be in the
inclusive range [1, 22] to avoid overflow inside secure aggregation; (2)
Should be at least as large as `log2(4 * sqrt(expected_clients_per_round)*
noise_multiplier * l2_norm_bound + expected_clients_per_round *
max_records_per_user) + 1` to avoid accuracy degradation caused by
frequent modular clipping; (3) If the number of clients exceeds
`expected_clients_per_round`, overflow might happen.
Returns:
An `IterativeProcess` that performs hierarchical histogram aggregation.
"""
_check_greater_than_equal(upper_bound, lower_bound, 'upper_bound',
'lower_bound')
_check_positive(num_bins, 'num_bins')
_check_greater_than_equal_thres(arity, 2, 'arity')
_check_membership(clip_mechanism, clipping_factory.CLIP_MECHANISMS,
'clip_mechanism')
_check_greater_than_equal_thres(max_records_per_user, 1,
'max_records_per_user')
_check_membership(dp_mechanism, hihi_factory.DP_MECHANISMS, 'dp_mechanism')
_check_greater_than_equal_thres(noise_multiplier, 0., 'noise_multiplier')
_check_positive(expected_clients_per_round, 'expected_clients_per_round')
_check_in_range(bits, 'bits', 1, 22)
_check_greater_than_equal_thres(bits, math.log2(expected_clients_per_round),
'bits')
one_round_computation = build_hierarchical_histogram_computation(
lower_bound=lower_bound,
upper_bound=upper_bound,
num_bins=num_bins,
arity=arity,
clip_mechanism=clip_mechanism,
max_records_per_user=max_records_per_user,
dp_mechanism=dp_mechanism,
noise_multiplier=noise_multiplier,
expected_clients_per_round=expected_clients_per_round,
bits=bits)
parameter_type_signature = one_round_computation.type_signature.parameter
result_type_signature = one_round_computation.type_signature.result
@computations.tf_computation
def initialize():
# Creates a `tf.RaggedTensor` that has the same `type_signature` as the
# result returned by `one_round_computation`. This is to make sure the
# generated IterativeProcess is compatible with
# `tff.backends.mapreduce.MapReduceForm`.
flat_values_shape = result_type_signature.member[0].shape
flat_values_dtype = result_type_signature.member[0].dtype
nested_row_splits = np.zeros(shape=result_type_signature.member[1][0].shape)
# To generate a valid `tf.RaggedTensor`, the first element in
# `nested_row_splits` must be 0, and the last element in `nested_row_splits`
# must be the length of `flat_values`.
nested_row_splits[-1] = flat_values_shape[0]
return tf.RaggedTensor.from_nested_row_splits(
flat_values=tf.zeros(shape=flat_values_shape, dtype=flat_values_dtype),
nested_row_splits=[nested_row_splits])
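# Illustrative example (not from this commit): with 7 flat values and one
# ragged dimension, nested_row_splits=[[0, 7]] obeys both rules and yields
# a single row holding all 7 values:
#   tf.RaggedTensor.from_nested_row_splits(tf.zeros([7]), [[0, 7]])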
@computations.federated_computation
def init_fn():
return intrinsics.federated_eval(initialize, placements.SERVER)
@computations.federated_computation(init_fn.type_signature.result,
parameter_type_signature)
def next_fn(_, client_data):
return one_round_computation(client_data), intrinsics.federated_value(
(), placements.SERVER)
return iterative_process.IterativeProcess(init_fn, next_fn)
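Because `init_fn` produces a server-placed state and `next_fn` maps (server state, client data) to a new server state plus an empty server output, the process has the shape `MapReduceForm` requires. A hedged compatibility check, assuming the `form_utils` helper available in TFF around this revision:

from tensorflow_federated.python.core.backends.mapreduce import form_utils

process = build_hierarchical_histogram_process(
    lower_bound=0., upper_bound=4., num_bins=4)
# Raises if the process cannot be compiled into MapReduceForm.
mrf = form_utils.get_map_reduce_form_for_iterative_process(process)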
def _check_greater_than_equal(lvalue, rvalue, llabel, rlabel):
if lvalue < rvalue:
raise ValueError(f'`{llabel}` should be no smaller than '
@@ -13,11 +13,12 @@
# limitations under the License.
"""Tests for hierarchical_histogram."""
from typing import Tuple
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow_federated.python.analytics.hierarchical_histogram import hierarchical_histogram as hihi
from tensorflow_federated.python.analytics.hierarchical_histogram import hierarchical_histogram_lib as hihi
from tensorflow_federated.python.core.api import test_case
from tensorflow_federated.python.core.backends.test import execution_contexts
@@ -50,6 +51,100 @@ class ClientWorkTest(test_case.TestCase, parameterized.TestCase):
class HierarchicalHistogramTest(test_case.TestCase, parameterized.TestCase):
def _get_hierarchical_histogram_results(
self,
client_data: tf.data.Dataset,
lower_bound: float,
upper_bound: float,
num_bins: int,
arity: int = 2,
clip_mechanism: str = 'sub-sampling',
max_records_per_user: int = 10,
dp_mechanism: str = 'no-noise',
noise_multiplier: float = 0.0,
expected_clients_per_round: int = 10,
bits: int = 22,
) -> Tuple[tf.RaggedTensor, tf.RaggedTensor]:
"""Runs the Hierarchical Histogram computation and returns the results.
Runs the computation with both `build_hierarchical_histogram_computation`
and `build_hierarchical_histogram_process`.
Args:
client_data: A tf.data.Dataset of the input client data.
lower_bound: A `float` specifying the lower bound of the data range.
upper_bound: A `float` specifying the upper bound of the data range.
num_bins: The integer number of bins to compute.
arity: The branching factor of the tree. Defaults to 2.
clip_mechanism: A `str` representing the clipping mechanism. Currently
supported mechanisms are
- 'sub-sampling': (Default) Uniformly sample up to
`max_records_per_user` records without replacement from the client
dataset.
- 'distinct': Uniquify client dataset and uniformly sample up to
`max_records_per_user` records without replacement from it.
max_records_per_user: An `int` representing the maximum number of records
each user can include in their local histogram. Defaults to 10.
dp_mechanism: A `str` representing the differentially private mechanism to
use. Currently supported mechanisms are
- 'no-noise': (Default) Tree aggregation mechanism without noise.
- 'central-gaussian': Tree aggregation with central Gaussian mechanism.
- 'distributed-discrete-gaussian': Tree aggregation mechanism with the
distributed discrete Gaussian mechanism in "The Distributed Discrete
Gaussian Mechanism for Federated Learning with Secure Aggregation.
Peter Kairouz, Ziyu Liu, Thomas Steinke".
noise_multiplier: A `float` specifying the noise multiplier (central
noise stddev / L2 clip norm) for model updates. Defaults to 0.0.
expected_clients_per_round: An `int` specifying the lower bound on the
expected number of clients. Only needed when `dp_mechanism` is
'distributed-discrete-gaussian'. Defaults to 10.
bits: A positive integer specifying the communication bit-width B (where
2**B will be the field size for SecAgg operations). Only needed when
`dp_mechanism` is 'distributed-discrete-gaussian'. Please read the below
precautions carefully and set `bits` accordingly. Otherwise, unexpected
overflow or accuracy degradation might happen. (1) Should be in the
inclusive range [1, 22] to avoid overflow inside secure aggregation; (2)
Should be at least as large as `log2(4*sqrt(expected_clients_per_round)
* noise_multiplier * l2_norm_bound + expected_clients_per_round *
max_records_per_user) + 1` to avoid accuracy degradation caused by
frequent modular clipping; (3) If the number of clients exceeds
`expected_clients_per_round`, overflow might happen.
Returns:
A `Tuple` of two `tf.RaggedTensor`s containing the hierarchical histograms
computed by `build_hierarchical_histogram_computation` and
`build_hierarchical_histogram_process`, respectively.
"""
hihi_computation = hihi.build_hierarchical_histogram_computation(
lower_bound=lower_bound,
upper_bound=upper_bound,
num_bins=num_bins,
arity=arity,
clip_mechanism=clip_mechanism,
max_records_per_user=max_records_per_user,
dp_mechanism=dp_mechanism,
noise_multiplier=noise_multiplier,
expected_clients_per_round=expected_clients_per_round,
bits=bits)
hihi_computation_result = hihi_computation(client_data)
hihi_process = hihi.build_hierarchical_histogram_process(
lower_bound=lower_bound,
upper_bound=upper_bound,
num_bins=num_bins,
arity=arity,
clip_mechanism=clip_mechanism,
max_records_per_user=max_records_per_user,
dp_mechanism=dp_mechanism,
noise_multiplier=noise_multiplier,
expected_clients_per_round=expected_clients_per_round,
bits=bits)
init_state = hihi_process.initialize()
hihi_process_result, _ = hihi_process.next(init_state, client_data)
return hihi_computation_result, hihi_process_result
@parameterized.named_parameters(
('data_range_error', [2, 1], 1, 2, 'sub-sampling', 1, 'central-gaussian',
0.1, 1, 1),
@@ -74,9 +169,10 @@ class HierarchicalHistogramTest(test_case.TestCase, parameterized.TestCase):
('bits_less_than_log_client_num', [1, 2], 1, 2, 'sub-sampling', 1,
'central-gaussian', 0.1, 8, 2),
)
def test_raises_error(self, data_range, num_bins, arity, clip_mechanism,
max_records_per_user, dp_mechanism, noise_multiplier,
expected_clients_per_round, bits):
def test_raises_error_hh_computation(self, data_range, num_bins, arity,
clip_mechanism, max_records_per_user,
dp_mechanism, noise_multiplier,
expected_clients_per_round, bits):
with self.assertRaises(ValueError):
hihi.build_hierarchical_histogram_computation(
lower_bound=data_range[0],
@@ -90,6 +186,47 @@ class HierarchicalHistogramTest(test_case.TestCase, parameterized.TestCase):
expected_clients_per_round=expected_clients_per_round,
bits=bits)
@parameterized.named_parameters(
('data_range_error', [2, 1], 1, 2, 'sub-sampling', 1, 'central-gaussian',
0.1, 1, 1),
('num_bins_error', [1, 2], 0, 2, 'sub-sampling', 1, 'central-gaussian',
0.1, 1, 1),
('arity_error', [1, 2], 1, 1, 'sub-sampling', 1, 'central-gaussian', 0.1,
1, 1),
('clip_mechanism_error', [1, 2], 1, 2, 'invalid', 1, 'central-gaussian',
0.1, 1, 1),
('max_records_per_user_error', [1, 2], 1, 2, 'sub-sampling', 0,
'central-gaussian', 0.1, 1, 1),
('dp_mechanism_error', [1, 2], 1, 2, 'sub-sampling', 1, 'invalid', 0.1, 1, 1),
('noise_multiplier_error', [1, 2], 1, 2, 'sub-sampling', 1,
'central-gaussian', -0.1, 1, 1),
('expected_clients_per_round_error', [1, 2], 1, 2, 'sub-sampling', 1,
'central-gaussian', 0.1, 0, 1),
('bits_less_than_1', [1, 2], 1, 2, 'sub-sampling', 1, 'central-gaussian',
0.1, 1, 0),
('bits_large_than_23', [1, 2], 1, 2, 'sub-sampling', 1,
'central-gaussian', 0.1, 1, 23),
('bits_less_than_log_client_num', [1, 2], 1, 2, 'sub-sampling', 1,
'central-gaussian', 0.1, 8, 2),
)
def test_raises_error_hh_process(self, data_range, num_bins, arity,
clip_mechanism, max_records_per_user,
dp_mechanism, noise_multiplier,
expected_clients_per_round, bits):
with self.assertRaises(ValueError):
hihi.build_hierarchical_histogram_process(
lower_bound=data_range[0],
upper_bound=data_range[1],
num_bins=num_bins,
arity=arity,
clip_mechanism=clip_mechanism,
max_records_per_user=max_records_per_user,
dp_mechanism=dp_mechanism,
noise_multiplier=noise_multiplier,
expected_clients_per_round=expected_clients_per_round,
bits=bits)
@parameterized.named_parameters(
('test_binary_1', [
tf.data.Dataset.from_tensor_slices([1., 2., 3., 4.]),
@@ -110,16 +247,18 @@ )
)
def test_central_no_noise_hierarchical_histogram_wo_clip(
self, client_data, data_range, num_bins, arity, reference_hi_hist):
hihi_computation = hihi.build_hierarchical_histogram_computation(
lower_bound=data_range[0],
upper_bound=data_range[1],
num_bins=num_bins,
arity=arity,
max_records_per_user=4,
dp_mechanism='no-noise')
hi_hist = hihi_computation(client_data)
(hihi_computation_result,
hihi_process_result) = self._get_hierarchical_histogram_results(
client_data=client_data,
lower_bound=data_range[0],
upper_bound=data_range[1],
num_bins=num_bins,
arity=arity,
max_records_per_user=4,
dp_mechanism='no-noise')
self.assertAllClose(hi_hist, reference_hi_hist)
self.assertAllClose(hihi_computation_result, reference_hi_hist)
self.assertAllClose(hihi_process_result, reference_hi_hist)
@parameterized.named_parameters(
('test_binary_sub_sampling', [
@@ -142,19 +281,25 @@ class HierarchicalHistogramTest(test_case.TestCase, parameterized.TestCase):
def test_central_no_noise_hierarchical_histogram_w_clip(
self, client_data, data_range, num_bins, arity, clip_mechanism,
max_records_per_user, reference_layer_l1_norm):
hihi_computation = hihi.build_hierarchical_histogram_computation(
lower_bound=data_range[0],
upper_bound=data_range[1],
num_bins=num_bins,
arity=arity,
clip_mechanism=clip_mechanism,
max_records_per_user=max_records_per_user,
dp_mechanism='no-noise')
hi_hist = hihi_computation(client_data)
(hihi_computation_result,
hihi_process_result) = self._get_hierarchical_histogram_results(
client_data=client_data,
lower_bound=data_range[0],
upper_bound=data_range[1],
num_bins=num_bins,
arity=arity,
clip_mechanism=clip_mechanism,
max_records_per_user=max_records_per_user,
dp_mechanism='no-noise')
for layer in range(hi_hist.shape[0]):
for layer in range(hihi_computation_result.shape[0]):
self.assertAllClose(
tf.math.reduce_sum(hihi_computation_result[layer]),
reference_layer_l1_norm)
for layer in range(hihi_process_result.shape[0]):
self.assertAllClose(
tf.math.reduce_sum(hi_hist[layer]), reference_layer_l1_norm)
tf.math.reduce_sum(hihi_process_result[layer]),
reference_layer_l1_norm)
@parameterized.named_parameters(
('test_binary_1', [
@@ -177,20 +322,25 @@ class HierarchicalHistogramTest(test_case.TestCase, parameterized.TestCase):
def test_central_gaussian_hierarchical_histogram_wo_clip(
self, client_data, data_range, num_bins, arity, reference_hi_hist,
noise_multiplier):
hihi_computation = hihi.build_hierarchical_histogram_computation(
lower_bound=data_range[0],
upper_bound=data_range[1],
num_bins=num_bins,
arity=arity,
max_records_per_user=4,
dp_mechanism='central-gaussian',
noise_multiplier=noise_multiplier)
hi_hist = hihi_computation(client_data)
(hihi_computation_result,
hihi_process_result) = self._get_hierarchical_histogram_results(
client_data=client_data,
lower_bound=data_range[0],
upper_bound=data_range[1],
num_bins=num_bins,
arity=arity,
max_records_per_user=4,
dp_mechanism='central-gaussian',
noise_multiplier=noise_multiplier)
# 300 is a rough six-sigma estimate accounting for the effect of the L2
# norm bound and the privacy composition.
self.assertAllClose(
hi_hist, reference_hi_hist, atol=300. * noise_multiplier)
hihi_computation_result,
reference_hi_hist,
atol=300. * noise_multiplier)
self.assertAllClose(
hihi_process_result, reference_hi_hist, atol=300. * noise_multiplier)
@parameterized.named_parameters(
('test_binary_sub_sampling', [
@@ -213,21 +363,27 @@ class HierarchicalHistogramTest(test_case.TestCase, parameterized.TestCase):
def test_central_gaussian_hierarchical_histogram_w_clip(
self, client_data, data_range, num_bins, arity, clip_mechanism,
max_records_per_user, reference_layer_l1_norm, noise_multiplier):
hihi_computation = hihi.build_hierarchical_histogram_computation(