Merge pull request #1 from decryptofy/engine-linting-fix
Engine Linting Issues and Naming Fix
decryptofy authored Jan 28, 2024
2 parents bf7dca1 + 3082ada commit 8159bd6
Showing 8 changed files with 211 additions and 218 deletions.
36 changes: 14 additions & 22 deletions src/scarr/engines/NICV.py
@@ -9,6 +9,7 @@
 import numba as nb
 from .engine import Engine
 
+
 class NICV(Engine):
 
     def __init__(self) -> None:
@@ -17,7 +18,7 @@ def __init__(self) -> None:
         self.means = None
         self.moments = None
         self.results = None
-
+
     def update(self, traces: np.ndarray, plaintext: np.ndarray):
         self.internal_state_update(traces, plaintext, self.trace_counts, self.sum, self.sum_sq)
 
@@ -31,37 +32,28 @@ def calculate(self):
         self.trace_counts = self.trace_counts[self.trace_counts != 0]
 
         mean = np.sum(self.sum, axis=0) / np.sum(self.trace_counts)
-        signals = (((self.sum / self.trace_counts[:,None]) - mean))**2
-        signals *= (self.trace_counts / self.trace_counts.shape[0])[:,None]
+        signals = (((self.sum / self.trace_counts[:, None]) - mean))**2
+        signals *= (self.trace_counts / self.trace_counts.shape[0])[:, None]
         signals = np.sum(signals, axis=0)
 
         noises = np.sum(self.sum_sq, axis=0) / np.sum(self.trace_counts) - (mean)**2
 
-
-        self.results = signals / noises
-
-    def _get_result(self):
-        if self.results is None:
-            self.calculate()
-        return self.results
-
+        return signals / noises
 
     @staticmethod
-    @nb.njit(parallel=True, fastmath=True)
+    @nb.njit(parallel=True, fastmath=True)
     def internal_state_update(traces: np.ndarray, plaintext: np.ndarray, counts, sums, sums_sq):
         for sample in nb.prange(traces.shape[1]):
             for trace in range(traces.shape[0]):
                 if sample == 0:
                     counts[plaintext[trace]] += 1
                 sums[plaintext[trace], sample] += traces[trace, sample]
-                sums_sq[plaintext[trace], sample] += np.square(traces[trace, sample])
+                sums_sq[plaintext[trace], sample] += np.square(traces[trace, sample])
 
     def populate(self, sample_length):
-        try:
-            # Count for each plaintext value
-            self.trace_counts = np.zeros((256), dtype=np.uint16)
-            # Mean value for each hex value and each sample point
-            self.sum = np.zeros((256, sample_length), dtype=np.float32)
-            # Moment value for each hex value and each sample point
-            self.sum_sq = np.zeros((256, sample_length), dtype=np.float32)
-        except:
-            print("Error populating.")
+        # Count for each plaintext value
+        self.trace_counts = np.zeros((256), dtype=np.uint16)
+        # Mean value for each hex value and each sample point
+        self.sum = np.zeros((256, sample_length), dtype=np.float32)
+        # Moment value for each hex value and each sample point
+        self.sum_sq = np.zeros((256, sample_length), dtype=np.float32)
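
Note: for context on the NICV.py changes above, here is a standalone sketch (not part of the commit) of the statistic that the refactored calculate() now returns directly: the normalized inter-class variance NICV = Var(E[T | X]) / Var(T) per sample point, built from the same count/sum/sum-of-squares accumulators that populate() allocates and internal_state_update() fills. The toy shapes, dtypes, and random data below are illustrative assumptions.

```python
import numpy as np

rng = np.random.default_rng(0)
n_traces, n_samples = 1000, 50                       # assumed toy sizes
traces = rng.normal(size=(n_traces, n_samples)).astype(np.float32)
labels = rng.integers(0, 256, size=n_traces)         # one plaintext byte per trace

# Analogues of the engine's per-plaintext accumulators.
counts = np.zeros(256, dtype=np.int64)
sums = np.zeros((256, n_samples), dtype=np.float64)
sums_sq = np.zeros((256, n_samples), dtype=np.float64)
np.add.at(counts, labels, 1)
np.add.at(sums, labels, traces)
np.add.at(sums_sq, labels, np.square(traces))

# Mirror calculate(): drop plaintext values that never occurred.
mask = counts != 0
counts, sums, sums_sq = counts[mask], sums[mask], sums_sq[mask]

mean = np.sum(sums, axis=0) / np.sum(counts)         # overall mean per sample point
signals = ((sums / counts[:, None]) - mean) ** 2     # squared offsets of class means
signals *= (counts / counts.shape[0])[:, None]       # per-class weighting, as in calculate()
signals = np.sum(signals, axis=0)                    # inter-class variance
noises = np.sum(sums_sq, axis=0) / np.sum(counts) - mean ** 2  # total variance
nicv = signals / noises                              # one NICV value per sample point
print(nicv.shape)                                    # (50,)
```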
113 changes: 65 additions & 48 deletions src/scarr/engines/cpa.py
@@ -10,9 +10,10 @@
 from multiprocessing.pool import Pool
 import asyncio
 
+
 class CPA(Engine):
 
-    def __init__(self, model, convergence_step = None) -> None:
+    def __init__(self, model, convergence_step=None) -> None:
         self.model = model
 
         self.trace_count = 0
@@ -39,21 +40,27 @@ def run(self, container):
                 (tile_x, tile_y) = tile
                 workload.append((self, container, tile_x, tile_y))
 
-            starmap_results = pool.starmap(self._run, workload)
+            starmap_results = pool.starmap(self.run_workload, workload)
             pool.close()
             pool.join()
 
         for tile_x, tile_y, results, candidates in starmap_results:
             if self.final_results is None:
-                self.final_results = np.zeros((len(container.tiles), len(container.bytes), results.shape[1], 256, container.sample_length), dtype=np.float64)
-                self.final_candidates = np.zeros((len(container.tiles), len(container.bytes), results.shape[1]), dtype=np.uint8)
+                self.final_results = np.zeros((len(container.tiles),
+                                               len(container.bytes),
+                                               results.shape[1],
+                                               256,
+                                               container.sample_length), dtype=np.float64)
+                self.final_candidates = np.zeros((len(container.tiles),
+                                                  len(container.bytes),
+                                                  results.shape[1]), dtype=np.uint8)
 
             tile_index = container.tiles.index((tile_x, tile_y))
             self.final_results[tile_index] = results
             self.final_candidates[tile_index] = candidates
 
     @staticmethod
-    def _run(self, container, tile_x, tile_y):
+    def run_workload(self, container, tile_x, tile_y):
         num_steps = container.configure(tile_x, tile_y, container.bytes, self.convergence_step)
         if self.convergence_step is None:
             self.convergence_step = np.inf
@@ -71,20 +78,24 @@ def _run(self, container, tile_x, tile_y):
         for batch in container.get_batches(tile_x, tile_y):
             if traces_processed >= self.convergence_step:
                 result = self.calculate()
-                self.results[:,converge_index,:,:] = result
-                self.candidates[:,converge_index] = self._get_candidate(result)
+                self.results[:, converge_index, :, :] = result
+                self.candidates[:, converge_index] = self.find_candidate(result)
                 traces_processed = 0
                 converge_index += 1
 
-            self.update(batch[-1], batch[0])
-            traces_processed += batch[-1].shape[0]
+            # Generate modeled power values for plaintext values
+            model = np.apply_along_axis(self.model.calculate_table, axis=1, arr=batch[0])
+            traces = batch[-1].astype(np.float32)
+
+            self.update(traces, model)
+            traces_processed += traces.shape[0]
 
         result = self.calculate()
-        self.results[:,converge_index,:,:] = result
-        self.candidates[:,converge_index] = self._get_candidate(result)
+        self.results[:, converge_index, :, :] = result
+        self.candidates[:, converge_index] = self.find_candidate(result)
 
         return tile_x, tile_y, self.results, self.candidates
 
     async def batch_loop(self, container):
         index = 0
         batch = container.get_batch_index(index)
@@ -95,34 +106,41 @@ async def batch_loop(self, container):

         while len(batch) > 0:
             if traces_processed >= self.convergence_step:
-                result = self.calculate()
-                self.results[:,converge_index,:,:] = result
-                self.candidates[:,converge_index] = self._get_candidate(result)
-                traces_processed = 0
-                converge_index += 1
+                result = self.calculate()
+                self.results[:, converge_index, :, :] = result
+                self.candidates[:, converge_index] = self.find_candidate(result)
+                traces_processed = 0
+                converge_index += 1
 
+            # Generate modeled power values for plaintext values
+            model = np.apply_along_axis(self.model.calculate_table, axis=1, arr=batch[0])
+            traces = batch[-1].astype(np.float32)
+
+            task = asyncio.create_task(self.async_update(traces, model))
+            traces_processed += traces.shape[0]
+
-            task = asyncio.create_task(self.async_update(batch[-1], batch[0]))
-            traces_processed += batch[-1].shape[0]
             batch = container.get_batch_index(index)
             index += 1
             await task
 
         result = self.calculate()
-        self.results[:,converge_index,:,:] = result
-        self.candidates[:,converge_index] = self._get_candidate(result)
-
-    def update(self, traces: np.ndarray, plaintext: np.ndarray):
-        # Generate modeled power values for plaintext values
-        model = np.apply_along_axis(self.model.calculate_table, axis=1, arr=plaintext)
-        # Update accumulators
-        self.internal_state_update(traces.astype(np.float32), model)
-
-    async def async_update(self, traces: np.ndarray, plaintext: np.ndarray):
-        # Generate modeled power values for plaintext values
-        model = np.apply_along_axis(self.model.calculate_table, axis=1, arr=plaintext)
-
-        # Update accumulators
-        self.internal_state_update(traces.astype(np.float32), model)
+        self.results[:, converge_index, :, :] = result
+        self.candidates[:, converge_index] = self.find_candidate(result)
+
+    async def async_update(self, traces: np.ndarray, data: np.ndarray):
+        # Update the number of rows processed
+        self.trace_count += traces.shape[0]
+        # Update sample accumulator
+        self.sample_sum += np.sum(traces, axis=0)
+        # Update sample squared accumulator
+        self.sample_sq_sum += np.sum(np.square(traces), axis=0)
+        # Update model accumulator
+        self.model_sum += np.sum(data, axis=0)
+        # Update model squared accumulator
+        self.model_sq_sum += np.sum(np.square(data), axis=0)
+        data = data.reshape((data.shape[0], -1))
+        # Update product accumulator
+        self.prod_sum += np.matmul(data.T, traces)
 
     def calculate(self):
         # Sample mean computation
@@ -132,7 +150,7 @@ def calculate(self):

         prod_mean = np.divide(self.prod_sum.reshape(256, self.model_sum.shape[1], self.sample_sum.shape[0]), self.trace_count)
         # Calculate correlation coefficient numerator
-        numerator = np.subtract(prod_mean, model_mean[:,:,None]*sample_mean)
+        numerator = np.subtract(prod_mean, model_mean[:, :, None]*sample_mean)
         # Calculate correlation coeefficient denominator sample part
         to_sqrt = np.subtract(np.divide(self.sample_sq_sum, self.trace_count), np.square(sample_mean))
         to_sqrt[to_sqrt < 0] = 0
@@ -142,29 +160,28 @@ def calculate(self):
         to_sqrt[to_sqrt < 0] = 0
         denom_model = np.sqrt(to_sqrt)
 
-        denominator = denom_model[:,:,None]*denom_sample
+        denominator = denom_model[:, :, None]*denom_sample
 
         denominator[denominator == 0] = 1
 
-        return np.divide(numerator, denominator).swapaxes(0,1)
+        return np.divide(numerator, denominator).swapaxes(0, 1)
 
     def get_candidate(self):
         return self.final_candidates
 
     def populate(self, sample_length, num_bytes):
         # Sum of the model so far
-        self.model_sum = np.zeros((256, num_bytes),dtype=np.float32)
+        self.model_sum = np.zeros((256, num_bytes), dtype=np.float32)
         # Sum of the model squared so far
-        self.model_sq_sum = np.zeros((256, num_bytes),dtype=np.float32)
+        self.model_sq_sum = np.zeros((256, num_bytes), dtype=np.float32)
         # Sum of the samples observed
-        self.sample_sum = np.zeros((sample_length),dtype=np.float32)
+        self.sample_sum = np.zeros((sample_length), dtype=np.float32)
         # Sum of the samples observed squared
-        self.sample_sq_sum = np.zeros((sample_length),dtype=np.float32)
+        self.sample_sq_sum = np.zeros((sample_length), dtype=np.float32)
         # Sum of the product of the samples and the models
-        self.prod_sum = np.zeros((256 * num_bytes, sample_length),dtype=np.float32)
+        self.prod_sum = np.zeros((256 * num_bytes, sample_length), dtype=np.float32)
 
-    # Update the values using the current batch of trace samples and the computed model incrementing the vals into the accumulators
-    def internal_state_update(self, traces:np.ndarray, data:np.ndarray):
+    def update(self, traces: np.ndarray, data: np.ndarray):
         # Update the number of rows processed
         self.trace_count += traces.shape[0]
         # Update sample accumulator
@@ -179,10 +196,10 @@ def internal_state_update(self, traces:np.ndarray, data:np.ndarray):
         # Update product accumulator
         self.prod_sum += np.matmul(data.T, traces)
 
-    def _get_candidate(self, result):
+    def find_candidate(self, result):
         candidate = [None for _ in range(result.shape[0])]
 
         for i in range(result.shape[0]):
-            candidate[i] = np.unravel_index(np.abs(result[i,:,:]).argmax(), result[i,:,:].shape[0:])[0]
+            candidate[i] = np.unravel_index(np.abs(result[i, :, :]).argmax(), result[i, :, :].shape[0:])[0]
 
         return candidate
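
Note: for context on the cpa.py changes above, here is a standalone sketch (not part of the commit) of the running-sum Pearson correlation that the renamed update() accumulates and calculate() evaluates, reduced to a single leakage-model column; the engine does the same for all 256 key guesses and every byte position at once through the prod_sum matrix product. Toy data and shapes are illustrative assumptions.

```python
import numpy as np

rng = np.random.default_rng(1)
n_traces, n_samples = 500, 20                        # assumed toy sizes
traces = rng.normal(size=(n_traces, n_samples)).astype(np.float32)
model = rng.integers(0, 9, size=n_traces).astype(np.float32)  # e.g. Hamming weights

# Accumulators, as filled batch by batch in update().
n = n_traces
sample_sum = traces.sum(axis=0)
sample_sq_sum = np.square(traces).sum(axis=0)
model_sum = model.sum()
model_sq_sum = np.square(model).sum()
prod_sum = model @ traces                            # role of np.matmul(data.T, traces)

# calculate(): r = (E[mt] - E[m]E[t]) / sqrt(Var(m) * Var(t))
sample_mean = sample_sum / n
model_mean = model_sum / n
numerator = prod_sum / n - model_mean * sample_mean
denom_sample = np.sqrt(sample_sq_sum / n - sample_mean ** 2)
denom_model = np.sqrt(model_sq_sum / n - model_mean ** 2)
r = numerator / (denom_model * denom_sample)

# Agrees with the direct computation.
reference = np.array([np.corrcoef(model, traces[:, s])[0, 1] for s in range(n_samples)])
assert np.allclose(r, reference, atol=1e-4)
```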
29 changes: 11 additions & 18 deletions src/scarr/engines/engine.py
@@ -10,6 +10,7 @@
 import os
 import asyncio
 
+
 class Engine:
     """
     Base class that engines inherit from.
@@ -19,26 +20,26 @@ def __init__(self):

     def run(self, container):
         final_results = np.zeros((len(container.tiles), len(container.bytes), container.sample_length), dtype=np.float32)
-        #with Pool(processes=int(os.cpu_count()/2),maxtasksperchild=1000) as pool: #used for benchmarking
+        # with Pool(processes=int(os.cpu_count()/2),maxtasksperchild=1000) as pool: #used for benchmarking
         with Pool(processes=int(os.cpu_count()/2)) as pool:
             workload = []
             for tile in container.tiles:
                 (tile_x, tile_y) = tile
                 for byte in container.bytes:
                     workload.append((self, container, tile_x, tile_y, byte))
-            starmap_results = pool.starmap(self._run, workload, chunksize=1) #possibly more testing needed
+            starmap_results = pool.starmap(self.run_workload, workload, chunksize=1)  # Possibly more testing needed
             pool.close()
             pool.join()
 
-        for tile_x, tile_y, byte_pos, _result in starmap_results:
+        for tile_x, tile_y, byte_pos, tmp_result in starmap_results:
             tile_index = list(container.tiles).index((tile_x, tile_y))
             byte_index = list(container.bytes).index(byte_pos)
-            final_results[tile_index, byte_index] = _result
+            final_results[tile_index, byte_index] = tmp_result
 
         self.final_results = final_results
 
     @staticmethod
-    def _run(self, container, tile_x, tile_y, byte):
+    def run_workload(self, container, tile_x, tile_y, byte):
         self.populate(container.sample_length)
         container.configure(tile_x, tile_y, [byte])
         if container.fetch_async:
@@ -47,8 +48,8 @@ def _run(self, container, tile_x, tile_y, byte):
             for batch in container.get_batches(tile_x, tile_y, byte):
                 self.update(batch[-1], np.squeeze(batch[0]))
 
-        return tile_x, tile_y, byte, self._get_result()
+        return tile_x, tile_y, byte, self.calculate()
 
     async def batch_loop(self, container):
         index = 0
         batch = container.get_batch_index(index)
@@ -64,29 +65,21 @@ def update(self, traces: np.ndarray, plaintext: np.ndarray):
"""
Function that updates the statistics of the algorithm to be called by the container class.
Gets passed in an array of traces and an array of plaintext from the trace_handler class.
Returns None.
Returns None.
"""
pass

async def async_update(self, traces: np.ndarray, plaintext: np.ndarray):
"""
Function that updates the statistics of the algorithm to be called by the container class.
Gets passed in an array of traces and an array of plaintext from the trace_handler class.
Returns None.
Returns None.
"""
pass

def calculate(self):
pass

def _get_result(self):
"""
Function for finalizing the calculations of an algorithm.
Gets passed in nothing.
Returns Results.
"""
pass

def get_result(self):
return self.final_results

@@ -99,4 +92,4 @@ def populate(self, sample_length):
         pass
 
     def get_points(self, lower_lim, tile_index=0, byte_index=0,):
-        return list(np.where(np.abs(self.final_results[tile_index, byte_index]) >= lower_lim)[0])
\ No newline at end of file
+        return list(np.where(np.abs(self.final_results[tile_index, byte_index]) >= lower_lim)[0])
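
Note: for context on the engine.py changes above, here is a standalone sketch (not part of the commit) of the dispatch pattern Engine.run() uses: one starmap work item per tile/byte pair, executed on half the available cores, with results reassembled by coordinate afterwards. All names and data here are illustrative stand-ins.

```python
import os
from multiprocessing.pool import Pool


def run_workload(tag, tile_x, tile_y, byte):
    # Stand-in for Engine.run_workload(): process one tile/byte pair and
    # return its coordinates together with the computed result.
    return tile_x, tile_y, byte, f"{tag}:({tile_x},{tile_y},{byte})"


if __name__ == "__main__":
    tiles, byte_positions = [(0, 0), (0, 1)], [0, 1, 2]
    workload = [("demo", x, y, b) for (x, y) in tiles for b in byte_positions]
    with Pool(processes=max(1, os.cpu_count() // 2)) as pool:
        results = pool.starmap(run_workload, workload, chunksize=1)
    # Reassemble by coordinates, as run() does when filling final_results.
    table = {(x, y, b): res for x, y, b, res in results}
    print(table[(0, 1, 2)])
```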
[Diffs for the remaining 5 changed files did not load and are not shown.]