diff options
| -rw-r--r-- | README.md | 1 | ||||
| -rw-r--r-- | tutorials/figures/totalError.png | bin | 0 -> 128094 bytes | |||
| -rw-r--r-- | tutorials/figures/truncationError.png | bin | 0 -> 87254 bytes | |||
| -rw-r--r-- | tutorials/module_1/errorPlots.py | 144 | ||||
| -rw-r--r-- | tutorials/module_2/error.md | 58 |
5 files changed, 203 insertions, 0 deletions
@@ -37,6 +37,7 @@ See the tutorials under the /tutorials/ directory. - [Fundamentals of Computing](tutorials/module_1/fundamentals_of_programming.md) - [Open source software](tutorials/module_1/open_source_software.md) - [Computational Expense](tutorials/module_1/computational_expense.md) + - [Error](error.md) #### Algorithm Developments for ME - [Numerical Methods](tutorials/module_2/intro_to_numerical_methods.md) - [Version Control](tutorials/module_2/version_control.md) diff --git a/tutorials/figures/totalError.png b/tutorials/figures/totalError.png Binary files differnew file mode 100644 index 0000000..b492ffc --- /dev/null +++ b/tutorials/figures/totalError.png diff --git a/tutorials/figures/truncationError.png b/tutorials/figures/truncationError.png Binary files differnew file mode 100644 index 0000000..4c35754 --- /dev/null +++ b/tutorials/figures/truncationError.png diff --git a/tutorials/module_1/errorPlots.py b/tutorials/module_1/errorPlots.py new file mode 100644 index 0000000..8351fc8 --- /dev/null +++ b/tutorials/module_1/errorPlots.py @@ -0,0 +1,144 @@ +# Import required packages +import numpy as np +import matplotlib.pyplot as plt + +# --------------------------------------------------------------------------- +# PART 1: Visualizing Truncation Error and Approximate Integral +# --------------------------------------------------------------------------- + +# Generate true curve: y = sin(x) +x_fine = np.linspace(0, np.pi/2, 400) +y_fine = np.sin(x_fine) + +# Linear approximation (straight line between 0 and pi/2) +x_line = np.array([0, np.pi/2]) +y_line = np.sin(x_line) + +# Plot true curve and linear approximation +plt.figure(figsize=(8, 6)) + +plt.plot(x_fine, y_fine, label='True function: $y = \sin(x)$', color='black', linewidth=3) +plt.plot(x_line, y_line, label='Linear approximation', color='red', linestyle='--', linewidth=2) + +# Shade area between true curve and linear approximation (truncation error) +plt.fill_between(x_fine, y_fine, 
np.interp(x_fine, x_line, y_line), + where=(y_fine > np.interp(x_fine, x_line, y_line)), + interpolate=True, color='gray', alpha=0.5, label='Truncation Error') + +# Shade approximate integral (area under linear approximation) +plt.fill_between(x_fine, 0, np.interp(x_fine, x_line, y_line), + color='lightblue', alpha=0.5, label='Approximate Integral') + +# Remove ticks +plt.xticks([]) +plt.yticks([]) +plt.grid(False) + +# Labels and title +plt.xlabel('x', fontsize=20) +plt.ylabel('y', fontsize=20) +plt.title('Truncation Error', fontsize=22) +plt.legend(fontsize=16, loc='lower right') +plt.tight_layout() +plt.show() + +# --------------------------------------------------------------------------- +# PART 2: Error Plot for Truncation, Round-off, and Total Errors +# --------------------------------------------------------------------------- + +# Set up for log-log error plot +h = np.logspace(-16, -1, 400) +truncation_error = 1e-1 * h +roundoff_error = 1e-16 / h +total_error = truncation_error + roundoff_error + +# Find minimum total error point +min_idx = np.argmin(total_error) +min_h = h[min_idx] +arrow_target_h = 2.5 * min_h +arrow_target_err = total_error[np.argmin(np.abs(h - arrow_target_h))] + +# Plot errors +plt.figure(figsize=(10, 7)) + +plt.loglog(h, truncation_error, label='Truncation error', linewidth=3, color='blue') +plt.loglog(h, roundoff_error, label='Round-off error', linewidth=3, color='orange') +plt.loglog(h, total_error, label='Total error', linewidth=4, color='black') + +# Annotation +plt.annotate('Point of\n diminishing\n returns', + xy=(arrow_target_h, arrow_target_err), xycoords='data', + xytext=(arrow_target_h, 15 * arrow_target_err), textcoords='data', + arrowprops=dict(arrowstyle='->', lw=2), + fontsize=20, ha='center') + +# Remove ticks and grid +plt.xticks([]) +plt.yticks([]) +plt.grid(False) + +# Labels and legend +plt.xlabel('Log step size', fontsize=22) +plt.ylabel('Log error', fontsize=22) +plt.legend(fontsize=20, loc='upper right') 
+plt.tight_layout() +plt.show() + + +# --------------------------------------------------------------------------- +# PART 3: Error Plot for Truncation with more steps +# --------------------------------------------------------------------------- + +# Fix legend duplication and improve clarity +import numpy as np +import matplotlib.pyplot as plt + +# Generate true curve: y = sin(x) over 0 to π +x_fine = np.linspace(0, np.pi, 800) +y_fine = np.sin(x_fine) + +# Choose multiple linear segments +n_steps = 3 # number of straight line segments +x_steps = np.linspace(0, np.pi, n_steps + 1) +y_steps = np.sin(x_steps) + +# Plot true curve +plt.figure(figsize=(8, 6)) +plt.plot(x_fine, y_fine, label='True function: $y = \\sin(x)$', color='black', linewidth=3) + +# Plot piecewise linear approximation +plt.plot(x_steps, y_steps, label='Linear approximation', color='red', linestyle='--', linewidth=2) + +# Track whether we've added the truncation label already +truncation_label_added = False + +# Shade truncation error (avoid duplicate legend entry) +for i in range(n_steps): + x_segment = np.linspace(x_steps[i], x_steps[i+1], 100) + y_segment_line = np.interp(x_segment, [x_steps[i], x_steps[i+1]], [y_steps[i], y_steps[i+1]]) + y_segment_true = np.sin(x_segment) + + label = 'Truncation error' if not truncation_label_added else None + plt.fill_between(x_segment, y_segment_true, y_segment_line, + where=(y_segment_true > y_segment_line), + interpolate=True, color='gray', alpha=0.5, label=label) + truncation_label_added = True + +# Shade approximate integral under the piecewise linear approximation +plt.fill_between(x_fine, 0, np.interp(x_fine, x_steps, y_steps), + color='lightblue', alpha=0.5, label='Approximate integral') + +# Remove ticks +plt.xticks([]) +plt.yticks([]) +plt.grid(False) + +# Labels and title +plt.xlabel('x', fontsize=20) +plt.ylabel('y', fontsize=20) +plt.title('Truncation Error', fontsize=24) +plt.legend(fontsize=16, loc='lower right') +plt.tight_layout() 
+plt.show()
+
+
diff --git a/tutorials/module_2/error.md b/tutorials/module_2/error.md
new file mode 100644
index 0000000..f8ad3f3
--- /dev/null
+++ b/tutorials/module_2/error.md
@@ -0,0 +1,58 @@
+# Errors in Numerical Computations
+
+In any numerical method, **error** is inevitable. Understanding **what kinds of errors occur** and **why** is essential to building reliable and accurate computations.
+
+We mainly classify errors into two major types:
+- Truncation Error
+- Round-off Error
+
+## What is Error?
+
+Let's remind ourselves what error is:
+$$
+\text{Error} = \text{True Value} - \text{Approximate Value}
+$$
+However, often the **true value** is unknown, so we focus on **reducing** and **analyzing** different types of errors instead of eliminating them completely. This can be done by using relative error when using iterative methods and is calculated as follows:
+$$
+\text{Relative Error} = \frac{\text{Best} - \text{Second to best}}{\text{Best}}
+$$
+
+## Truncation Error
+
+Truncation error occurs **when an infinite process is approximated by a finite process**.
+In simple terms, it happens **when you cut off or "truncate" part of the computation**. An example of this could be using a finite number of terms from a Taylor Series expansion to approximate a function.
+
+Approximating $e^x$ by the first few terms of its Taylor series:
+
+$$e^x \approx 1 + x + \frac{x^2}{2!} + \frac{x^3}{3!}$$
+
+The error comes from **neglecting** all the higher order terms ($\frac{x^4}{4!}, \frac{x^5}{5!}$, ...).
+
+Truncation error occurs when using numerical methods such as approximating and calculating derivatives and integrals. A representation of the truncation error is shown in the figure below. Using our numerical methods we are left with some degree of error.
+
+![Truncation error of a piecewise linear approximation](../figures/truncationError.png)
+
+In order to reduce truncation error there are a few things we can do:
+- Include more terms (higher-order methods)
+- Decrease step sizes (e.g., smaller $\Delta x$ in approximations)
+- Use better approximation algorithms.
+
+## Round-off Error
+
+Round-off error is caused by **the limited precision** with which computers represent numbers. Since computers cannot store an infinite number of digits, **they round off** after a certain number of decimal or binary places. For example, instead of representing π with infinite decimal places, it may be rounded off to approximately 16 digits, depending on the number of bits used and how those bits are interpreted.
+
+In other words, round-off error happens because of how computers store numbers. For a double-precision floating-point number, the value is stored using 64 bits. The more bits we use, the more precise a number we can store. However, this costs more memory, making computations more expensive.
+
+While individual round-off errors may seem negligible, their effects can **accumulate over repeated computations**, leading to significant inaccuracies. This is particularly problematic in operations such as **subtracting two nearly equal numbers**, where **loss of significance** can occur, severely reducing numerical precision and amplifying the impact of round-off error.
+
+### How to Reduce Round-off Error:
+
+To reduce round-off error, use higher-precision data types when storing numerical values. Additionally, code and algorithms should be structured to **avoid subtracting nearly equal numbers**, a common source of significant error. Finally, employing **numerically stable algorithms** is essential for minimizing the accumulation of round-off errors during computation.
+
+## Total Error
+
+Truncation and round-off error are inversely related, meaning that if we decrease one, the other increases. If we want to minimize total error we must find the optimal point between step size and error, as shown in the figure below.
+
+![Total error as a function of step size](../figures/totalError.png)
