# install.packages("keras") # install keras packages
pacman::p_load(keras, tidyverse, knitr, kableExtra)
# install_keras() # Run this only once. Installation takes time.
外国書購読 Day4
Risk Assessment and Planning
2024-10-23
keras
Examples below assume that TensorFlow is installed. See https://www.tensorflow.org/install/.
[1] "list"
List of 2
$ train:List of 2
..$ x: int [1:60000, 1:28, 1:28] 0 0 0 0 0 0 0 0 0 0 ...
..$ y: int [1:60000(1d)] 5 0 4 1 9 2 1 3 1 4 ...
$ test :List of 2
..$ x: int [1:10000, 1:28, 1:28] 0 0 0 0 0 0 0 0 0 0 ...
..$ y: int [1:10000(1d)] 7 2 1 0 4 1 4 9 5 9 ...
The MNIST database was constructed from NIST’s Special Database 3 and Special Database 1 which contain binary images of handwritten digits.
Construct a neural network model. Keras Model composed of a linear stack of layers.
# Build the classifier: a Keras model composed of a linear stack of layers.
network <- keras_model_sequential() # empty (null) model
network |>
# 28*28-pixel images arrive flattened as 784-element vectors
layer_dense(units = 512, input_shape = c(28 * 28)) |>
# ReLU sets negative activations to zero
layer_activation('relu') |> # ReLU activation function
layer_dense(units = 10) |> # 10 output units, one per digit class
# softmax converts the 10 raw outputs into a probability distribution
layer_activation("softmax") # softmax activation function
# Next step: specify the algorithm and function series (optimizer, loss,
# and metrics) via compile().
# Training data
# Flatten each 28x28 image tensor into a 784-element row vector so the
# dense input layer can consume it.
# Training data
train_images <- array_reshape( # reshape to a matrix
train_images, # training image data
c(60000, 28 * 28) # target shape: 60000 x 784
)
# Test data
test_images <- array_reshape( # reshape to a matrix
test_images, # test image data
c(10000, 28 * 28) # target shape: 10000 x 784
)
# Pixel values run from 0 (black) to 255 (white); rescale to the [0, 1] range
train_images <- train_images / 255
test_images <- test_images / 255
Label data is one hot encoded.
to_categorical() takes a vector or 1-column matrix of class labels and converts it into a matrix with p columns, one for each category.
(補足) 0〜9の手書き文字の大小関係は今回関係ないため,10個のカテゴリーを意味する10列の行列に変換してます。
# Train the network on the MNIST training set.
history <- network |>
fit( # fit the model to the training data
train_images, # training image data
train_labels, # training label data
epochs = 10, # number of full passes over the training set
batch_size = 128) # number of samples per gradient update
Epoch 1/10
469/469 - 1s - loss: 0.2562 - accuracy: 0.9265 - 1s/epoch - 2ms/step
Epoch 2/10
469/469 - 1s - loss: 0.1040 - accuracy: 0.9697 - 799ms/epoch - 2ms/step
Epoch 3/10
469/469 - 1s - loss: 0.0683 - accuracy: 0.9794 - 795ms/epoch - 2ms/step
Epoch 4/10
469/469 - 1s - loss: 0.0496 - accuracy: 0.9854 - 789ms/epoch - 2ms/step
Epoch 5/10
469/469 - 1s - loss: 0.0375 - accuracy: 0.9890 - 792ms/epoch - 2ms/step
Epoch 6/10
469/469 - 1s - loss: 0.0283 - accuracy: 0.9915 - 795ms/epoch - 2ms/step
Epoch 7/10
469/469 - 1s - loss: 0.0210 - accuracy: 0.9938 - 787ms/epoch - 2ms/step
Epoch 8/10
469/469 - 1s - loss: 0.0168 - accuracy: 0.9952 - 785ms/epoch - 2ms/step
Epoch 9/10
469/469 - 1s - loss: 0.0127 - accuracy: 0.9963 - 783ms/epoch - 2ms/step
Epoch 10/10
469/469 - 1s - loss: 0.0098 - accuracy: 0.9970 - 784ms/epoch - 2ms/step
model evaluation
keras model
This particular Keras model overfits the MNIST data, with a final test accuracy of 98% and loss of 7%.
Machine learning is a vast and rapidly evolving field.
It is increasingly used for the analysis of social networks and other text-based intelligence required for the analytical review portion (and other parts) of the audit.
In the future, expect its role in auditing to expand, as suitable models are developed in the field.
パラメータが\thetaである母集団の従う分布の確率密度関数をf(x \mid \theta)とする。 そのとき,尤度関数と対数尤度関数は、
L (\theta) = f(x \mid \theta), \quad l (\theta) = \log L(\theta)
となり,スコア関数は次式で定義される。
\frac{\partial}{\partial \theta} l(\theta) = \frac{\partial}{\partial \theta} \log L(\theta)
The five accounting cycles are: 1. Revenue cycle. 2. Expenditure cycle (this cycle focuses on two separate resources: inventory and human resources and often considers two separate cycles: purchasing and payroll/HR ). 3. Conversion cycle (Production cycle). 4. Financing (Capital Acquisition and repayment). 5. Fixed assets.
Problems in any transaction generated in these cycles are the sources of “loss” considered in the Risk Assessment Matrix (RAM).
Risk assessment consists of subjective and objective evaluations of risk in which assumptions and uncertainties are clearly considered and presented.
Part of the difficulty in risk management is that measurement of both potential loss and probability of occurrence is error prone and subjective.
Risk with a large potential loss and a low probability of occurring is often treated differently from one with a low potential loss and a high likelihood of occurring.
The audit risk model expresses the risk of an auditor providing an inappropriate opinion of a commercial entity’s financial statements and is calculated:
AR = IR \times CR \times DR
finstr package
pacman::p_load(finstr, XBRL, xbrlus, pander, knitr, kableExtra)
# Parse GM's XBRL filings and build financial-statement repositories.
old_o <- options(stringsAsFactors = FALSE) # do not convert strings to factors
xbrl_data_2016 <- xbrlDoAll("XBRL/gm-20161231.xml") # parse the FY2016 filing
xbrl_data_2017 <- xbrlDoAll("XBRL/gm-20171231.xml") # parse the FY2017 filing
options(old_o) # restore whatever options were set before
st2016 <- xbrl_get_statements(xbrl_data_2016)
st2017 <- xbrl_get_statements(xbrl_data_2017)
print(st2017) # FY2017 financial statements
Financial statements repository
From To Rows Columns
ConsolidatedBalanceSheets 2016-12-31 2017-12-31 2 44
ConsolidatedIncomeStatements 2015-12-31 2017-12-31 3 29
ConsolidatedStatementsOfCashFlows 2015-12-31 2017-12-31 3 42
ConsolidatedStatementsOfComprehensiveIncome 2015-12-31 2017-12-31 3 11
# Pull individual statements out of each repository.
# Consolidated balance sheets
balance_sheet2017 <- st2017$ConsolidatedBalanceSheets
balance_sheet2016 <- st2016$ConsolidatedBalanceSheets
# Consolidated income statements
income2017 <- st2017$ConsolidatedIncomeStatements
income2016 <- st2016$ConsolidatedIncomeStatements
## Print the FY2017 balance sheet as a formatted table.
# capture.output() with file = "NUL" discards print()'s console output
# (Windows null device) while keeping its return value in bs_table.
capture.output(
bs_table <- print( # format the statement
balance_sheet2017, # object to print
html = FALSE, # no HTML output
big.mark = ",", # comma as thousands separator
dateFormat = "%Y"), # date format
file = "NUL") # discard console output
bs_table |>
head(10) |> # show the first 10 rows
kable(
longtable = T, # allow page breaks
caption = "Balance Sheet", # table title
booktabs = T
) |>
kable_styling(
bootstrap_options = c("striped", "hover", "condensed"),
full_width = F,
font_size = 18 # FIX: argument was misspelled `font_siz`
)
| Element | 2017-12-31 | 2016-12-31 |
|---|---|---|
| Assets = | 212482 | 221690 |
| + AssetsCurrent = | 68744 | 76203 |
| + CashAndCashEquivalentsAtCarryingValue | 15512 | 12574 |
| + MarketableSecuritiesCurrent | 8313 | 11841 |
| + AccountsNotesAndLoansReceivableNetCurrent | 8164 | 8700 |
| + InventoryNet | 10663 | 11040 |
| + gm_AssetsSubjecttoorAvailableforOperatingLeaseNetCurrent | 1106 | 1110 |
| + OtherAssetsCurrent | 4465 | 3633 |
| + AssetsOfDisposalGroupIncludingDiscontinuedOperationCurrent | 0 | 11178 |
| + NotesAndLoansReceivableNetCurrent | 0 | 0 |
merge() command consolidates the information from different .XML files into single files (Table 2).
balance_sheet <- merge(balance_sheet2017, balance_sheet2016)
# Print the merged (2016 + 2017) balance sheet.
capture.output(
bs_table <- print(
balance_sheet,
html = FALSE, # no HTML output
big.mark = ",", # comma as thousands separator
dateFormat = "%Y"
),
file = "NUL" # discard console output (Windows null device)
)
bs_table |>
head(10) |> # show the first 10 rows
kable(
longtable = T, # allow page breaks
caption = "Merged Balance Sheet",
# "latex",
booktabs = T) |>
kable_styling(
bootstrap_options = c("striped", "hover", "condensed"),
full_width = F,
font_size = 18
)
| Element | 2017-12-31 | 2016-12-31 | 2015-12-31 |
|---|---|---|---|
| Assets = | 212482 | 221690 | 194338 |
| + AssetsCurrent = | 68744 | 76203 | 69408 |
| + CashAndCashEquivalentsAtCarryingValue | 15512 | 12960 | 15238 |
| + MarketableSecuritiesCurrent | 8313 | 11841 | 8163 |
| + AccountsNotesAndLoansReceivableNetCurrent | 8164 | 9638 | 8337 |
| + InventoryNet | 10663 | 13788 | 13764 |
| + gm_AssetsSubjecttoorAvailableforOperatingLeaseNetCurrent | 1106 | 1896 | 2783 |
| + OtherAssetsCurrent | 4465 | 4015 | 3072 |
| + AssetsOfDisposalGroupIncludingDiscontinuedOperationCurrent | 0 | 0 | 0 |
| + NotesAndLoansReceivableNetCurrent | 0 | 0 | 0 |
The check_statement() command in finstr will automatically validate internal consistency of transaction lines and summary lines in the EDGAR filings.
Number of errors: 8
Number of elements in errors: 4
Element: AssetsCurrent = + CashAndCashEquivalentsAtCarryingValue + MarketableSecuritiesCurrent + AccountsNotesAndLoansReceivableNetCurrent + InventoryNet + gm_AssetsSubjecttoorAvailableforOperatingLeaseNetCurrent + OtherAssetsCurrent + AssetsOfDisposalGroupIncludingDiscontinuedOperationCurrent + NotesAndLoansReceivableNetCurrent
date original calculated error
3 2016-12-31 7.6203e+10 7.1116e+10 5.087e+09
4 2017-12-31 6.8744e+10 5.8886e+10 9.858e+09
Element: AssetsNoncurrent = + EquityMethodInvestments + PropertyPlantAndEquipmentNet + IntangibleAssetsNetIncludingGoodwill + DeferredIncomeTaxAssetsNet + OtherAssetsNoncurrent + DisposalGroupIncludingDiscontinuedOperationAssetsNoncurrent + NotesAndLoansReceivableNetNoncurrent + PropertySubjectToOrAvailableForOperatingLeaseNet
date original calculated error
5 2016-12-31 1.45487e+11 1.28486e+11 1.7001e+10
6 2017-12-31 1.43738e+11 1.22530e+11 2.1208e+10
Element: LiabilitiesCurrent = + AccountsPayableCurrent + AccruedLiabilitiesCurrent + LiabilitiesOfDisposalGroupIncludingDiscontinuedOperationCurrent + DebtCurrent
date original calculated error
11 2016-12-31 8.5181e+10 6.1384e+10 2.3797e+10
12 2017-12-31 7.6890e+10 4.9925e+10 2.6965e+10
Element: LiabilitiesNoncurrent = + OtherPostretirementDefinedBenefitPlanLiabilitiesNoncurrent + DefinedBenefitPensionPlanLiabilitiesNoncurrent + OtherLiabilitiesNoncurrent + LiabilitiesOfDisposalGroupIncludingDiscontinuedOperationNoncurrent + LongTermDebtAndCapitalLeaseObligations
date original calculated error
13 2016-12-31 9.2434e+10 4.1108e+10 5.1326e+10
14 2017-12-31 9.9392e+10 3.2138e+10 6.7254e+10
Rearranging statements is often a useful step before actual calculations. Rearrangements can offer several advantages in ad hoc analyses such as analytical review:
expose() functionTo rearrange the statement to simple two-level hierarchy use the expose function.
# Rearrange the statement into a simple two-level hierarchy.
expose(balance_sheet,
# Assets
"Current Assets" = "AssetsCurrent",
"Noncurrent Assets" = other("Assets"),
# Liabilities and equity
"Current Liabilities" = "LiabilitiesCurrent",
"Noncurrent Liabilities" = other(c("Liabilities", "CommitmentsAndContingencies")),
"Stockholders Equity" = "StockholdersEquity"
)
expose() function
Financial statement: 3 observations from 2015-12-31 to 2017-12-31
Element 2017-12-31 2016-12-31 2015-12-31
Assets = 212482 221690 194338
+ Current.Assets 48223 54138 51357
+ Noncurrent.Assets 122530 90237 86258
LiabilitiesAndStockholdersEquity = 212482 221690 194338
+ Current.Liabilities 49925 56153 51655
+ Noncurrent.Liabilities 32138 36834 39249
+ Stockholders.Equity 35001 43836 39871
+ OtherLiabilitiesAndStockholdersEquity_ 1199 239 452
Here, the balance sheet stays divided by assets, liabilities, and equity. For the second level we are exposing current assets from noncurrent and similarly for the liabilities. We choose to separate equity.
Function expose() expects a list of vectors with element names.
Function other() helps us identify elements without enumerating every single element.
Using other() reduces potential errors, as the function knows which elements are not specified and keeps the balance sheet complete.
Sometimes it is easier to define a complement than a list of elements. In this case we can use the %without% operator.
Let us expose, for example, tangible and then intangible assets (Table 3):
# Expose tangible vs. intangible assets, defining the tangible set as a
# complement via the %without% operator.
expose( balance_sheet,
# Assets
"Tangible Assets" = "Assets" %without% c(
"AssetsOfDisposalGroupIncludingDiscontinuedOperationCurrent",
"NotesAndLoansReceivableNetCurrent",
"gm_AssetsSubjecttoorAvailableforOperatingLeaseNetCurrent"
),
"Intangible Assets" = other("Assets"),
# Liabilities and equity
"Liabilities" = c("Liabilities", "CommitmentsAndContingencies"),
"Stockholders Equity" = "StockholdersEquity"
)
Financial statement: 3 observations from 2015-12-31 to 2017-12-31
Element 2017-12-31 2016-12-31 2015-12-31
Assets = 212482 221690 194338
+ Tangible.Assets 169647 142479 134832
+ Intangible.Assets 1106 1896 2783
LiabilitiesAndStockholdersEquity = 212482 221690 194338
+ Liabilities 82063 92987 90904
+ Stockholders.Equity 35001 43836 39871
+ OtherLiabilitiesAndStockholdersEquity_ 1199 239 452
# Year-over-year changes: diff() lags the statement and subtracts.
diff_bs <- diff(balance_sheet)
capture.output(
bs_table <- print(
diff_bs,
html = FALSE, # no HTML output
big.mark = ",", # comma as thousands separator
dateFormat = "%Y"
), file = "NUL") # discard console output (Windows null device)
bs_table |>
head(10) |> # show the first 10 rows
kable(longtable = T, # allow page breaks
caption = "Lagged Differences in Balance Sheets",
# "latex",
booktabs = T) |>
kable_styling(
bootstrap_options = c("striped", "hover", "condensed"),
full_width = F,
font_size = 18)
| Element | 2017-12-31 | 2016-12-31 |
|---|---|---|
| Assets = | -9208 | 27352 |
| + AssetsCurrent = | -7459 | 6795 |
| + CashAndCashEquivalentsAtCarryingValue | 2552 | -2278 |
| + MarketableSecuritiesCurrent | -3528 | 3678 |
| + AccountsNotesAndLoansReceivableNetCurrent | -1474 | 1301 |
| + InventoryNet | -3125 | 24 |
| + gm_AssetsSubjecttoorAvailableforOperatingLeaseNetCurrent | -790 | -887 |
| + OtherAssetsCurrent | 450 | 943 |
| + AssetsOfDisposalGroupIncludingDiscontinuedOperationCurrent | 0 | 0 |
| + NotesAndLoansReceivableNetCurrent | 0 | 0 |
sec.gov.
# install.packages("finreportr")
library(finreportr)
# The following commands directly load EDGAR information into the
# R workspace for analysis.
tesla_co <- CompanyInfo("TSLA") # company profile (was fused into the comment above)
tesla_ann <- AnnualReports("TSLA") # list of annual reports
tesla_ann
tesla_inc <- GetIncome("TSLA", 2018) # income statement
tesla_bs <- GetBalanceSheet("TSLA", 2018) # balance sheet
tesla_cf <- GetCashFlow("TSLA", 2018) # cash-flow statement
head(tesla_inc)
But this code will not be able to access Tesla’s 2019 reports, because it throws an error:
What has happened: rather than asking for tsla-20191231.xml the package should have asked for tsla-10k_20191231_htm.xml.
EDGAR either made a mistake in their index files, or changed naming conventions.
You can explore this further by going to their website.
You can also use the xml2 package to read what is in the correct file, bypassing finreportr altogether (or just wait for the repositories to be updated with corrected code).
Consider this workaround to access 2019 data.
edgar package
NULLになりました。
Additionally, you may wish to look at stock prices, and this is easy to do with the tseries package.
Shiny is the client–server extension of R and, as with the rest of R, is uniquely suited to handling the ad hoc nature of audits, where each audit often represents an entirely new set of analyses. Shiny is a tool for fast prototyping of digital dashboards, giving you a large number of HTML widgets at your disposal which lend themselves well to building general-purpose web applications. Shiny is particularly suited for fast prototyping and is fairly easy to use for someone who is not a programmer. Dashboards locally display some data (such as in a database or a file), providing a variety of metrics in an interactive way.
# Define UI for application
library(shiny) # Load the shiny package
# UI for the Risk Assessment Matrix application: two sliders and a text
# input on the left; the RAM summary and cost outputs on the right.
ui <- fluidPage(
  titlePanel("Risk Assessment Matrix"),
  sidebarLayout(
    sidebarPanel(
      # Input: statistical confidence level for the audit tests
      sliderInput("confidence", "Confidence:",
                  min = .7,
                  max = .999,
                  value = .95),
      # Input: audit cost per sampled transaction
      sliderInput("cost", "Audit $ / transaction:",
                  min = 0,
                  max = 500,
                  value = 100),
      # Input: Text for providing a caption for the RAM
      textInput(
        inputId = "caption",
        label = "クライアント:",
        value = "XYZ Corp.")
    ),
    # Main panel for displaying outputs
    mainPanel(
      # Output: slider values entered
      tableOutput("values"),
      # Output: Formatted text for caption
      h3(textOutput("caption", container = span)),
      # Output: total cost of the audit
      textOutput("view"),
      # Output: RAM summary with sample sizes (scope) and cost
      verbatimTextOutput("summary"),
      h6("リスク選択: 1 = 低, 2 = 中, 3 = 高"),
      h6("リスク知能 = ビジネス・インテリジェンス・スキャンニングで示されるリスク水準"),
      h6("前年度リスク = 前期に監査人が示したリスク水準"),
      h6("Scope = estimated discovery sample size that will be needed in the audit of this account"),
      h6("Audit cost = audit labor dollars per sampled transaction"),
      h6("Confidence = statistical confidence"),
      h6("Account Amount and the Ave. Transaction size are in $ without decimals or 000 dividers")
    )
  )
)
n \approx \frac{\log(1-\text{confidence})}{\log\left(1-\frac{10-\text{risk}_{intelligence} \times \text{risk}_{prior}}{100}\right)}
あるサンプルサイズ n を持ち、誤り率が p = 0.05 であると仮定
誤りが1つも見つからない確率: 各サンプルで誤りが発生しない確率は1 - pで,nの全てにおいて誤りが1つも発生しない確率は(1 - p)^n
少なくとも1つの誤りが見つかる確率:
信頼水準\text{confidence}: 「少なくとも1つの誤りを検出できる確率」が信頼水準\text{confidence}以上であるために必要なサンプルサイズnを計算 1 - (1 - p)^n = \text{confidence}
サンプルサイズnを解く。両辺から1を引いて整理して,対数をとる。 \begin{aligned} (1 - p)^n &= 1 - \text{confidence}\\ \log((1 - p)^n) &= \log(1 - \text{confidence})\\ n \log(1 - p) &= \log(1 - \text{confidence}) \end{aligned}
n を求めるために、両辺を \log(1 - p) で割ると, n = \frac{\log(1 - \text{confidence})}{\log(1 - p)}
許容誤差率p = 0.05と信頼水準\text{confidence} = 0.95を使うと次のようになる。 n = \frac{\log(1 - 0.95)}{\log(1 - 0.05)}
この計算結果は、発見サンプルサイズnを示しており、監査などにおいて設定された許容誤差率(5%)を 95% の確信で検出するために必要なサンプル数を表している。 この式は、二項分布に基づき「少なくとも1つの誤りを検出する確率が信頼水準\text{confidence}に達するために必要なサンプルサイズn」を計算するためのものです。
# Server logic for the Risk Assessment Matrix application.
server <- function(input, output) {
  # Risk assessment matrix shipped with the auditanalytics package
  ram <- read.csv(system.file("extdata",
                              "risk_asst_matrix.csv",
                              package = "auditanalytics",
                              mustWork = TRUE))

  # Shared computation (previously duplicated in output$summary and
  # output$view): per-account discovery-sampling scope and minimum audit
  # cost, given the confidence and per-transaction cost chosen in the UI.
  scoped_ram <- reactive({
    # Out-of-control rate implied by the intelligence (col 2) and
    # prior-year (col 3) risk scores
    risk <- (10 - (as.numeric(ram[, 2]) * as.numeric(ram[, 3]))) / 100
    # Discovery sample size: n = log(1 - confidence) / log(1 - risk)
    Scope <- ceiling(log(1 - input$confidence) / log(1 - risk))
    Min_cost <- Scope * input$cost
    cbind(ram[, 1:5], Scope, Min_cost)
  })

  # Echo the slider settings back to the user as a two-row table
  sliderValues <- reactive({
    data.frame(
      Audit_Parameter = c("confidence", "cost"),
      Value = as.character(c(input$confidence, input$cost)),
      stringsAsFactors = FALSE)
  })
  output$values <- renderTable({
    sliderValues()
  })
  output$caption <- renderText({
    input$caption
  })
  # RAM with the Scope and Min_cost columns appended
  output$summary <- renderPrint({
    scoped_ram()
  })
  # Total minimum estimated audit cost across all accounts
  output$view <- renderText({
    c("Minimum estimated audit cost = ", sum(scoped_ram()$Min_cost))
  })
}
R Studio gives you various options for assembling Shiny apps, including apps with server side code resident on either an RStudio or a bespoke server, and stand-alone client side apps which can be constructed with the following code.
In addition, planning will need to estimate costs associated with:
Then a simple linear cost model would be:
\text{total cost of technical interim test} = \sum _{i,j} S_{i,j} \times C_{i,j}
The matrix form is typically more useful in writing R language code, because the transaction, cost, and sample values are matrices that can directly use the fast BLAS/LAPACK implementations in R for linear algebra, rather than coding slow, messy, nested for statements. The matrix form is:
\text{total cost of technical interim test} = 1_{(i)}^{\top} \times \left (T \times S^{\top} \times C \right ) \times 1_{(j)}
where 1_{(i)}^{\top} is the row i-vector whose entries are all 1’s, 1_(j) is the column j-vector whose entries are all 1’s, T = \{T_{i,j} \}, S = \{ S_{i,j} \}, and C = \{ C_{i,j} \}.
There are two types of sampling in interim tests:
Discovery sampling sets a sample size that is likely to discover at least one error in the sample if the actual transaction error rate exceeds the minimum acceptable error rate (alternatively called the out-of-control rate of error). Discovery tests help the auditor decide whether the systems processing a particular transaction stream are in or out of control.
So for a 5% intolerable error rate at 95% confidence we have:
# Discovery sample size: the smallest n for which the probability of
# finding at least one error reaches `confidence`, assuming the true
# error rate is at the 5% intolerable level.
confidence <- 0.95
intolerable_rate <- 0.05
n <- log(1 - confidence) / log(1 - intolerable_rate)
cat("\n Discovery sample size = ", ceiling(n))
Discovery sample size = 59
Where the RAM assesses control risk to be anything higher, the auditor can assume that scope will be expanded to include attribute sampling.
Attribute sampling size is determined using Cohen’s power analysis (Cohen 1992), which is implemented in R’s pwr package. We compute both in the following code chunk.
# Attribute sample size for the OCCURRENCE of error, via Cohen's power analysis.
# install.packages("pwr") # first time only
library(pwr) # power analysis as in Cohen (1992)
size <- 1000 # total number of transactions
Delta <- 0.05 * size # detect a 5% intolerable error rate
sigma <- 0.3 * size # assumed variability (roughly 1/3 of size)
effect <- Delta/sigma # Cohen's d: tolerance / variability
sample <- pwr.t.test( # one-sample t-test power calculation
d = effect, sig.level = 0.05, power = 0.8,
type = "one.sample",
alternative = "greater" ## look for overstatement of earnings
)
cat("\n Attribute sample size for occurrence of error = ", ceiling(sample$n))
Attribute sample size for occurrence of error = 224
Attribute sampling determines sample size to estimate the error amount in a transaction stream.
# Attribute sample size for the AMOUNT of error in the transaction stream.
size <- 100000 ## total amount of transactions
mu <- 50 ## average value of a transaction
Delta <- 0.05 * mu ## detect a 5% intolerable error in amount
sigma <- 30 ## variability
effect <- Delta/sigma # Cohen's d: tolerance / variability
sample <- pwr.t.test( # one-sample t-test power calculation
d = effect, sig.level = 0.05, power = 0.8,
type = "one.sample", alternative = "greater") # one-sided: overstatement
cat("\n Attribute sample size for amount of error = ", ceiling(sample$n))
Attribute sample size for amount of error = 892
We compute both in the following code chunk
# Repeat of the amount-of-error calculation, with every argument commented.
size <- 100000 # total dollar amount of transactions
mu <- 50 # average transaction value
Delta <- 0.05 * mu # detect a 5% intolerable error in amount
sigma <- 30 # variability
effect <- Delta / sigma # Cohen's d: tolerance / variability
sample <- pwr.t.test( # Cohen's power analysis
d = effect, # Cohen's d (effect size)
sig.level = 0.05, # significance level
power = 0.8, # statistical power (original comment mislabeled this "effect size")
type = "one.sample", # one-sample test
alternative = "greater" # one-sided: look for overstatement (original comment mislabeled this "false positive")
)
cat("\n Attribute sample size for amount of error = ", ceiling(sample$n))
Attribute sample size for amount of error = 892
Auditing for better or worse has chosen to couch the industrial organization of the audit business in an authoritarian structure.
Authority for rendering audit opinions is vested in a small number of firms; the Fortune 500 firms must typically be audited by one of the Big Four audit firms;
The audit's authoritative strategy for solving its wicked problem offers the advantage of substantially reduced cost and greater efficiency of audits.
Its disadvantage is that no matter how well educated the auditors are they will not have as much information about IT storage and processing platforms.

Kobe University, Business Administration