#include<stdio.h>
#include<stdlib.h>
#include<math.h>
//#include<mathscall.h>
#include<time.h>
#include"plot.h"
int main()
{
int no_packets = 10000;
//float lamda = 0.1;
double lamda = 0.1,*lamdas;
int i,j,cumulative_X,count = 0;
int trans_time = 1;
double temp,*throughput;
int iterations = 1/0.1;
printf("Iterations: %d\n",iterations);
throughput = (float *)malloc(iterations*sizeof(float));
lamdas = (float *)malloc(iterations*sizeof(float));
int *pkt_collision;
double *pkt_holder;
int k=0;
float p;
for(p=0.1;p<1.1;p=p+0.1)
{
lamdas[k]=p;
printf("lamdas: %f\n",lamdas[k]);
k++;
//printf("k: %d\n",k);
}
int l=0;
while(lamda<1.1)
{
count = 0;
temp = lamdas[l] * no_packets;
int avg_packets = (int)temp;
//printf("In here\n");
pkt_holder = (double *)malloc(avg_packets*sizeof(double));
pkt_collision = (int *)malloc(avg_packets*sizeof(int));
//temp = lamda * no_packets;
//printf("In here\n");
srand(0);
for(i=0;i<avg_packets;i++)
{
pkt_holder[i] = 0.0;
pkt_collision[i] = 0;
}
for(i=1;i<avg_packets;i++)
{
double X;
do
{
X= (rand() % 10000)/10000.0;
}while(X == 0);
double time_temp = -(double)log(X)/lamda;
//printf("i: %d\t time_temp: %f\n ",i,time_temp);
pkt_holder[i]=pkt_holder[i-1] + time_temp;
}
for(i=1;i<avg_packets;i++)
{
for(j=i;j<avg_packets;j++)
{
if((pkt_holder[j]-pkt_holder[i-1])<=5 &&pkt_collision[j]==1)
{
pkt_collision[j] = 1;
pkt_collision[i-1] = 1;
}
else
break;
}
}
for(i=0;i<avg_packets;i++)
{
if(pkt_collision[i] == 0)
{
count++;
}
}
throughput[l] = (float) count/no_packets;
lamda+=0.1;
free(pkt_holder);
free(pkt_collision);// glibc error occuring after execution of this //statement
l++;
}
printf("Sucess: %d\n",count);
return 0;
}
在這個程序中,我試圖模擬純阿羅哈的吞吐量。glibc錯誤檢測不知道什麼是錯
從lamda 0.1
開始到lamda = 1
,我試圖找到與這些值對應的吞吐量。
在while循環中,我試圖通過在每次迭代循環結束時釋放它們的內存來重新分配每次迭代中pkt_holder和pkt_collision的大小。
當我試圖在eclipse中調試時,它顯示glibc錯誤時執行空閒(pkt_collision)。
任何幫助或建議將不勝感激。
謝謝
'lambda'和'thoughput'應該是float *你有double *。 'lambda [k]'是否會導致訪問無效內存的問題 –