/*
* Licensed to the OpenAirInterface (OAI) Software Alliance under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The OpenAirInterface Software Alliance licenses this file to You under
* the OAI Public License, Version 1.1 (the "License"); you may not use this file
* except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.openairinterface.org/?page_id=698
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*-------------------------------------------------------------------------------
* For more information about the OpenAirInterface (OAI) Software Alliance:
* contact@openairinterface.org
*/
/*! \file lte-enb.c
* \brief Top-level threads for eNodeB
* \author R. Knopp, F. Kaltenberger, Navid Nikaein
* \date 2012
* \version 0.1
* \company Eurecom
* \email: knopp@eurecom.fr,florian.kaltenberger@eurecom.fr, navid.nikaein@eurecom.fr
* \note
* \warning
*/
/*! \fn wakeup_txfh
* \brief Implementation of creating multiple RU threads for beamforming emulation
* \author TH Wang(Judy), TY Hsu, SY Yeh(fdragon)
* \date 2018
* \version 0.1
* \company Eurecom and ISIP@NCTU
* \email: Tsu-Han.Wang@eurecom.fr,tyhsu@cs.nctu.edu.tw,fdragon.cs96g@g2.nctu.edu.tw
* \note
* \warning
*/
#define _GNU_SOURCE
#include <pthread.h>
#undef MALLOC //there are two conflicting definitions, so we better make sure we don't use it at all
#include "assertions.h"
#include "PHY/types.h"
#include "PHY/INIT/phy_init.h"
#include "PHY/defs_eNB.h"
#include "SCHED/sched_eNB.h"
#include "PHY/LTE_TRANSPORT/transport_proto.h"
#include "nfapi/oai_integration/vendor_ext.h"
#undef MALLOC //there are two conflicting definitions, so we better make sure we don't use it at all
//#undef FRAME_LENGTH_COMPLEX_SAMPLES //there are two conflicting definitions, so we better make sure we don't use it at all
#include "radio/COMMON/common_lib.h"
//#undef FRAME_LENGTH_COMPLEX_SAMPLES //there are two conflicting definitions, so we better make sure we don't use it at all
#include "PHY/LTE_TRANSPORT/if4_tools.h"
#include "PHY/LTE_ESTIMATION/lte_estimation.h"
#include "PHY/phy_extern.h"
#include "LAYER2/MAC/mac.h"
#include "LAYER2/MAC/mac_extern.h"
#include "LAYER2/MAC/mac_proto.h"
#include "RRC/LTE/rrc_extern.h"
#include "PHY_INTERFACE/phy_interface.h"
#include "common/utils/LOG/log.h"
#include "UTIL/OTG/otg_tx.h"
#include "UTIL/OTG/otg_externs.h"
#include "UTIL/MATH/oml.h"
#include "common/utils/LOG/vcd_signal_dumper.h"
#include "UTIL/OPT/opt.h"
#include "enb_config.h"
#include "executables/lte-softmodem.h"
#include "s1ap_eNB.h"
#include "SIMULATION/ETH_TRANSPORT/proto.h"
#include "T.h"
#include "common/ran_context.h"
extern RAN_CONTEXT_t RC;
//#define DEBUG_THREADS 1
//#define USRP_DEBUG 1
struct timing_info_t {
//unsigned int frame, hw_slot, last_slot, next_slot;
//unsigned int mbox0, mbox1, mbox2, mbox_target;
unsigned int n_samples;
} timing_info;
// Fix per CC openair rf/if device update
// extern openair0_device openair0;
extern int oai_exit;
extern int transmission_mode;
extern int oaisim_flag;
#include "executables/thread-common.h"
//extern PARALLEL_CONF_t get_thread_parallel_conf(void);
//extern WORKER_CONF_t get_thread_worker_conf(void);
//pthread_t main_eNB_thread;
time_stats_t softmodem_stats_mt; // main thread
time_stats_t softmodem_stats_hw; // hw acquisition
time_stats_t softmodem_stats_rxtx_sf; // total rxtx subframe processing time
time_stats_t nfapi_meas; // nFAPI subframe indication time
time_stats_t softmodem_stats_rx_sf; // total rx time
/* mutex, cond and variable to serialize phy proc TX calls
* (this mechanism may be relaxed in the future for better
* performance)
*/
static struct {
pthread_mutex_t mutex_phy_proc_tx;
pthread_cond_t cond_phy_proc_tx;
volatile uint8_t phy_proc_CC_id;
} sync_phy_proc;
extern double cpuf;
void init_eNB(int,int);
void stop_eNB(int nb_inst);
int wakeup_tx(PHY_VARS_eNB *eNB, int frame_rx, int subframe_rx, int frame_tx, int subframe_tx, uint64_t timestamp_tx);
int wakeup_txfh(PHY_VARS_eNB *eNB, L1_rxtx_proc_t *proc, int frame_tx, int subframe_tx, uint64_t timestamp_tx);
void wakeup_prach_eNB(PHY_VARS_eNB *eNB,RU_t *ru, int frame, int subframe);
void wakeup_prach_eNB_br(PHY_VARS_eNB *eNB,RU_t *ru, int frame, int subframe);
extern void oai_subframe_ind(uint16_t sfn, uint16_t sf);
extern void add_subframe(uint16_t *frameP, uint16_t *subframeP, int offset);
//#define TICK_TO_US(ts) (ts.diff)
#define TICK_TO_US(ts) (ts.trials==0?0:ts.diff/ts.trials)
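/*!
* \brief Per-subframe RX/TX processing of the eNB L1.
* Forwards the subframe tick to nFAPI when running as a PNF, triggers PRACH processing for
* IF5/monolithic RUs, runs the UE-specific RX procedures, hands UL_INFO to the MAC and,
* in the single-thread configuration, also runs the TX procedures inline.
* \param eNB pointer to the eNB PHY context.
* \param proc L1 RX/TX descriptor carrying the current frame/subframe timing.
* \param thread_name name of the calling thread, used for logging.
* \returns 0 on success, -1 on invalid arguments or when exiting.
*/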
static inline int rxtx(PHY_VARS_eNB *eNB,
L1_rxtx_proc_t *proc,
char *thread_name) {
int ret;
start_meas(&softmodem_stats_rxtx_sf);
//L1_rxtx_proc_t *L1_proc_tx = &eNB->proc.L1_proc_tx;
// *******************************************************************
#if defined(PRE_SCD_THREAD)
RU_t *ru = RC.ru[0];
#endif
if (eNB ==NULL) {
LOG_D(PHY,"%s:%d: rxtx invalid argument, eNB pointer is NULL",__FILE__,__LINE__);
return -1;
}
if (NFAPI_MODE==NFAPI_MODE_PNF) {
// I am a PNF and I need to let nFAPI know that we have a (sub)frame tick
uint16_t frame = proc->frame_rx;
uint16_t subframe = proc->subframe_rx;
//add_subframe(&frame, &subframe, 4);
//oai_subframe_ind(proc->frame_tx, proc->subframe_tx);
//LOG_D(PHY, "oai_subframe_ind(frame:%u, subframe:%d) - NOT CALLED ********\n", frame, subframe);
start_meas(&nfapi_meas);
oai_subframe_ind(frame, subframe);
stop_meas(&nfapi_meas);
if (eNB->UL_INFO.rx_ind.rx_indication_body.number_of_pdus||
eNB->UL_INFO.harq_ind.harq_indication_body.number_of_harqs ||
eNB->UL_INFO.crc_ind.crc_indication_body.number_of_crcs ||
eNB->UL_INFO.rach_ind.rach_indication_body.number_of_preambles ||
eNB->UL_INFO.cqi_ind.cqi_indication_body.number_of_cqis
) {
LOG_D(PHY, "UL_info[rx_ind:%05d:%d harqs:%05d:%d crcs:%05d:%d preambles:%05d:%d cqis:%d] RX:%04d%d TX:%04d%d num_pdcch_symbols:%d\n",
NFAPI_SFNSF2DEC(eNB->UL_INFO.rx_ind.sfn_sf), eNB->UL_INFO.rx_ind.rx_indication_body.number_of_pdus,
NFAPI_SFNSF2DEC(eNB->UL_INFO.harq_ind.sfn_sf), eNB->UL_INFO.harq_ind.harq_indication_body.number_of_harqs,
NFAPI_SFNSF2DEC(eNB->UL_INFO.crc_ind.sfn_sf), eNB->UL_INFO.crc_ind.crc_indication_body.number_of_crcs,
NFAPI_SFNSF2DEC(eNB->UL_INFO.rach_ind.sfn_sf), eNB->UL_INFO.rach_ind.rach_indication_body.number_of_preambles,
eNB->UL_INFO.cqi_ind.cqi_indication_body.number_of_cqis,
proc->frame_rx, proc->subframe_rx,
proc->frame_tx, proc->subframe_tx, eNB->pdcch_vars[proc->subframe_tx&1].num_pdcch_symbols);
}
}
if (NFAPI_MODE==NFAPI_MODE_PNF && eNB->pdcch_vars[proc->subframe_tx&1].num_pdcch_symbols == 0) {
LOG_E(PHY, "eNB->pdcch_vars[proc->subframe_tx&1].num_pdcch_symbols == 0");
return 0;
}
// ****************************************
// Common RX procedures subframe n
T(T_ENB_PHY_DL_TICK, T_INT(eNB->Mod_id), T_INT(proc->frame_tx), T_INT(proc->subframe_tx));
// if this is IF5 or 3GPP_eNB
if (eNB->RU_list[0] && eNB->RU_list[0]->function < NGFI_RAU_IF4p5) {
wakeup_prach_eNB(eNB,NULL,proc->frame_rx,proc->subframe_rx);
wakeup_prach_eNB_br(eNB,NULL,proc->frame_rx,proc->subframe_rx);
}
if (NFAPI_MODE!=NFAPI_MODE_PNF) {
release_UE_in_freeList(eNB->Mod_id);
} else {
release_rnti_of_phy(eNB->Mod_id);
}
// UE-specific RX processing for subframe n
if (NFAPI_MODE==NFAPI_MONOLITHIC || NFAPI_MODE==NFAPI_MODE_PNF) {
phy_procedures_eNB_uespec_RX(eNB, proc);
}
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_ENB_DLSCH_ULSCH_SCHEDULER, 1 );
#if defined(PRE_SCD_THREAD)
if (NFAPI_MODE==NFAPI_MODE_VNF) {
new_dlsch_ue_select_tbl_in_use = dlsch_ue_select_tbl_in_use;
dlsch_ue_select_tbl_in_use = !dlsch_ue_select_tbl_in_use;
// The L2 emulator works with only one eNB.
// memcpy(&pre_scd_eNB_UE_stats,&RC.mac[ru->eNB_list[0]->Mod_id]->UE_info.eNB_UE_stats, sizeof(eNB_UE_STATS)*MAX_NUM_CCs*NUMBER_OF_UE_MAX);
// memcpy(&pre_scd_activeUE, &RC.mac[ru->eNB_list[0]->Mod_id]->UE_info.active, sizeof(bool)*NUMBER_OF_UE_MAX);
memcpy(&pre_scd_eNB_UE_stats,&RC.mac[0]->UE_info.eNB_UE_stats, sizeof(eNB_UE_STATS)*MAX_NUM_CCs*NUMBER_OF_UE_MAX);
memcpy(&pre_scd_activeUE, &RC.mac[0]->UE_info.active, sizeof(bool)*NUMBER_OF_UE_MAX);
AssertFatal((ret= pthread_mutex_lock(&ru->proc.mutex_pre_scd))==0,"[eNB] error locking proc mutex for eNB pre scd, return %d\n",ret);
ru->proc.instance_pre_scd++;
if (ru->proc.instance_pre_scd == 0) {
if (pthread_cond_signal(&ru->proc.cond_pre_scd) != 0) {
LOG_E( PHY, "[eNB] ERROR pthread_cond_signal for eNB pre scd\n" );
exit_fun( "ERROR pthread_cond_signal cond_pre_scd" );
}
} else {
LOG_E( PHY, "[eNB] frame %d subframe %d rxtx busy instance_pre_scd %d\n",
proc->frame_rx,proc->subframe_rx,ru->proc.instance_pre_scd );
}
AssertFatal((ret= pthread_mutex_unlock(&ru->proc.mutex_pre_scd))==0,"[eNB] error unlocking proc mutex for eNB pre scd, return %d\n",ret);
}
#endif
AssertFatal((ret= pthread_mutex_lock(&eNB->UL_INFO_mutex))==0,"error locking UL_INFO_mutex, return %d\n",ret);
eNB->UL_INFO.frame = proc->frame_rx;
eNB->UL_INFO.subframe = proc->subframe_rx;
eNB->UL_INFO.module_id = eNB->Mod_id;
eNB->UL_INFO.CC_id = eNB->CC_id;
eNB->if_inst->UL_indication(&eNB->UL_INFO, (void*)proc);
AssertFatal((ret= pthread_mutex_unlock(&eNB->UL_INFO_mutex))==0,"error unlocking UL_INFO_mutex, return %d\n",ret);
/* this conflict resolution may be totally wrong, to be tested */
/* CONFLICT RESOLUTION: BEGIN */
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME(VCD_SIGNAL_DUMPER_FUNCTIONS_ENB_DLSCH_ULSCH_SCHEDULER, 0 );
if(oai_exit) return(-1);
if(get_thread_parallel_conf() == PARALLEL_SINGLE_THREAD) {
#ifndef PHY_TX_THREAD
phy_procedures_eNB_TX(eNB, proc, 1);
#endif
}
/* CONFLICT RESOLUTION: does this release_thread call still have to be done? If yes, where? */
//if (release_thread(&proc->mutex_rxtx,&proc->instance_cnt_rxtx,thread_name)<0) return(-1);
/* CONFLICT RESOLUTION: END */
stop_meas( &softmodem_stats_rxtx_sf );
LOG_D(PHY,"%s() Exit proc[rx:%d%d tx:%d%d]\n", __FUNCTION__, proc->frame_rx, proc->subframe_rx, proc->frame_tx, proc->subframe_tx);
LOG_D(PHY, "rxtx:%lld nfapi:%lld tx:%lld rx:%lld prach:%lld ofdm:%lld ",
softmodem_stats_rxtx_sf.p_time, nfapi_meas.p_time,
TICK_TO_US(eNB->phy_proc_tx),
TICK_TO_US(eNB->phy_proc_rx),
TICK_TO_US(eNB->rx_prach),
TICK_TO_US(eNB->ofdm_mod_stats)
);
LOG_D(PHY,
"dlsch[enc:%lld mod:%lld scr:%lld rm:%lld t:%lld i:%lld] rx_dft:%lld ",
TICK_TO_US(eNB->dlsch_encoding_stats),
TICK_TO_US(eNB->dlsch_modulation_stats),
TICK_TO_US(eNB->dlsch_scrambling_stats),
TICK_TO_US(eNB->dlsch_rate_matching_stats),
TICK_TO_US(eNB->dlsch_turbo_encoding_stats),
TICK_TO_US(eNB->dlsch_interleaving_stats),
TICK_TO_US(eNB->rx_dft_stats));
LOG_D(PHY," ulsch[ch:%lld freq:%lld dec:%lld demod:%lld ru:%lld ",
TICK_TO_US(eNB->ulsch_channel_estimation_stats),
TICK_TO_US(eNB->ulsch_freq_offset_estimation_stats),
TICK_TO_US(eNB->ulsch_decoding_stats),
TICK_TO_US(eNB->ulsch_demodulation_stats),
TICK_TO_US(eNB->ulsch_rate_unmatching_stats));
LOG_D(PHY, "td:%lld dei:%lld dem:%lld llr:%lld tci:%lld ",
TICK_TO_US(eNB->ulsch_turbo_decoding_stats),
TICK_TO_US(eNB->ulsch_deinterleaving_stats),
TICK_TO_US(eNB->ulsch_demultiplexing_stats),
TICK_TO_US(eNB->ulsch_llr_stats),
TICK_TO_US(eNB->ulsch_tc_init_stats));
LOG_D(PHY, "tca:%lld tcb:%lld tcg:%lld tce:%lld l1:%lld l2:%lld]\n\n",
TICK_TO_US(eNB->ulsch_tc_alpha_stats),
TICK_TO_US(eNB->ulsch_tc_beta_stats),
TICK_TO_US(eNB->ulsch_tc_gamma_stats),
TICK_TO_US(eNB->ulsch_tc_ext_stats),
TICK_TO_US(eNB->ulsch_tc_intl1_stats),
TICK_TO_US(eNB->ulsch_tc_intl2_stats)
);
return(0);
}
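/*!
* \brief TX thread of the eNB, used with the PARALLEL_RU_L1_TRX_SPLIT configuration.
* Waits until wakeup_tx() hands over a subframe, runs phy_procedures_eNB_TX() for subframe n+4,
* signals completion back to the RX thread and wakes up the RU TX front-haul via wakeup_txfh().
* \param param is a \ref L1_proc_t structure which contains the info on what to process.
* \returns NULL.
*/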
static void *L1_thread_tx(void *param) {
L1_proc_t *eNB_proc = (L1_proc_t *)param;
L1_rxtx_proc_t *proc = &eNB_proc->L1_proc_tx;
PHY_VARS_eNB *eNB = eNB_proc->eNB;
char thread_name[100];
sprintf(thread_name,"TXnp4_%d",&eNB->proc.L1_proc == proc ? 0 : 1);
thread_top_init(thread_name,1,470000,500000,500000);
int ret;
//wait_sync("tx_thread");
while (!oai_exit) {
LOG_D(PHY,"Waiting for TX (IC %d)\n",proc->instance_cnt);
if (wait_on_condition(&proc->mutex,&proc->cond,&proc->instance_cnt,thread_name)<0) break;
if (oai_exit) break;
LOG_D(PHY,"L1_thread_tx: Running for %d.%d\n",proc->frame_tx,proc->subframe_tx);
// *****************************************
// TX processing for subframe n+4
// run PHY TX procedures one after the other for all CCs to avoid race conditions
// (may be relaxed in the future for performance reasons)
// *****************************************
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_TX1_ENB,proc->subframe_tx);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_RX1_ENB,proc->subframe_rx);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX1_ENB,proc->frame_tx);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX1_ENB,proc->frame_rx);
LOG_D(PHY,"L1 TX processing %d.%d\n",proc->frame_tx,proc->subframe_tx);
phy_procedures_eNB_TX(eNB, proc, 1);
AssertFatal((ret= pthread_mutex_lock( &proc->mutex ))==0,"error locking L1_proc_tx mutex, return %d\n",ret);
int subframe_tx = proc->subframe_tx;
int frame_tx = proc->frame_tx;
uint64_t timestamp_tx = proc->timestamp_tx;
proc->instance_cnt = -1;
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_L1_PROC_TX_IC,proc->instance_cnt);
LOG_D(PHY,"L1 TX signaling done for %d.%d\n",proc->frame_tx,proc->subframe_tx);
// the thread can now be woken up
LOG_D(PHY,"L1_thread_tx: signaling completion in %d.%d\n",proc->frame_tx,proc->subframe_tx);
AssertFatal((ret=pthread_cond_signal(&proc->cond))== 0, "ERROR pthread_cond_signal for eNB TXnp4 thread ret %d\n",ret);
AssertFatal((ret= pthread_mutex_unlock( &proc->mutex ))==0,"error unlocking L1_proc_tx mutex, return %d\n",ret);
wakeup_txfh(eNB,proc,frame_tx,subframe_tx,timestamp_tx);
}
return 0;
}
/*!
* \brief The RX UE-specific and TX thread of eNB.
* \param param is a \ref L1_proc_t structure (or an \ref L1_rxtx_proc_t in VNF mode) which contains the info on what to process.
* \returns a pointer to an int. The storage is not on the heap and must not be freed.
*/
static void *L1_thread( void *param ) {
static int eNB_thread_rxtx_status;
L1_rxtx_proc_t *proc;
// Working
if(NFAPI_MODE==NFAPI_MODE_VNF) {
proc = (L1_rxtx_proc_t *)param;
} else {
L1_proc_t *eNB_proc = (L1_proc_t *)param;
proc = &eNB_proc->L1_proc;
}
PHY_VARS_eNB *eNB = RC.eNB[0][proc->CC_id];
char thread_name[100];
cpu_set_t cpuset;
CPU_ZERO(&cpuset);
// set default return value
eNB_thread_rxtx_status = 0;
sprintf(thread_name,"RXn_TXnp4_%d",&eNB->proc.L1_proc == proc ? 0 : 1);
#if 1
{
struct sched_param sparam =
{
.sched_priority = 79,
};
if (pthread_setschedparam(pthread_self(), SCHED_RR, &sparam) != 0)
{
LOG_E(PHY,"pthread_setschedparam: %s\n", strerror(errno));
}
}
#else
thread_top_init(thread_name,1,470000,500000,500000);
#endif
LOG_I(PHY,"thread rxtx created id=%ld\n", syscall(__NR_gettid));
while (!oai_exit) {
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_eNB_PROC_RXTX0, 0 );
T(T_ENB_MASTER_TICK, T_INT(0), T_INT(proc->frame_rx), T_INT(proc->subframe_rx));
LOG_D(PHY,"L1RX waiting for RU RX\n");
if (wait_on_condition(&proc->mutex,&proc->cond,&proc->instance_cnt,thread_name)<0) break;
LOG_D(PHY,"L1RX starting in %d.%d\n",proc->frame_rx,proc->subframe_rx);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_CPUID_ENB_THREAD_RXTX,sched_getcpu());
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_eNB_PROC_RXTX0, 1 );
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_TX0_ENB,proc->subframe_tx);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_RX0_ENB,proc->subframe_rx);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_TX0_ENB,proc->frame_tx);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX0_ENB,proc->frame_rx);
if (oai_exit) break;
if (eNB->CC_id==0) {
if (rxtx(eNB,proc,thread_name) < 0) break;
}
LOG_D(PHY,"L1 RX %d.%d done\n",proc->frame_rx,proc->subframe_rx);
if (release_thread(&proc->mutex,&proc->instance_cnt,thread_name)<0) break;
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_L1_PROC_IC, proc->instance_cnt);
if (NFAPI_MODE!=NFAPI_MODE_VNF) {
if(get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT) wakeup_tx(eNB,proc->frame_rx,proc->subframe_rx,proc->frame_tx,proc->subframe_tx,proc->timestamp_tx);
else if(get_thread_parallel_conf() == PARALLEL_RU_L1_SPLIT) {
phy_procedures_eNB_TX(eNB, proc, 1);
wakeup_txfh(eNB,proc,proc->frame_tx,proc->subframe_tx,proc->timestamp_tx);
}
}
} // while !oai_exit
VCD_SIGNAL_DUMPER_DUMP_FUNCTION_BY_NAME( VCD_SIGNAL_DUMPER_FUNCTIONS_eNB_PROC_RXTX0+(proc->subframe_rx&1), 0 );
LOG_D(PHY, " *** Exiting eNB thread RXn_TXnp4\n");
eNB_thread_rxtx_status = 0;
return &eNB_thread_rxtx_status;
}
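/*!
* \brief Single-thread entry point called from the RU receive path.
* Copies the RU RX timing into the L1 proc, derives the TX frame/subframe and timestamp
* (sf_ahead subframes later), runs rxtx() inline and copies the resulting TX timing back to the RU.
*/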
void eNB_top(PHY_VARS_eNB *eNB,
int frame_rx,
int subframe_rx,
char *string,
RU_t *ru) {
L1_proc_t *proc = &eNB->proc;
L1_rxtx_proc_t *L1_proc= &proc->L1_proc;
LTE_DL_FRAME_PARMS *fp = ru->frame_parms;
RU_proc_t *ru_proc = &ru->proc;
proc->frame_rx = frame_rx;
proc->subframe_rx = subframe_rx;
if (!oai_exit) {
T(T_ENB_MASTER_TICK, T_INT(0), T_INT(ru_proc->frame_rx), T_INT(ru_proc->tti_rx));
L1_proc->timestamp_tx = ru_proc->timestamp_rx + (ru->sf_ahead*fp->samples_per_tti);
L1_proc->frame_rx = ru_proc->frame_rx;
L1_proc->subframe_rx = ru_proc->tti_rx;
L1_proc->frame_tx = (L1_proc->subframe_rx > (9-ru->sf_ahead)) ? (L1_proc->frame_rx+1)&1023 : L1_proc->frame_rx;
L1_proc->subframe_tx = (L1_proc->subframe_rx + ru->sf_ahead)%10;
if (rxtx(eNB,L1_proc,string) < 0)
LOG_E(PHY,"eNB %d CC_id %d failed during execution\n",eNB->Mod_id,eNB->CC_id);
ru_proc->timestamp_tx = L1_proc->timestamp_tx;
ru_proc->tti_tx = L1_proc->subframe_tx;
ru_proc->frame_tx = L1_proc->frame_tx;
}
}
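/*!
* \brief Hand an encoded subframe over to the TX front-haul threads of all RUs.
* Waits for the RUs to release the previous TX (dropping the subframe on timeout), then writes
* the TX timestamp/frame/subframe into each RU proc and signals its front-haul TX thread.
* UL subframes (TDD), RUs still synchronizing and RUs in their wait period are skipped.
* \returns 0 on success, -1 if the front-haul timed out or is still busy.
*/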
int wakeup_txfh(PHY_VARS_eNB *eNB,
L1_rxtx_proc_t *proc,
int frame_tx,
int subframe_tx,
uint64_t timestamp_tx) {
RU_t *ru;
RU_proc_t *ru_proc;
LTE_DL_FRAME_PARMS *fp;
int waitret,ret;
LOG_D(PHY,"L1 TX Waking up TX FH %d.%d (IC RU TX %d)\n",frame_tx,subframe_tx,proc->instance_cnt_RUs);
// wait for the RUs to release the previous TX before handing over the next subframe
waitret=timedwait_on_condition(&proc->mutex_RUs,&proc->cond_RUs,&proc->instance_cnt_RUs,"wakeup_txfh",1000000);
AssertFatal(release_thread(&proc->mutex_RUs,&proc->instance_cnt_RUs,"wakeup_txfh")==0, "error releasing eNB lock on RUs\n");
if (waitret == ETIMEDOUT) {
LOG_W(PHY,"Dropping TX slot (%d.%d) because FH has been blocked for more than 1 subframe duration (1ms)\n",frame_tx,subframe_tx);
AssertFatal((ret=pthread_mutex_lock(&eNB->proc.mutex_RU_tx))==0,"mutex_lock returns %d\n",ret);
eNB->proc.RU_mask_tx = 0;
AssertFatal((ret=pthread_mutex_unlock(&eNB->proc.mutex_RU_tx))==0,"mutex_unlock returns %d\n",ret);
AssertFatal((ret=pthread_mutex_lock(&proc->mutex_RUs))==0,"mutex_lock returns %d\n",ret);
proc->instance_cnt_RUs = 0;
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_RX0_UE,proc->instance_cnt_RUs);
AssertFatal((ret=pthread_mutex_unlock(&proc->mutex_RUs))==0,"mutex_unlock returns %d\n",ret);
return(-1);
}
for(int ru_id=0; ru_id<eNB->num_RU; ru_id++) {
ru = eNB->RU_list[ru_id];
ru_proc = &ru->proc;
fp = ru->frame_parms;
if (((fp->frame_type == TDD) && (subframe_select(fp,proc->subframe_tx)==SF_UL))||
(eNB->RU_list[ru_id]->state == RU_SYNC)||
(eNB->RU_list[ru_id]->wait_cnt>0)) {
AssertFatal((ret=pthread_mutex_lock(&proc->mutex_RUs))==0, "mutex_lock returns %d\n",ret);
proc->instance_cnt_RUs = 0;
AssertFatal((ret=pthread_mutex_unlock(&proc->mutex_RUs))==0, "mutex_unlock returns %d\n",ret);
continue; // hack: only works when all RU_tx work on the same subframe. TODO: add mask handling
}
if (ru_proc->instance_cnt_eNBs == 0) {
LOG_E(PHY,"Frame %d, subframe %d: TX FH thread busy, dropping Frame %d, subframe %d\n", ru_proc->frame_tx, ru_proc->tti_tx, proc->frame_rx, proc->subframe_rx);
AssertFatal((ret=pthread_mutex_lock(&eNB->proc.mutex_RU_tx))==0,"mutex_lock returns %d\n",ret);
eNB->proc.RU_mask_tx = 0;
AssertFatal((ret=pthread_mutex_unlock(&eNB->proc.mutex_RU_tx))==0,"mutex_unlock returns %d\n",ret);
return(-1);
}
AssertFatal((ret = pthread_mutex_lock(&ru_proc->mutex_eNBs))==0,"ERROR pthread_mutex_lock failed on mutex_eNBs L1_thread_tx with ret=%d\n",ret);
ru_proc->instance_cnt_eNBs = 0;
ru_proc->timestamp_tx = timestamp_tx;
ru_proc->tti_tx = subframe_tx;
ru_proc->frame_tx = frame_tx;
LOG_D(PHY,"L1 TX Waking up TX FH (2) %d.%d\n",frame_tx,subframe_tx);
// the thread can now be woken up
AssertFatal(pthread_cond_signal(&ru_proc->cond_eNBs) == 0,
"[eNB] ERROR pthread_cond_signal for eNB TXnp4 thread\n");
AssertFatal((ret=pthread_mutex_unlock(&ru_proc->mutex_eNBs))==0,"mutex_unlock returned %d\n",ret);
}
return(0);
}
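/*!
* \brief Pass a subframe from the L1 RX thread to L1_thread_tx (TRX-split configuration).
* Skips UL subframes, blocks until the previous TX job has completed, then copies the RX/TX
* timing into L1_proc_tx and signals its condition variable.
* \returns 0.
*/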
int wakeup_tx(PHY_VARS_eNB *eNB,
int frame_rx,
int subframe_rx,
int frame_tx,
int subframe_tx,
uint64_t timestamp_tx) {
L1_rxtx_proc_t *L1_proc_tx = &eNB->proc.L1_proc_tx;
int ret;
LOG_D(PHY,"ENTERED wakeup_tx (IC %d)\n",L1_proc_tx->instance_cnt);
// check if the subframe has TX; if not, return
if (subframe_select(&eNB->frame_parms,subframe_tx) == SF_UL) return 0;
AssertFatal((ret = pthread_mutex_lock(&L1_proc_tx->mutex))==0,"mutex_lock returns %d\n",ret);
LOG_D(PHY,"L1 RX %d.%d Waiting to wake up L1 TX %d.%d (IC L1TX %d)\n",frame_rx,subframe_rx,frame_tx,subframe_tx,L1_proc_tx->instance_cnt);
while(L1_proc_tx->instance_cnt == 0) {
pthread_cond_wait(&L1_proc_tx->cond,&L1_proc_tx->mutex);
}
LOG_D(PHY,"L1 RX Got signal that TX %d.%d is done\n",L1_proc_tx->frame_tx,L1_proc_tx->subframe_tx);
L1_proc_tx->instance_cnt = 0;
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_L1_PROC_TX_IC,L1_proc_tx->instance_cnt);
L1_proc_tx->subframe_rx = subframe_rx;
L1_proc_tx->frame_rx = frame_rx;
L1_proc_tx->subframe_tx = subframe_tx;
L1_proc_tx->frame_tx = frame_tx;
L1_proc_tx->timestamp_tx = timestamp_tx;
// the thread can now be woken up
LOG_D(PHY,"L1 RX Waking up L1 TX %d.%d\n",frame_tx,subframe_tx);
AssertFatal(pthread_cond_signal(&L1_proc_tx->cond) == 0, "ERROR pthread_cond_signal for eNB L1 thread tx\n");
AssertFatal((ret=pthread_mutex_unlock(&L1_proc_tx->mutex))==0,"mutex_unlock returns %d\n",ret);
return(0);
}
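/*!
* \brief Called by the RU once the common RX processing of a subframe is done.
* If the L1 thread is still busy the subframe is dropped with a warning; otherwise the RX timing
* and the derived TX timing (sf_ahead subframes later) are written into the L1 proc and the
* L1 thread is signaled.
* \returns 0 in both cases.
*/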
int wakeup_rxtx(PHY_VARS_eNB *eNB,
RU_t *ru) {
L1_proc_t *proc =&eNB->proc;
RU_proc_t *ru_proc=&ru->proc;
L1_rxtx_proc_t *L1_proc=&proc->L1_proc;
LTE_DL_FRAME_PARMS *fp = &eNB->frame_parms;
int ret;
LOG_D(PHY,"ENTERED wakeup_rxtx, %d.%d\n",ru_proc->frame_rx,ru_proc->tti_rx);
// wake up TX for subframe n+sf_ahead
// lock the TX mutex and make sure the thread is ready
AssertFatal((ret=pthread_mutex_lock(&L1_proc->mutex)) == 0,"mutex_lock returns %d\n", ret);
if (L1_proc->instance_cnt == 0) { // L1_thread is busy so abort the subframe
AssertFatal((ret=pthread_mutex_unlock( &L1_proc->mutex))==0,"mutex_unlock return %d\n",ret);
LOG_W(PHY,"L1_thread isn't ready in %d.%d, aborting RX processing\n",ru_proc->frame_rx,ru_proc->tti_rx);
/* AssertFatal(1==0,"L1_thread isn't ready in %d.%d (L1RX %d.%d), aborting RX, exiting\n",
ru_proc->frame_rx,ru_proc->tti_rx,L1_proc->frame_rx,L1_proc->subframe_rx);
*/
return(0);
}
++L1_proc->instance_cnt;
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_L1_PROC_IC, L1_proc->instance_cnt);
// We have just received and processed the common part of a subframe, say n.
// TS_rx is the last received timestamp (start of 1st slot), TS_tx is the desired
// transmitted timestamp of the next TX slot (first).
// The last (TS_rx mod samples_per_frame) was n*samples_per_tti,
// we want to generate subframe (n+sf_ahead), so TS_tx = TS_rx+sf_ahead*samples_per_tti,
// and proc->subframe_tx = proc->subframe_rx+sf_ahead
L1_proc->timestamp_tx = ru_proc->timestamp_rx + (ru->sf_ahead*fp->samples_per_tti);
L1_proc->frame_rx = ru_proc->frame_rx;
L1_proc->subframe_rx = ru_proc->tti_rx;
L1_proc->frame_tx = (L1_proc->subframe_rx > (9-ru->sf_ahead)) ? (L1_proc->frame_rx+1)&1023 : L1_proc->frame_rx;
L1_proc->subframe_tx = (L1_proc->subframe_rx + ru->sf_ahead)%10;
LOG_D(PHY,"wakeup_rxtx: L1_proc->subframe_rx %d, L1_proc->subframe_tx %d, RU %d\n",L1_proc->subframe_rx,L1_proc->subframe_tx,ru->idx);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_WAKEUP_RXTX_RX_RU+ru->idx, L1_proc->frame_rx);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_WAKEUP_RXTX_RX_RU+ru->idx, L1_proc->subframe_rx);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_FRAME_NUMBER_WAKEUP_RXTX_TX_RU+ru->idx, L1_proc->frame_tx);
VCD_SIGNAL_DUMPER_DUMP_VARIABLE_BY_NAME(VCD_SIGNAL_DUMPER_VARIABLES_SUBFRAME_NUMBER_WAKEUP_RXTX_TX_RU+ru->idx, L1_proc->subframe_tx);
// the thread can now be woken up
AssertFatal((ret=pthread_cond_signal(&L1_proc->cond))== 0, "ERROR pthread_cond_signal for eNB RXn-TXnp4 thread, ret %d\n",ret);
AssertFatal((ret=pthread_mutex_unlock( &L1_proc->mutex))==0,"mutex_unlock return %d\n",ret);
return(0);
}
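/*!
* \brief Collect PRACH notifications from the RUs and trigger PRACH processing.
* Each reporting RU sets its bit in RU_mask_prach; once all RUs have reported (the RU bookkeeping
* is skipped when called with ru == NULL) and the subframe is a PRACH subframe, the PRACH
* receive thread is woken up with the current frame/subframe.
*/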
void wakeup_prach_eNB(PHY_VARS_eNB *eNB,
RU_t *ru,
int frame,
int subframe) {
L1_proc_t *proc = &eNB->proc;
LTE_DL_FRAME_PARMS *fp=&eNB->frame_parms;
int i,ret;
if (ru!=NULL) {
AssertFatal((ret=pthread_mutex_lock(&proc->mutex_RU_PRACH))==0,"mutex_lock return %d\n",ret);
for (i=0; i<eNB->num_RU; i++) {
if (ru == eNB->RU_list[i] && eNB->RU_list[i]->wait_cnt == 0) {
LOG_D(PHY,"frame %d, subframe %d: RU %d for eNB %d signals PRACH (mask %x, num_RU %d)\n",frame,subframe,i,eNB->Mod_id,proc->RU_mask_prach,eNB->num_RU);
proc->RU_mask_prach |= (1<<i);
} else if (eNB->RU_list[i]->state == RU_SYNC || eNB->RU_list[i]->wait_cnt > 0) {
proc->RU_mask_prach |= (1<<i);
}
}
if (proc->RU_mask_prach != (1<<eNB->num_RU)-1) { // not all RUs have provided their information so return
AssertFatal((ret=pthread_mutex_unlock(&proc->mutex_RU_PRACH))==0,"mutex_unlock return %d\n",ret);
return;
} else { // all RUs have provided their information so continue on and wakeup eNB processing
proc->RU_mask_prach = 0;
AssertFatal((ret=pthread_mutex_unlock(&proc->mutex_RU_PRACH))==0,"mutex_unlock return %d\n",ret);
}
}
// check if we have to detect PRACH first
if (is_prach_subframe(fp,frame,subframe)>0) {
LOG_D(PHY,"Triggering prach processing, frame %d, subframe %d\n",frame,subframe);
if (proc->instance_cnt_prach == 0) {
LOG_W(PHY,"[eNB] Frame %d Subframe %d, dropping PRACH\n", frame,subframe);
return;
}
// wake up thread for PRACH RX
AssertFatal((ret=pthread_mutex_lock(&proc->mutex_prach))==0,"[eNB] ERROR pthread_mutex_lock for eNB PRACH thread %d (IC %d)\n", proc->thread_index, proc->instance_cnt_prach);
++proc->instance_cnt_prach;
// set timing for prach thread
proc->frame_prach = frame;
proc->subframe_prach = subframe;
// the thread can now be woken up
if (pthread_cond_signal(&proc->cond_prach) != 0) {
LOG_E( PHY, "[eNB] ERROR pthread_cond_signal for eNB PRACH thread %d\n", proc->thread_index);
exit_fun( "ERROR pthread_cond_signal" );
return;
}
AssertFatal((ret=pthread_mutex_unlock( &proc->mutex_prach))==0,"mutex_unlock return %d\n",ret);
}
}
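/*!
* \brief Same as wakeup_prach_eNB(), but for the PRACH of BL/CE UEs.
* Aggregates the RU notifications in RU_mask_prach_br and wakes up the PRACH BR receive thread.
*/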
void wakeup_prach_eNB_br(PHY_VARS_eNB *eNB,
RU_t *ru,
int frame,
int subframe) {
L1_proc_t *proc = &eNB->proc;
LTE_DL_FRAME_PARMS *fp=&eNB->frame_parms;
int i,ret;
if (ru!=NULL) {
AssertFatal((ret=pthread_mutex_lock(&proc->mutex_RU_PRACH_br))==0,"mutex_lock return %d\n",ret);
for (i=0; i<eNB->num_RU; i++) {
if (ru == eNB->RU_list[i]) {
LOG_D(PHY,"frame %d, subframe %d: RU %d for eNB %d signals PRACH BR (mask %x, num_RU %d)\n",frame,subframe,i,eNB->Mod_id,proc->RU_mask_prach_br,eNB->num_RU);
if ((proc->RU_mask_prach_br&(1<<i)) > 0)
LOG_E(PHY,"eNB %d frame %d, subframe %d : previous information (PRACH BR) from RU %d (num_RU %d, mask %x) has not been served yet!\n",
eNB->Mod_id,frame,subframe,ru->idx,eNB->num_RU,proc->RU_mask_prach_br);
proc->RU_mask_prach_br |= (1<<i);
}
}
if (proc->RU_mask_prach_br != (1<<eNB->num_RU)-1) { // not all RUs have provided their information so return
AssertFatal((ret=pthread_mutex_unlock(&proc->mutex_RU_PRACH_br))==0,"mutex_unlock return %d\n",ret);
return;
} else { // all RUs have provided their information so continue on and wakeup eNB processing
proc->RU_mask_prach_br = 0;
AssertFatal((ret=pthread_mutex_unlock(&proc->mutex_RU_PRACH_br))==0,"mutex_unlock return %d\n",ret);
}
}
// check if we have to detect PRACH first
if (is_prach_subframe(fp,frame,subframe)>0) {
LOG_D(PHY,"Triggering prach br processing, frame %d, subframe %d\n",frame,subframe);
if (proc->instance_cnt_prach_br == 0) {
LOG_W(PHY,"[eNB] Frame %d Subframe %d, dropping PRACH BR\n", frame,subframe);
return;
}
// wake up thread for PRACH RX
AssertFatal((ret=pthread_mutex_lock(&proc->mutex_prach_br))==0,"[eNB] ERROR pthread_mutex_lock for eNB PRACH thread %d (IC %d)\n", proc->thread_index, proc->instance_cnt_prach_br);
++proc->instance_cnt_prach_br;
// set timing for prach thread
proc->frame_prach_br = frame;
proc->subframe_prach_br = subframe;
// the thread can now be woken up
if (pthread_cond_signal(&proc->cond_prach_br) != 0) {
LOG_E( PHY, "[eNB] ERROR pthread_cond_signal for eNB PRACH BR thread %d\n", proc->thread_index);
exit_fun( "ERROR pthread_cond_signal" );
return;
}
AssertFatal((ret=pthread_mutex_unlock( &proc->mutex_prach_br))==0,"mutex_unlock return %d\n",ret);
}
}
/*!
* \brief The prach receive thread of eNB.
* \param param is a \ref PHY_VARS_eNB structure which contains the info on what to process.
* \returns a pointer to an int. The storage is not on the heap and must not be freed.
*/
static void *eNB_thread_prach( void *param ) {
static int eNB_thread_prach_status;
PHY_VARS_eNB *eNB= (PHY_VARS_eNB *)param;
L1_proc_t *proc = &eNB->proc;
// set default return value
eNB_thread_prach_status = 0;
thread_top_init("eNB_thread_prach",1,500000,1000000,20000000);
//wait_sync("eNB_thread_prach");
while (!oai_exit) {
if (wait_on_condition(&proc->mutex_prach,&proc->cond_prach,&proc->instance_cnt_prach,"eNB_prach_thread") < 0) break;
if (oai_exit) break;
LOG_D(PHY,"Running eNB prach procedures\n");
prach_procedures(eNB,0 );
if (release_thread(&proc->mutex_prach,&proc->instance_cnt_prach,"eNB_prach_thread") < 0) break;
}
LOG_I(PHY, "Exiting eNB thread PRACH\n");
eNB_thread_prach_status = 0;
return &eNB_thread_prach_status;
}
/*!
* \brief The prach receive thread of eNB for BL/CE UEs.
* \param param is a \ref PHY_VARS_eNB structure which contains the info on what to process.
* \returns a pointer to an int. The storage is not on the heap and must not be freed.
*/
static void *eNB_thread_prach_br( void *param ) {
static int eNB_thread_prach_status;
PHY_VARS_eNB *eNB= (PHY_VARS_eNB *)param;
L1_proc_t *proc = &eNB->proc;
// set default return value
eNB_thread_prach_status = 0;
thread_top_init("eNB_thread_prach_br",1,500000,1000000,20000000);
while (!oai_exit) {
if (wait_on_condition(&proc->mutex_prach_br,&proc->cond_prach_br,&proc->instance_cnt_prach_br,"eNB_prach_thread_br") < 0) break;
if (oai_exit) break;
LOG_D(PHY,"Running eNB prach procedures for BL/CE UEs\n");
prach_procedures(eNB,1);
if (release_thread(&proc->mutex_prach_br,&proc->instance_cnt_prach_br,"eNB_prach_thread_br") < 0) break;
}
LOG_I(PHY, "Exiting eNB thread PRACH BR\n");
eNB_thread_prach_status = 0;
return &eNB_thread_prach_status;
}
static void print_opp_meas(PHY_VARS_eNB *eNB);
static void reset_opp_meas(PHY_VARS_eNB *eNB);
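/*!
* \brief Statistics thread: once per second prints the opportunistic time measurements and the
* MAC statistics, and periodically resets the measurements.
*/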
static void *process_stats_thread(void *param) {
PHY_VARS_eNB *eNB = (PHY_VARS_eNB *)param;
wait_sync("process_stats_thread");
reset_opp_meas(eNB);
while (!oai_exit) {
sleep(1);
print_opp_meas(eNB);
if (RC.mac)
lte_dump_mac_stats(RC.mac[0], stdout);
if (time(NULL) % 10)
reset_opp_meas(eNB);
}
return(NULL);
}
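/*!
* \brief L1 statistics thread: once per second rewrites L1_stats.log with the current I0, ULSCH
* and UCI statistics.
*/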
void *L1_stats_thread(void *param) {
PHY_VARS_eNB *eNB = (PHY_VARS_eNB *)param;
wait_sync("L1_stats_thread");
FILE *fd;
while (!oai_exit) {
sleep(1);
fd=fopen("L1_stats.log","w");
AssertFatal(fd!=NULL,"Cannot open L1_stats.log\n");
dump_I0_stats(fd,eNB);
dump_ulsch_stats(fd,eNB,eNB->proc.L1_proc_tx.frame_tx);
dump_uci_stats(fd,eNB,eNB->proc.L1_proc_tx.frame_tx);
fclose(fd);
}
return(NULL);
}
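/*!
* \brief Initialize the L1 processing context of every component carrier of an eNB instance:
* instance counters, mutexes, condition variables and RU masks, and spawn the L1 RX/TX, PRACH,
* PRACH-BR and statistics threads according to the threading and nFAPI configuration.
*/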
void init_eNB_proc(int inst) {
/*int i=0;*/
int CC_id;
PHY_VARS_eNB *eNB;
L1_proc_t *proc;
L1_rxtx_proc_t *L1_proc, *L1_proc_tx;
pthread_attr_t *attr0=NULL,*attr1=NULL,*attr_prach=NULL;
pthread_attr_t *attr_prach_br=NULL;
LOG_I(PHY,"%s(inst:%d) RC.nb_CC[inst]:%d \n",__FUNCTION__,inst,RC.nb_CC[inst]);
for (CC_id=0; CC_id<RC.nb_CC[inst]; CC_id++) {
eNB = RC.eNB[inst][CC_id];
LOG_I(PHY,"Initializing eNB processes instance:%d CC_id %d \n",inst,CC_id);
proc = &eNB->proc;
L1_proc = &proc->L1_proc;
L1_proc_tx = &proc->L1_proc_tx;
L1_proc->instance_cnt = -1;
L1_proc_tx->instance_cnt = -1;
L1_proc->instance_cnt_RUs = 0;
L1_proc_tx->instance_cnt_RUs = 0;
proc->eNB = eNB;
proc->instance_cnt_prach = -1;
proc->instance_cnt_asynch_rxtx = -1;
proc->instance_cnt_synch = -1;
proc->CC_id = CC_id;
proc->first_rx =1;
proc->first_tx =1;
proc->RU_mask_tx = (1<<eNB->num_RU)-1;
memset((void *)proc->RU_mask,0,10*sizeof(proc->RU_mask[0]));
proc->RU_mask_prach =0;
pthread_mutex_init( &eNB->UL_INFO_mutex, NULL);
pthread_mutex_init( &L1_proc->mutex, NULL);
pthread_mutex_init( &L1_proc_tx->mutex, NULL);
pthread_cond_init( &L1_proc->cond, NULL);
pthread_cond_init( &L1_proc_tx->cond, NULL);
pthread_mutex_init( &L1_proc->mutex_RUs, NULL);
pthread_mutex_init( &L1_proc_tx->mutex_RUs, NULL);
pthread_cond_init( &L1_proc->cond_RUs, NULL);
pthread_cond_init( &L1_proc_tx->cond_RUs, NULL);
pthread_mutex_init( &proc->mutex_prach, NULL);
pthread_mutex_init( &proc->mutex_asynch_rxtx, NULL);
pthread_mutex_init( &proc->mutex_RU,NULL);
pthread_mutex_init( &proc->mutex_RU_tx,NULL);
pthread_mutex_init( &proc->mutex_RU_PRACH,NULL);
pthread_cond_init( &proc->cond_prach, NULL);
pthread_cond_init( &proc->cond_asynch_rxtx, NULL);
pthread_attr_init( &proc->attr_prach);
pthread_attr_init( &proc->attr_asynch_rxtx);
pthread_attr_init( &L1_proc->attr);
pthread_attr_init( &L1_proc_tx->attr);
proc->instance_cnt_prach_br = -1;
proc->RU_mask_prach_br=0;
pthread_mutex_init( &proc->mutex_prach_br, NULL);
pthread_mutex_init( &proc->mutex_RU_PRACH_br,NULL);
pthread_cond_init( &proc->cond_prach_br, NULL);
pthread_attr_init( &proc->attr_prach_br);
LOG_I(PHY,"eNB->single_thread_flag:%d\n", eNB->single_thread_flag);
if ((get_thread_parallel_conf() == PARALLEL_RU_L1_SPLIT) && NFAPI_MODE!=NFAPI_MODE_VNF) {
pthread_create( &L1_proc->pthread, attr0, L1_thread, proc );
} else if ((get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT) && NFAPI_MODE!=NFAPI_MODE_VNF) {
pthread_create( &L1_proc->pthread, attr0, L1_thread, proc );
pthread_create( &L1_proc_tx->pthread, attr1, L1_thread_tx, proc);
} else if (NFAPI_MODE==NFAPI_MODE_VNF) { // this is necessary in VNF or L2 FAPI simulator mode.
// Original Code from Fujitsu w/ old structure/field name
//pthread_create( &proc_rxtx[0].pthread_rxtx, attr0, eNB_thread_rxtx, &proc_rxtx[0] );
//pthread_create( &proc_rxtx[1].pthread_rxtx, attr1, eNB_thread_rxtx, &proc_rxtx[1] );
pthread_create( &L1_proc->pthread, attr0, L1_thread, L1_proc );
if (pthread_setname_np(L1_proc->pthread, "oai:enb-L1-rx") != 0)
{
LOG_E(PHY, "pthread_setname_np: %s\n", strerror(errno));
}
pthread_create( &L1_proc_tx->pthread, attr1, L1_thread, L1_proc_tx);
if (pthread_setname_np(L1_proc_tx->pthread, "oai:enb-L1-tx") != 0)
{
LOG_E(PHY, "pthread_setname_np: %s\n", strerror(errno));
}
}
if (NFAPI_MODE!=NFAPI_MODE_VNF) {
pthread_create( &proc->pthread_prach, attr_prach, eNB_thread_prach, eNB );
pthread_create( &proc->pthread_prach_br, attr_prach_br, eNB_thread_prach_br, eNB );
}
AssertFatal(proc->instance_cnt_prach == -1,"instance_cnt_prach = %d\n",proc->instance_cnt_prach);
if (opp_enabled == 1)
threadCreate(&proc->process_stats_thread, process_stats_thread, (void *)eNB, "opp stats", -1, sched_get_priority_min(SCHED_OAI));
if (!IS_SOFTMODEM_NOSTATS_BIT)
threadCreate(&proc->L1_stats_thread, L1_stats_thread, (void *)eNB, "L1 stats", -1, sched_get_priority_min(SCHED_OAI));
}
//for multiple CCs: setup master and slaves
/*
for (CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
eNB = PHY_vars_eNB_g[inst][CC_id];
if (eNB->node_timing == synch_to_ext_device) { //master
eNB->proc.num_slaves = MAX_NUM_CCs-1;
eNB->proc.slave_proc = (L1_proc_t**)malloc(eNB->proc.num_slaves*sizeof(L1_proc_t*));
for (i=0; i< eNB->proc.num_slaves; i++) {
if (i < CC_id) eNB->proc.slave_proc[i] = &(PHY_vars_eNB_g[inst][i]->proc);
if (i >= CC_id) eNB->proc.slave_proc[i] = &(PHY_vars_eNB_g[inst][i+1]->proc);
}
}
}
*/
/* setup PHY proc TX sync mechanism */
pthread_mutex_init(&sync_phy_proc.mutex_phy_proc_tx, NULL);
pthread_cond_init(&sync_phy_proc.cond_phy_proc_tx, NULL);
sync_phy_proc.phy_proc_CC_id = 0;
}
/*!
* \brief Terminate eNB TX and RX threads.
*/
void kill_eNB_proc(int inst) {
int *status;
PHY_VARS_eNB *eNB;
L1_proc_t *proc;
L1_rxtx_proc_t *L1_proc, *L1_proc_tx;
int ret;
for (int CC_id=0; CC_id<MAX_NUM_CCs; CC_id++) {
eNB=RC.eNB[inst][CC_id];
proc = &eNB->proc;
L1_proc = &proc->L1_proc;
L1_proc_tx = &proc->L1_proc_tx;
LOG_I(PHY, "Killing TX CC_id %d inst %d\n", CC_id, inst );
if ((get_thread_parallel_conf() == PARALLEL_RU_L1_SPLIT || get_thread_parallel_conf() == PARALLEL_RU_L1_TRX_SPLIT) && NFAPI_MODE!=NFAPI_MODE_VNF) {
AssertFatal((ret=pthread_mutex_lock(&L1_proc->mutex))==0,"mutex_lock return %d\n",ret);
L1_proc->instance_cnt = 0;
pthread_cond_signal(&L1_proc->cond);
AssertFatal((ret=pthread_mutex_unlock(&L1_proc->mutex))==0,"mutex_unlock return %d\n",ret);
AssertFatal((ret=pthread_mutex_lock(&L1_proc_tx->mutex))==0,"mutex_lock return %d\n",ret);
L1_proc_tx->instance_cnt = 0;
pthread_cond_signal(&L1_proc_tx->cond);
AssertFatal((ret=pthread_mutex_unlock(&L1_proc_tx->mutex))==0,"mutex_unlock return %d\n",ret);
}
AssertFatal((ret=pthread_mutex_lock(&proc->mutex_prach))==0,"mutex_lock return %d\n",ret);
proc->instance_cnt_prach = 0;
pthread_cond_signal( &proc->cond_prach );
AssertFatal((ret=pthread_mutex_unlock(&proc->mutex_prach))==0,"mutex_unlock return %d\n",ret);
pthread_cond_signal( &proc->cond_asynch_rxtx );
pthread_cond_broadcast(&sync_phy_proc.cond_phy_proc_tx);
LOG_D(PHY, "joining pthread_prach\n");
pthread_join( proc->pthread_prach, (void **)&status );